Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
author    David S. Miller <davem@davemloft.net>
          Tue, 9 Oct 2018 06:42:44 +0000 (23:42 -0700)
committer David S. Miller <davem@davemloft.net>
          Tue, 9 Oct 2018 06:42:44 +0000 (23:42 -0700)
Alexei Starovoitov says:

====================
pull-request: bpf-next 2018-10-08

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) sk_lookup_[tcp|udp] and sk_release helpers from Joe Stringer, which allow
BPF programs to perform lookups for sockets in a network namespace. This
lets a program determine early in processing whether the stack is expecting
to receive the packet, and perform some action (e.g. drop, forward
somewhere) based on that information; a minimal sketch follows.
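
An illustrative sketch of the idea (not part of this merge: the section and
program names are made up, header parsing is abbreviated, and bpf_helpers.h
refers to the helper declarations used by the selftests):

#include <stddef.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

SEC("classifier")
int drop_unexpected_tcp(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	struct ethhdr *eth = data;
	struct iphdr *iph = (void *)(eth + 1);
	struct bpf_sock_tuple tuple = {};
	struct tcphdr *tcp;
	struct bpf_sock *sk;

	/* Bounds checks keep the verifier happy; non-TCP traffic passes. */
	if ((void *)(iph + 1) > data_end || iph->protocol != IPPROTO_TCP)
		return TC_ACT_OK;
	tcp = (void *)iph + iph->ihl * 4;
	if ((void *)(tcp + 1) > data_end)
		return TC_ACT_OK;

	tuple.ipv4.saddr = iph->saddr;
	tuple.ipv4.daddr = iph->daddr;
	tuple.ipv4.sport = tcp->source;
	tuple.ipv4.dport = tcp->dest;

	/* netns 0: look up in the namespace the skb belongs to */
	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4), 0, 0);
	if (!sk)
		return TC_ACT_SHOT;	/* stack is not expecting this packet */
	bpf_sk_release(sk);	/* an acquired reference must always be released */
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";

The verifier enforces the release: a program that could leak the socket
reference returned by the lookup is rejected at load time.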

2) per-cpu cgroup local storage from Roman Gushchin.
Per-cpu cgroup local storage is very similar to simple cgroup storage
except that all the data is per-cpu. The main goal of the per-cpu variant
is to implement super fast counters (e.g. packet counters) that require
neither lookups nor atomic operations in the fast path.
An example of these hybrid counters is in selftests/bpf/netcnt_prog.c; a
condensed sketch of the pattern follows.
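
A condensed sketch of that pattern (illustrative, modeled loosely on the
selftest; the struct and section names here are made up and the map
definition follows the selftests' bpf_map_def convention):

#include <linux/bpf.h>
#include "bpf_helpers.h"

struct percpu_counters {
	__u64 packets;
	__u64 bytes;
};

struct bpf_map_def SEC("maps") percpu_cnt = {
	.type = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
	.key_size = sizeof(struct bpf_cgroup_storage_key),
	.value_size = sizeof(struct percpu_counters),
};

SEC("cgroup/skb")
int count_packets(struct __sk_buff *skb)
{
	/* One call fetches this cgroup's storage for the current CPU;
	 * plain, non-atomic increments are safe because every CPU owns
	 * its own copy of the value.
	 */
	struct percpu_counters *cnt = bpf_get_local_storage(&percpu_cnt, 0);

	cnt->packets++;
	cnt->bytes += skb->len;
	return 1;	/* allow the packet */
}

char _license[] SEC("license") = "GPL";

User space can later aggregate the per-cpu copies with a map lookup,
keeping all of the cost out of the packet path.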

3) allow HW offload of programs with BPF-to-BPF function calls from Quentin Monnet

4) support more than 64-byte key/value in HW offloaded BPF maps from Jakub Kicinski

5) rename of libbpf interfaces from Andrey Ignatov.
libbpf is maturing as a library and should follow good practices in
library design and implementation to play well with other libraries.
This patch set brings a consistent naming convention to global symbols;
a short loader sketch below illustrates the prefixes.
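
To illustrate the convention (an assumption-laden sketch, not from the
patch set itself: the <bpf/libbpf.h> install path and the exact error
helpers may differ by version), global symbols converge on bpf_*, btf_*
and libbpf_* prefixes:

#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	char errbuf[128];
	long err;

	obj = bpf_object__open("prog.o");	/* bpf_object__* namespace */
	err = libbpf_get_error(obj);		/* libbpf_* library utility */
	if (err) {
		libbpf_strerror(err, errbuf, sizeof(errbuf));
		fprintf(stderr, "open failed: %s\n", errbuf);
		return 1;
	}

	if (bpf_object__load(obj))		/* load maps and programs */
		fprintf(stderr, "load failed\n");

	bpf_object__close(obj);
	return 0;
}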

6) relicense libbpf as LGPL-2.1 OR BSD-2-Clause from Alexei Starovoitov
to let Apache2 projects use libbpf

7) various AF_XDP fixes from Björn and Magnus
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
1099 files changed:
Documentation/devicetree/bindings/input/gpio-keys.txt
Documentation/devicetree/bindings/mips/mscc.txt
Documentation/devicetree/bindings/net/mscc-ocelot.txt
Documentation/devicetree/bindings/phy/phy-ocelot-serdes.txt [new file with mode: 0644]
Documentation/fb/uvesafb.txt
Documentation/networking/devlink-params-bnxt.txt [new file with mode: 0644]
Documentation/networking/devlink-params.txt [new file with mode: 0644]
Documentation/networking/ip-sysctl.txt
Documentation/networking/rxrpc.txt
Documentation/networking/xfrm_device.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
arch/arm/boot/dts/bcm63138.dtsi
arch/arm/boot/dts/stm32mp157c.dtsi
arch/arm/boot/dts/sun8i-r40.dtsi
arch/arm/mm/ioremap.c
arch/arm/tools/syscall.tbl
arch/arm64/kvm/guest.c
arch/arm64/mm/hugetlbpage.c
arch/mips/boot/dts/mscc/ocelot.dtsi
arch/powerpc/include/asm/setup.h
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/tm.S
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/powerpc/lib/checksum_64.S
arch/powerpc/lib/code-patching.c
arch/powerpc/mm/mem.c
arch/powerpc/mm/numa.c
arch/powerpc/mm/pkeys.c
arch/powerpc/platforms/powernv/pci-ioda-tce.c
arch/riscv/include/asm/asm-prototypes.h [new file with mode: 0644]
arch/riscv/kernel/setup.c
arch/x86/boot/compressed/mem_encrypt.S
arch/x86/entry/vdso/Makefile
arch/x86/entry/vdso/vclock_gettime.c
arch/x86/events/amd/uncore.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/include/asm/perf_event.h
arch/x86/include/asm/uv/uv.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/tsc.c
arch/x86/kvm/mmu.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
block/blk-mq-tag.c
block/blk-mq.c
block/elevator.c
drivers/atm/nicstar.c
drivers/base/power/main.c
drivers/block/xen-blkfront.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/bt3c_cs.c
drivers/bluetooth/btbcm.c
drivers/bluetooth/btrsi.c
drivers/bluetooth/btrtl.c
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_qca.c
drivers/bluetooth/hci_serdev.c
drivers/clocksource/timer-atmel-pit.c
drivers/clocksource/timer-fttmr010.c
drivers/clocksource/timer-ti-32k.c
drivers/cpufreq/qcom-cpufreq-kryo.c
drivers/crypto/caam/caamalg.c
drivers/crypto/chelsio/chcr_algo.c
drivers/crypto/chelsio/chcr_crypto.h
drivers/crypto/mxs-dcp.c
drivers/crypto/qat/qat_c3xxx/adf_drv.c
drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
drivers/crypto/qat/qat_c62x/adf_drv.c
drivers/crypto/qat/qat_c62xvf/adf_drv.c
drivers/crypto/qat/qat_dh895xcc/adf_drv.c
drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
drivers/dax/device.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/dc_link.h
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
drivers/gpu/drm/arm/malidp_drv.c
drivers/gpu/drm/arm/malidp_hw.c
drivers/gpu/drm/arm/malidp_hw.h
drivers/gpu/drm/arm/malidp_mw.c
drivers/gpu/drm/arm/malidp_regs.h
drivers/gpu/drm/drm_client.c
drivers/gpu/drm/drm_fb_cma_helper.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_lease.c
drivers/gpu/drm/drm_panel.c
drivers/gpu/drm/drm_syncobj.c
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/exynos/exynos_drm_iommu.h
drivers/gpu/drm/i2c/tda9950.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_gpu_error.h
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_pci.c
drivers/hid/hid-ids.h
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/intel-ish-hid/ipc/hw-ish.h
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/infiniband/core/cache.c
drivers/infiniband/core/ucma.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/core/uverbs_uapi.c
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/pio.c
drivers/infiniband/hw/hfi1/pio.h
drivers/infiniband/hw/hfi1/user_sdma.c
drivers/infiniband/hw/hfi1/verbs.c
drivers/infiniband/hw/mlx5/devx.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/input/keyboard/atakbd.c
drivers/input/misc/uinput.c
drivers/input/mouse/elantech.c
drivers/input/touchscreen/egalax_ts.c
drivers/iommu/amd_iommu.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel-pasid.h
drivers/iommu/rockchip-iommu.c
drivers/isdn/gigaset/asyncdata.c
drivers/isdn/gigaset/ev-layer.c
drivers/isdn/gigaset/isocdata.c
drivers/isdn/hisax/w6692.c
drivers/md/bcache/bcache.h
drivers/md/bcache/journal.c
drivers/md/bcache/super.c
drivers/md/dm-cache-metadata.c
drivers/md/dm-cache-target.c
drivers/md/dm-mpath.c
drivers/md/dm-raid.c
drivers/md/dm-thin-metadata.c
drivers/media/v4l2-core/v4l2-event.c
drivers/media/v4l2-core/v4l2-fh.c
drivers/mmc/core/host.c
drivers/mmc/core/slot-gpio.c
drivers/mmc/host/renesas_sdhi_sys_dmac.c
drivers/net/bonding/bond_main.c
drivers/net/dsa/b53/Kconfig
drivers/net/dsa/b53/b53_common.c
drivers/net/dsa/mt7530.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/amd/declance.c
drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bcmsysport.h
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
drivers/net/ethernet/cavium/liquidio/lio_core.c
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
drivers/net/ethernet/chelsio/cxgb4/sched.c
drivers/net/ethernet/chelsio/cxgb4/sched.h
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/Kconfig
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
drivers/net/ethernet/freescale/dpaa2/Kconfig [new file with mode: 0644]
drivers/net/ethernet/freescale/dpaa2/Makefile
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c [new file with mode: 0644]
drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h [new file with mode: 0644]
drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
drivers/net/ethernet/freescale/dpaa2/dpni.c
drivers/net/ethernet/freescale/dpaa2/dpni.h
drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h [new file with mode: 0644]
drivers/net/ethernet/freescale/dpaa2/dprtc.c [new file with mode: 0644]
drivers/net/ethernet/freescale/dpaa2/dprtc.h [new file with mode: 0644]
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fman/mac.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/hisilicon/hns/hnae.c
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
drivers/net/ethernet/hisilicon/hns3/hnae3.h
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
drivers/net/ethernet/huawei/hinic/hinic_main.c
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/iavf/iavf.h
drivers/net/ethernet/intel/ice/Makefile
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
drivers/net/ethernet/intel/ice/ice_common.c
drivers/net/ethernet/intel/ice/ice_common.h
drivers/net/ethernet/intel/ice/ice_controlq.c
drivers/net/ethernet/intel/ice/ice_controlq.h
drivers/net/ethernet/intel/ice/ice_devids.h
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_hw_autogen.h
drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
drivers/net/ethernet/intel/ice/ice_lib.c [new file with mode: 0644]
drivers/net/ethernet/intel/ice/ice_lib.h [new file with mode: 0644]
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_sched.c
drivers/net/ethernet/intel/ice/ice_sched.h
drivers/net/ethernet/intel/ice/ice_sriov.c [new file with mode: 0644]
drivers/net/ethernet/intel/ice/ice_sriov.h [new file with mode: 0644]
drivers/net/ethernet/intel/ice/ice_status.h
drivers/net/ethernet/intel/ice/ice_switch.c
drivers/net/ethernet/intel/ice/ice_switch.h
drivers/net/ethernet/intel/ice/ice_txrx.h
drivers/net/ethernet/intel/ice/ice_type.h
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c [new file with mode: 0644]
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h [new file with mode: 0644]
drivers/net/ethernet/intel/ixgbe/Makefile
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h [new file with mode: 0644]
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c [new file with mode: 0644]
drivers/net/ethernet/intel/ixgbevf/ipsec.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fw.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlx5/core/transobj.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/mellanox/mlxsw/pci.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mscc/Kconfig
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot.h
drivers/net/ethernet/mscc/ocelot_board.c
drivers/net/ethernet/mscc/ocelot_hsio.h [deleted file]
drivers/net/ethernet/mscc/ocelot_regs.c
drivers/net/ethernet/netronome/nfp/nfp_devlink.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
drivers/net/ethernet/ni/nixge.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_hsi.h
drivers/net/ethernet/qlogic/qed/qed_iwarp.c
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/qlogic/qed/qed_ll2.h
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_ooo.c
drivers/net/ethernet/qlogic/qed/qed_rdma.c
drivers/net/ethernet/qlogic/qed/qed_roce.c
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
drivers/net/ethernet/qlogic/qed/qed_vf.c
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/falcon/efx.c
drivers/net/ethernet/socionext/sni_ave.c
drivers/net/geneve.c
drivers/net/hamradio/yam.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/ieee802154/adf7242.c
drivers/net/ieee802154/ca8210.c
drivers/net/ieee802154/mac802154_hwsim.c
drivers/net/ieee802154/mcr20a.c
drivers/net/phy/aquantia.c
drivers/net/phy/at803x.c
drivers/net/phy/bcm63xx.c
drivers/net/phy/dp83640.c
drivers/net/phy/marvell.c
drivers/net/phy/marvell10g.c
drivers/net/phy/mdio-bcm-unimac.c
drivers/net/phy/mdio-thunder.c
drivers/net/phy/microchip.c
drivers/net/phy/microchip_t1.c
drivers/net/phy/mscc.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/phylink.c
drivers/net/phy/sfp.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/asix_common.c
drivers/net/usb/ax88179_178a.c
drivers/net/usb/lan78xx.c
drivers/net/usb/r8152.c
drivers/net/usb/smsc75xx.c
drivers/net/usb/smsc95xx.c
drivers/net/usb/sr9800.c
drivers/net/usb/usbnet.c
drivers/net/virtio_net.c
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wimax/i2400m/control.c
drivers/net/wireless/ath/ath10k/Kconfig
drivers/net/wireless/ath/ath10k/ahb.c
drivers/net/wireless/ath/ath10k/bmi.c
drivers/net/wireless/ath/ath10k/bmi.h
drivers/net/wireless/ath/ath10k/ce.c
drivers/net/wireless/ath/ath10k/ce.h
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/core.h
drivers/net/wireless/ath/ath10k/debug.c
drivers/net/wireless/ath/ath10k/debug.h
drivers/net/wireless/ath/ath10k/debugfs_sta.c
drivers/net/wireless/ath/ath10k/htc.c
drivers/net/wireless/ath/ath10k/htt.h
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/htt_tx.c
drivers/net/wireless/ath/ath10k/hw.c
drivers/net/wireless/ath/ath10k/hw.h
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/mac.h
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/ath/ath10k/pci.h
drivers/net/wireless/ath/ath10k/rx_desc.h
drivers/net/wireless/ath/ath10k/sdio.c
drivers/net/wireless/ath/ath10k/snoc.c
drivers/net/wireless/ath/ath10k/targaddrs.h
drivers/net/wireless/ath/ath10k/txrx.c
drivers/net/wireless/ath/ath10k/usb.c
drivers/net/wireless/ath/ath10k/wmi-tlv.c
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/ath/ath10k/wow.c
drivers/net/wireless/ath/ath5k/debug.c
drivers/net/wireless/ath/ath6kl/init.c
drivers/net/wireless/ath/ath6kl/main.c
drivers/net/wireless/ath/ath9k/ar5008_phy.c
drivers/net/wireless/ath/ath9k/common-debug.c
drivers/net/wireless/ath/ath9k/common-debug.h
drivers/net/wireless/ath/ath9k/common-spectral.c
drivers/net/wireless/ath/ath9k/common-spectral.h
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/debug_sta.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/tx99.c
drivers/net/wireless/ath/carl9170/tx.c
drivers/net/wireless/ath/wcn36xx/dxe.c
drivers/net/wireless/ath/wcn36xx/smd.c
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/ath/wil6210/debugfs.c
drivers/net/wireless/ath/wil6210/main.c
drivers/net/wireless/ath/wil6210/pcie_bus.c
drivers/net/wireless/ath/wil6210/pm.c
drivers/net/wireless/ath/wil6210/rx_reorder.c
drivers/net/wireless/ath/wil6210/txrx.c
drivers/net/wireless/ath/wil6210/txrx_edma.c
drivers/net/wireless/ath/wil6210/wil6210.h
drivers/net/wireless/ath/wil6210/wmi.c
drivers/net/wireless/ath/wil6210/wmi.h
drivers/net/wireless/broadcom/b43/b43.h
drivers/net/wireless/broadcom/b43/dma.c
drivers/net/wireless/broadcom/b43/main.c
drivers/net/wireless/broadcom/b43legacy/dma.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c
drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
drivers/net/wireless/intel/ipw2x00/ipw2200.c
drivers/net/wireless/intel/iwlwifi/cfg/1000.c
drivers/net/wireless/intel/iwlwifi/cfg/2000.c
drivers/net/wireless/intel/iwlwifi/cfg/22000.c
drivers/net/wireless/intel/iwlwifi/cfg/5000.c
drivers/net/wireless/intel/iwlwifi/cfg/6000.c
drivers/net/wireless/intel/iwlwifi/cfg/7000.c
drivers/net/wireless/intel/iwlwifi/cfg/8000.c
drivers/net/wireless/intel/iwlwifi/cfg/9000.c
drivers/net/wireless/intel/iwlwifi/dvm/agn.h
drivers/net/wireless/intel/iwlwifi/dvm/calib.c
drivers/net/wireless/intel/iwlwifi/dvm/calib.h
drivers/net/wireless/intel/iwlwifi/dvm/commands.h
drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/dvm/dev.h
drivers/net/wireless/intel/iwlwifi/dvm/devices.c
drivers/net/wireless/intel/iwlwifi/dvm/led.c
drivers/net/wireless/intel/iwlwifi/dvm/led.h
drivers/net/wireless/intel/iwlwifi/dvm/lib.c
drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/dvm/main.c
drivers/net/wireless/intel/iwlwifi/dvm/power.c
drivers/net/wireless/intel/iwlwifi/dvm/power.h
drivers/net/wireless/intel/iwlwifi/dvm/rs.c
drivers/net/wireless/intel/iwlwifi/dvm/rs.h
drivers/net/wireless/intel/iwlwifi/dvm/rx.c
drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
drivers/net/wireless/intel/iwlwifi/dvm/scan.c
drivers/net/wireless/intel/iwlwifi/dvm/sta.c
drivers/net/wireless/intel/iwlwifi/dvm/tt.c
drivers/net/wireless/intel/iwlwifi/dvm/tt.h
drivers/net/wireless/intel/iwlwifi/dvm/tx.c
drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.h
drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
drivers/net/wireless/intel/iwlwifi/fw/api/power.h
drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/fw/dbg.h
drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
drivers/net/wireless/intel/iwlwifi/fw/file.h
drivers/net/wireless/intel/iwlwifi/fw/img.h
drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c
drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h
drivers/net/wireless/intel/iwlwifi/fw/runtime.h
drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
drivers/net/wireless/intel/iwlwifi/iwl-csr.h
drivers/net/wireless/intel/iwlwifi/iwl-debug.c
drivers/net/wireless/intel/iwlwifi/iwl-debug.h
drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h
drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h
drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-drv.h
drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
drivers/net/wireless/intel/iwlwifi/iwl-io.c
drivers/net/wireless/intel/iwlwifi/iwl-io.h
drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h
drivers/net/wireless/intel/iwlwifi/iwl-prph.h
drivers/net/wireless/intel/iwlwifi/iwl-scd.h
drivers/net/wireless/intel/iwlwifi/iwl-trans.c
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/mvm/binding.c
drivers/net/wireless/intel/iwlwifi/mvm/coex.c
drivers/net/wireless/intel/iwlwifi/mvm/constants.h
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/led.c
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/power.c
drivers/net/wireless/intel/iwlwifi/mvm/quota.c
drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.h
drivers/net/wireless/intel/iwlwifi/mvm/rx.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/sf.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
drivers/net/wireless/intel/iwlwifi/mvm/testmode.h
drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
drivers/net/wireless/intel/iwlwifi/mvm/tof.c
drivers/net/wireless/intel/iwlwifi/mvm/tof.h
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/intersil/orinoco/orinoco_usb.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/marvell/libertas_tf/if_usb.c
drivers/net/wireless/marvell/mwifiex/usb.c
drivers/net/wireless/mediatek/mt76/Kconfig
drivers/net/wireless/mediatek/mt76/Makefile
drivers/net/wireless/mediatek/mt76/debugfs.c
drivers/net/wireless/mediatek/mt76/dma.c
drivers/net/wireless/mediatek/mt76/dma.h
drivers/net/wireless/mediatek/mt76/mac80211.c
drivers/net/wireless/mediatek/mt76/mmio.c
drivers/net/wireless/mediatek/mt76/mt76.h
drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
drivers/net/wireless/mediatek/mt76/mt76x0/core.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c
drivers/net/wireless/mediatek/mt76/mt76x0/dma.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
drivers/net/wireless/mediatek/mt76/mt76x0/init.c
drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
drivers/net/wireless/mediatek/mt76/mt76x0/mac.h [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x0/main.c
drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
drivers/net/wireless/mediatek/mt76/mt76x0/pci.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
drivers/net/wireless/mediatek/mt76/mt76x0/regs.h [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x0/trace.h
drivers/net/wireless/mediatek/mt76/mt76x0/tx.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
drivers/net/wireless/mediatek/mt76/mt76x0/usb.h [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x0/util.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x02.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x02_dma.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x02_mac.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x02_mac.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x02_phy.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x02_phy.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x02_regs.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x02_trace.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x02_trace.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x02_usb.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x02_util.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2.h [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/Makefile [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/debugfs.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/init.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/mac.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/mac.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/pci.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/pci_tx.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/phy.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/usb.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2_common.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_core.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_dma.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_dma.h [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_init.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_mac.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_mac.h [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_main.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_pci.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_phy.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_regs.h [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_trace.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_trace.h [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_tx.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2_usb.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2u.h [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2u_core.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2u_init.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2u_main.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c [deleted file]
drivers/net/wireless/mediatek/mt76/tx.c
drivers/net/wireless/mediatek/mt76/usb.c
drivers/net/wireless/mediatek/mt76/usb_mcu.c
drivers/net/wireless/quantenna/qtnfmac/Makefile
drivers/net/wireless/quantenna/qtnfmac/bus.h
drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
drivers/net/wireless/quantenna/qtnfmac/commands.c
drivers/net/wireless/quantenna/qtnfmac/core.c
drivers/net/wireless/quantenna/qtnfmac/core.h
drivers/net/wireless/quantenna/qtnfmac/event.c
drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c [new file with mode: 0644]
drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h [new file with mode: 0644]
drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c [new file with mode: 0644]
drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h [new file with mode: 0644]
drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h [new file with mode: 0644]
drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c [deleted file]
drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h [deleted file]
drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h [deleted file]
drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h [deleted file]
drivers/net/wireless/quantenna/qtnfmac/qlink.h
drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h
drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c
drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
drivers/net/wireless/ralink/rt2x00/rt2800lib.h
drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
drivers/net/wireless/ralink/rt2x00/rt2800mmio.h
drivers/net/wireless/ralink/rt2x00/rt2800pci.c
drivers/net/wireless/ralink/rt2x00/rt2800usb.c
drivers/net/wireless/ralink/rt2x00/rt2x00.h
drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
drivers/net/wireless/realtek/rtlwifi/wifi.h
drivers/net/wireless/rsi/rsi_91x_hal.c
drivers/net/wireless/rsi/rsi_91x_mac80211.c
drivers/net/wireless/rsi/rsi_91x_usb.c
drivers/net/wireless/rsi/rsi_common.h
drivers/net/wireless/st/cw1200/txrx.c
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/ti/wlcore/sdio.c
drivers/net/wireless/ti/wlcore/wlcore.h
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/hash.c
drivers/net/xen-netback/interface.c
drivers/nvme/host/multipath.c
drivers/pci/controller/dwc/pcie-designware.c
drivers/pci/controller/dwc/pcie-designware.h
drivers/pci/controller/pci-mvebu.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/pci.c
drivers/phy/Kconfig
drivers/phy/Makefile
drivers/phy/mscc/Kconfig [new file with mode: 0644]
drivers/phy/mscc/Makefile [new file with mode: 0644]
drivers/phy/mscc/phy-ocelot-serdes.c [new file with mode: 0644]
drivers/pinctrl/intel/pinctrl-cannonlake.c
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/pinctrl-amd.c
drivers/regulator/bd71837-regulator.c
drivers/regulator/core.c
drivers/regulator/of_regulator.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_mpc.c
drivers/s390/net/qeth_core_mpc.h
drivers/soc/fsl/qbman/qman.c
drivers/soc/fsl/qe/ucc.c
drivers/spi/spi-gpio.c
drivers/spi/spi-rspi.c
drivers/spi/spi-sh-msiof.c
drivers/spi/spi-tegra20-slink.c
drivers/ssb/driver_chipcommon.c
drivers/staging/fsl-dpaa2/Kconfig
drivers/staging/fsl-dpaa2/Makefile
drivers/staging/fsl-dpaa2/rtc/Makefile [deleted file]
drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h [deleted file]
drivers/staging/fsl-dpaa2/rtc/dprtc.c [deleted file]
drivers/staging/fsl-dpaa2/rtc/dprtc.h [deleted file]
drivers/staging/fsl-dpaa2/rtc/rtc.c [deleted file]
drivers/staging/fsl-dpaa2/rtc/rtc.h [deleted file]
drivers/vhost/net.c
drivers/video/fbdev/efifb.c
drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
drivers/video/fbdev/pxa168fb.c
drivers/video/fbdev/stifb.c
fs/afs/addr_list.c
fs/afs/internal.h
fs/cifs/cifsglob.h
fs/cifs/connect.c
fs/cifs/smb2ops.c
fs/cifs/transport.c
fs/dax.c
fs/ext2/inode.c
fs/ioctl.c
fs/iomap.c
fs/nfsd/vfs.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/refcounttree.c
fs/overlayfs/copy_up.c
fs/overlayfs/file.c
fs/overlayfs/inode.c
fs/overlayfs/namei.c
fs/overlayfs/overlayfs.h
fs/overlayfs/util.c
fs/proc/base.c
fs/pstore/ram.c
fs/read_write.c
fs/xattr.c
fs/xfs/libxfs/xfs_attr.c
fs/xfs/libxfs/xfs_attr_remote.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_format.h
fs/xfs/libxfs/xfs_inode_buf.c
fs/xfs/scrub/alloc.c
fs/xfs/scrub/inode.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_buf_item.h
fs/xfs/xfs_inode.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_reflink.c
fs/xfs/xfs_trace.h
fs/xfs/xfs_trans.c
fs/xfs/xfs_trans_buf.c
include/drm/drm_client.h
include/drm/drm_panel.h
include/dt-bindings/net/mscc-phy-vsc8531.h
include/dt-bindings/phy/phy-ocelot-serdes.h [new file with mode: 0644]
include/linux/avf/virtchnl.h
include/linux/dns_resolver.h
include/linux/fs.h
include/linux/hugetlb.h
include/linux/linkmode.h [new file with mode: 0644]
include/linux/mfd/rohm-bd718x7.h
include/linux/mii.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/transobj.h
include/linux/mlx5/vport.h
include/linux/mm.h
include/linux/mmzone.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/netfilter/nf_conntrack_common.h
include/linux/netlink.h
include/linux/phy.h
include/linux/phy/phy.h
include/linux/qed/qed_if.h
include/linux/regulator/machine.h
include/linux/skbuff.h
include/linux/spi/spi-mem.h
include/linux/uio.h
include/linux/usb/usbnet.h
include/linux/virtio_net.h
include/media/v4l2-fh.h
include/net/act_api.h
include/net/af_rxrpc.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/l2cap.h
include/net/bonding.h
include/net/cfg80211.h
include/net/devlink.h
include/net/inet_sock.h
include/net/ip.h
include/net/ip6_route.h
include/net/ip_fib.h
include/net/netfilter/ipv4/nf_conntrack_ipv4.h
include/net/netfilter/ipv6/nf_conntrack_ipv6.h
include/net/netfilter/nf_conntrack_core.h
include/net/netfilter/nf_conntrack_l4proto.h
include/net/netfilter/nf_tables.h
include/net/netfilter/nf_tables_core.h
include/net/netlink.h
include/net/pkt_cls.h
include/net/sock.h
include/net/tcp.h
include/net/tls.h
include/net/udp.h
include/soc/mscc/ocelot_hsio.h [new file with mode: 0644]
include/trace/events/migrate.h
include/trace/events/rxrpc.h
include/uapi/asm-generic/hugetlb_encode.h
include/uapi/linux/dns_resolver.h [new file with mode: 0644]
include/uapi/linux/memfd.h
include/uapi/linux/mman.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/netfilter/xt_cgroup.h
include/uapi/linux/netfilter/xt_quota.h
include/uapi/linux/netlink.h
include/uapi/linux/pkt_sched.h
include/uapi/linux/shm.h
ipc/shm.c
kernel/bpf/local_storage.c
kernel/bpf/verifier.c
kernel/dma/Kconfig
kernel/events/core.c
kernel/locking/test-ww_mutex.c
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/sched.h
lib/nlattr.c
mm/gup_benchmark.c
mm/huge_memory.c
mm/hugetlb.c
mm/madvise.c
mm/migrate.c
mm/page_alloc.c
mm/rmap.c
mm/vmscan.c
mm/vmstat.c
net/bluetooth/bnep/core.c
net/bluetooth/cmtp/core.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hidp/core.c
net/bluetooth/l2cap_core.c
net/bluetooth/mgmt.c
net/bluetooth/smp.c
net/bluetooth/smp.h
net/bpfilter/bpfilter_kern.c
net/bridge/br_fdb.c
net/bridge/br_mdb.c
net/bridge/br_multicast.c
net/bridge/br_netfilter_hooks.c
net/core/devlink.c
net/core/ethtool.c
net/core/fib_rules.c
net/core/gen_stats.c
net/core/neighbour.c
net/core/net_namespace.c
net/core/netpoll.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/dccp/input.c
net/dccp/ipv4.c
net/dns_resolver/dns_key.c
net/dns_resolver/dns_query.c
net/ipv4/datagram.c
net/ipv4/devinet.c
net/ipv4/esp4.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_sockglue.c
net/ipv4/ipmr.c
net/ipv4/metrics.c
net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
net/ipv4/ping.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/udp.c
net/ipv4/udp_offload.c
net/ipv4/xfrm4_input.c
net/ipv4/xfrm4_mode_transport.c
net/ipv6/addrconf.c
net/ipv6/addrlabel.c
net/ipv6/esp6.c
net/ipv6/ip6_fib.c
net/ipv6/ip6mr.c
net/ipv6/ndisc.c
net/ipv6/netfilter/ip6t_ipv6header.c
net/ipv6/netfilter/ip6t_rt.c
net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/udp.c
net/ipv6/udp_offload.c
net/ipv6/xfrm6_input.c
net/ipv6/xfrm6_mode_transport.c
net/ipv6/xfrm6_output.c
net/mac80211/cfg.c
net/mac80211/iface.c
net/mac80211/mesh.h
net/mac80211/mesh_hwmp.c
net/mac80211/status.c
net/mac80211/tdls.c
net/mac80211/tx.c
net/mpls/af_mpls.c
net/ncsi/internal.h
net/ncsi/ncsi-cmd.c
net/ncsi/ncsi-pkt.h
net/ncsi/ncsi-rsp.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_conntrack_proto_generic.c
net/netfilter/nf_conntrack_proto_gre.c
net/netfilter/nf_conntrack_proto_icmp.c
net/netfilter/nf_conntrack_proto_icmpv6.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_proto_udp.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_flow_table_ip.c
net/netfilter/nf_nat_helper.c
net/netfilter/nf_nat_redirect.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_core.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nft_cmp.c
net/netfilter/nft_ct.c
net/netfilter/nft_dynset.c
net/netfilter/nft_lookup.c
net/netfilter/nft_meta.c
net/netfilter/nft_objref.c
net/netfilter/nft_osf.c
net/netfilter/nft_reject.c
net/netfilter/nft_rt.c
net/netfilter/nft_set_hash.c
net/netfilter/nft_set_rbtree.c
net/netfilter/nft_xfrm.c [new file with mode: 0644]
net/netfilter/xt_CT.c
net/netfilter/xt_IDLETIMER.c
net/netfilter/xt_SECMARK.c
net/netfilter/xt_cgroup.c
net/netfilter/xt_quota.c
net/netfilter/xt_socket.c
net/netlink/af_netlink.c
net/netlink/af_netlink.h
net/openvswitch/conntrack.c
net/openvswitch/datapath.c
net/openvswitch/vport-internal_dev.c
net/packet/af_packet.c
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/call_object.c
net/rxrpc/conn_client.c
net/rxrpc/conn_object.c
net/rxrpc/input.c
net/rxrpc/local_event.c
net/rxrpc/local_object.c
net/rxrpc/output.c
net/rxrpc/peer_event.c
net/rxrpc/peer_object.c
net/rxrpc/protocol.h
net/rxrpc/recvmsg.c
net/rxrpc/skbuff.c
net/rxrpc/utils.c
net/sched/Kconfig
net/sched/Makefile
net/sched/act_api.c
net/sched/act_ipt.c
net/sched/cls_api.c
net/sched/cls_flower.c
net/sched/cls_u32.c
net/sched/sch_api.c
net/sched/sch_fq.c
net/sched/sch_generic.c
net/sched/sch_pie.c
net/sched/sch_taprio.c [new file with mode: 0644]
net/sctp/outqueue.c
net/socket.c
net/tipc/bearer.c
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/msg.h
net/tipc/node.c
net/tipc/node.h
net/tipc/socket.c
net/tls/tls_sw.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/scan.c
net/wireless/wext-compat.c
net/xfrm/xfrm_device.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_interface.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
sound/hda/hdac_i915.c
sound/pci/hda/patch_realtek.c
tools/kvm/kvm_stat/kvm_stat
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/net/forwarding/bridge_sticky_fdb.sh [new file with mode: 0755]
tools/testing/selftests/net/ip_defrag.c
tools/testing/selftests/net/pmtu.sh
tools/testing/selftests/net/tls.c
tools/testing/selftests/powerpc/alignment/Makefile
tools/testing/selftests/powerpc/benchmarks/Makefile
tools/testing/selftests/powerpc/cache_shape/Makefile
tools/testing/selftests/powerpc/copyloops/Makefile
tools/testing/selftests/powerpc/dscr/Makefile
tools/testing/selftests/powerpc/math/Makefile
tools/testing/selftests/powerpc/mm/Makefile
tools/testing/selftests/powerpc/pmu/Makefile
tools/testing/selftests/powerpc/pmu/ebb/Makefile
tools/testing/selftests/powerpc/primitives/Makefile
tools/testing/selftests/powerpc/ptrace/Makefile
tools/testing/selftests/powerpc/signal/Makefile
tools/testing/selftests/powerpc/stringloops/Makefile
tools/testing/selftests/powerpc/switch_endian/Makefile
tools/testing/selftests/powerpc/syscalls/Makefile
tools/testing/selftests/powerpc/tm/Makefile
tools/testing/selftests/powerpc/vphn/Makefile
tools/testing/selftests/rseq/param_test.c
tools/testing/selftests/tc-testing/README
tools/testing/selftests/tc-testing/bpf/Makefile [new file with mode: 0644]
tools/testing/selftests/tc-testing/bpf/action.c [new file with mode: 0644]
tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py [new file with mode: 0644]
tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
tools/testing/selftests/tc-testing/tdc_config.py
tools/testing/selftests/x86/test_vdso.c

index 996ce84352cbf11627adc17c13d079cc17601de1..7cccc49b6beade0d60adaafc228b3ed8ecfa34e0 100644 (file)
@@ -1,4 +1,4 @@
-Device-Tree bindings for input/gpio_keys.c keyboard driver
+Device-Tree bindings for input/keyboard/gpio_keys.c keyboard driver
 
 Required properties:
        - compatible = "gpio-keys";
index ae15ec33354274669e5589f5c076d2e5aeb97734..bc817e984628a8fd9969ec22615de088d8035c8e 100644 (file)
@@ -41,3 +41,19 @@ Example:
                compatible = "mscc,ocelot-cpu-syscon", "syscon";
                reg = <0x70000000 0x2c>;
        };
+
+o HSIO regs:
+
+The SoC has a few registers (HSIO) handling miscellaneous functionalities:
+configuration and status of PLL5, RCOMP, SyncE, SerDes configurations and
+status, SerDes muxing and a thermal sensor.
+
+Required properties:
+- compatible: Should be "mscc,ocelot-hsio", "syscon", "simple-mfd"
+- reg : Should contain registers location and length
+
+Example:
+       syscon@10d0000 {
+               compatible = "mscc,ocelot-hsio", "syscon", "simple-mfd";
+               reg = <0x10d0000 0x10000>;
+       };
index 0a84711abece92049c9768bddb5fb95e5f6d812b..9e5c17d426ceca2361fd5b28b8bdfa105ab6202e 100644 (file)
@@ -12,7 +12,6 @@ Required properties:
   - "sys"
   - "rew"
   - "qs"
-  - "hsio"
   - "qsys"
   - "ana"
   - "portX" with X from 0 to the number of last port index available on that
@@ -45,7 +44,6 @@ Example:
                reg = <0x1010000 0x10000>,
                      <0x1030000 0x10000>,
                      <0x1080000 0x100>,
-                     <0x10d0000 0x10000>,
                      <0x11e0000 0x100>,
                      <0x11f0000 0x100>,
                      <0x1200000 0x100>,
@@ -59,10 +57,9 @@ Example:
                      <0x1280000 0x100>,
                      <0x1800000 0x80000>,
                      <0x1880000 0x10000>;
-               reg-names = "sys", "rew", "qs", "hsio", "port0",
-                           "port1", "port2", "port3", "port4", "port5",
-                           "port6", "port7", "port8", "port9", "port10",
-                           "qsys", "ana";
+               reg-names = "sys", "rew", "qs", "port0", "port1", "port2",
+                           "port3", "port4", "port5", "port6", "port7",
+                           "port8", "port9", "port10", "qsys", "ana";
                interrupts = <21 22>;
                interrupt-names = "xtr", "inj";
 
diff --git a/Documentation/devicetree/bindings/phy/phy-ocelot-serdes.txt b/Documentation/devicetree/bindings/phy/phy-ocelot-serdes.txt
new file mode 100644 (file)
index 0000000..3322198
--- /dev/null
@@ -0,0 +1,43 @@
+Microsemi Ocelot SerDes muxing driver
+-------------------------------------
+
+On Microsemi Ocelot, there is a handful of registers in HSIO address
+space for setting up the SerDes to switch port muxing.
+
+A SerDes X can be "muxed" to work with switch port Y or Z for example.
+One specific SerDes can also be used as a PCIe interface.
+
+Hence, a SerDes represents an interface, be it an Ethernet or a PCIe one.
+
+There are two kinds of SerDes: SERDES1G supports 10/100Mbps in
+half/full-duplex and 1000Mbps in full-duplex mode while SERDES6G supports
+10/100Mbps in half/full-duplex and 1000/2500Mbps in full-duplex mode.
+
+Also, SERDES6G number (aka "macro") 0 is the only interface supporting
+QSGMII.
+
+This is a child of the HSIO syscon ("mscc,ocelot-hsio", see
+Documentation/devicetree/bindings/mips/mscc.txt) on the Microsemi Ocelot.
+
+Required properties:
+
+- compatible: should be "mscc,vsc7514-serdes"
+- #phy-cells : from the generic phy bindings, must be 2.
+              The first number defines the input port to use for a given
+              SerDes macro. The second defines the macro to use. They are
+              defined in dt-bindings/phy/phy-ocelot-serdes.h
+
+Example:
+
+       serdes: serdes {
+               compatible = "mscc,vsc7514-serdes";
+               #phy-cells = <2>;
+       };
+
+       ethernet {
+               port1 {
+                       phy-handle = <&phy_foo>;
+                       /* Link SERDES1G_5 to port1 */
+                       phys = <&serdes 1 SERDES1G_5>;
+               };
+       };
index f6362d88763b852e0704321af6d22653b95cc4d6..aa924196c36603abcc4d72a619ebce5635050ca4 100644 (file)
@@ -15,7 +15,8 @@ than x86.  Check the v86d documentation for a list of currently supported
 arches.
 
 v86d source code can be downloaded from the following website:
-  http://dev.gentoo.org/~spock/projects/uvesafb
+
+  https://github.com/mjanusz/v86d
 
 Please refer to the v86d documentation for detailed configuration and
 installation instructions.
@@ -177,7 +178,7 @@ from the Video BIOS if you set pixclock to 0 in fb_var_screeninfo.
 
 --
  Michal Januszewski <spock@gentoo.org>
- Last updated: 2009-03-30
+ Last updated: 2017-10-10
 
  Documentation of the uvesafb options is loosely based on vesafb.txt.
 
diff --git a/Documentation/networking/devlink-params-bnxt.txt b/Documentation/networking/devlink-params-bnxt.txt
new file mode 100644 (file)
index 0000000..481aa30
--- /dev/null
@@ -0,0 +1,18 @@
+enable_sriov           [DEVICE, GENERIC]
+                       Configuration mode: Permanent
+
+ignore_ari             [DEVICE, GENERIC]
+                       Configuration mode: Permanent
+
+msix_vec_per_pf_max    [DEVICE, GENERIC]
+                       Configuration mode: Permanent
+
+msix_vec_per_pf_min    [DEVICE, GENERIC]
+                       Configuration mode: Permanent
+
+gre_ver_check          [DEVICE, DRIVER-SPECIFIC]
+                       Generic Routing Encapsulation (GRE) version check will
+                       be enabled in the device. If disabled, the device
+                       skips version checking for incoming packets.
+                       Type: Boolean
+                       Configuration mode: Permanent
diff --git a/Documentation/networking/devlink-params.txt b/Documentation/networking/devlink-params.txt
new file mode 100644 (file)
index 0000000..ae444ff
--- /dev/null
@@ -0,0 +1,42 @@
+Devlink configuration parameters
+================================
+The following is the list of configuration parameters exposed via the
+devlink interface. Each parameter is either generic or driver-specific,
+and all are device-level parameters.
+
+Note that the driver-specific files should list the generic params they
+support, along with the supported configuration modes.
+
+Each parameter can be set in different configuration modes:
+       runtime         - set while driver is running, no reset required.
+       driverinit      - applied while the driver initializes; requires
+                       restarting the driver via the devlink reload command.
+       permanent       - written to device's non-volatile memory, hard reset
+                       required.
+
+The following is the list of parameters:
+========================================
+enable_sriov           [DEVICE, GENERIC]
+                       Enable Single Root I/O Virtualisation (SRIOV) in
+                       the device.
+                       Type: Boolean
+
+ignore_ari             [DEVICE, GENERIC]
+                       Ignore the Alternative Routing-ID Interpretation
+                       (ARI) capability. If enabled, the adapter ignores
+                       the ARI capability even when the platform supports
+                       it, and creates the same number of partitions as
+                       when the platform does not support ARI.
+                       Type: Boolean
+
+msix_vec_per_pf_max    [DEVICE, GENERIC]
+                       Provides the maximum number of MSIX interrupts that
+                       a device can create. The value is the same across
+                       all physical functions (PFs) in the device.
+                       Type: u32
+
+msix_vec_per_pf_min    [DEVICE, GENERIC]
+                       Provides the minimum number of MSIX interrupts required
+                       for device initialization. The value is the same
+                       across all physical functions (PFs) in the device.
+                       Type: u32
index 8313a636dd533540172859653bcfa173c1e03864..960de8fe3f401c7ce4ceee0d5d3d61cb46102319 100644 (file)
@@ -425,7 +425,7 @@ tcp_mtu_probing - INTEGER
          1 - Disabled by default, enabled when an ICMP black hole detected
          2 - Always enabled, use initial MSS of tcp_base_mss.
 
-tcp_probe_interval - INTEGER
+tcp_probe_interval - UNSIGNED INTEGER
        Controls how often to start TCP Packetization-Layer Path MTU
        Discovery reprobe. The default is reprobing every 10 minutes as
        per RFC4821.
index b5407163d53bea922c58af999aeb4c49b17ed266..605e00cdd6beb1d024519ab8417464fc12064724 100644 (file)
@@ -1069,6 +1069,31 @@ The kernel interface functions are as follows:
 
      This function may transmit a PING ACK.
 
+ (*) Get reply timestamp.
+
+       bool rxrpc_kernel_get_reply_time(struct socket *sock,
+                                        struct rxrpc_call *call,
+                                        ktime_t *_ts)
+
+     This allows the timestamp on the first DATA packet of the reply of a
+     client call to be queried, provided that it is still in the Rx ring.  If
+     successful, the timestamp will be stored into *_ts and true will be
+     returned; false will be returned otherwise.
+
+ (*) Get remote client epoch.
+
+       u32 rxrpc_kernel_get_epoch(struct socket *sock,
+                                  struct rxrpc_call *call)
+
+     This allows the epoch that's contained in packets of an incoming client
+     call to be queried.  This value is returned.  The function is always
+     successful if the call is still in progress.  It shouldn't be called once
+     the call has expired.  Note that calling this on a local client call only
+     returns the local epoch.
+
+     This value can be used to determine if the remote client has been
+     restarted as it shouldn't change otherwise.
+
 
 =======================
 CONFIGURABLE PARAMETERS
index 50c34ca65efee673e26e549bef0d20db40bcbf30..267f55b5f54a90c26e03a61e9545771bbe1231f4 100644 (file)
@@ -68,6 +68,10 @@ and an indication of whether it is for Rx or Tx.  The driver should
        - verify the algorithm is supported for offloads
        - store the SA information (key, salt, target-ip, protocol, etc)
        - enable the HW offload of the SA
+       - return status value:
+               0             success
+               -EOPNOTSUPP   offload not supported, try SW IPsec
+               other         fail the request
 
 The driver can also set an offload_handle in the SA, an opaque void pointer
 that can be used to convey context into the fast-path offload requests.
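
As a hedged sketch of an add callback honouring these return codes (the
example_ name is illustrative; the SA bookkeeping is driver-specific and
omitted), a driver might implement roughly:

	static int example_xdo_dev_state_add(struct xfrm_state *x)
	{
		/* verify the algorithm is supported for offload */
		if (x->id.proto != IPPROTO_ESP || !x->aead)
			return -EOPNOTSUPP;	/* fall back to SW IPsec */

		/* store the SA information (key, salt, target-ip, etc)
		 * and enable the HW offload of the SA here
		 */
		return 0;
	}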
index 15565de091afdb7232002e364d842c88c21bed7a..54e719d02200cf06874131fb832a06fcf2a9694a 100644 (file)
@@ -324,7 +324,6 @@ F:  Documentation/ABI/testing/sysfs-bus-acpi
 F:     Documentation/ABI/testing/configfs-acpi
 F:     drivers/pci/*acpi*
 F:     drivers/pci/*/*acpi*
-F:     drivers/pci/*/*/*acpi*
 F:     tools/power/acpi/
 
 ACPI APEI
@@ -1251,7 +1250,7 @@ N:        meson
 
 ARM/Annapurna Labs ALPINE ARCHITECTURE
 M:     Tsahee Zidenberg <tsahee@annapurnalabs.com>
-M:     Antoine Tenart <antoine.tenart@free-electrons.com>
+M:     Antoine Tenart <antoine.tenart@bootlin.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-alpine/
@@ -2956,7 +2955,6 @@ F:        include/linux/bcm963xx_tag.h
 
 BROADCOM BNX2 GIGABIT ETHERNET DRIVER
 M:     Rasesh Mody <rasesh.mody@cavium.com>
-M:     Harish Patil <harish.patil@cavium.com>
 M:     Dept-GELinuxNICDev@cavium.com
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -2977,6 +2975,7 @@ F:        drivers/scsi/bnx2i/
 
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
 M:     Ariel Elior <ariel.elior@cavium.com>
+M:     Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
 M:     everest-linux-l2@cavium.com
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -4530,7 +4529,11 @@ DPAA2 ETHERNET DRIVER
 M:     Ioana Radulescu <ruxandra.radulescu@nxp.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
-F:     drivers/net/ethernet/freescale/dpaa2
+F:     drivers/net/ethernet/freescale/dpaa2/dpaa2-eth*
+F:     drivers/net/ethernet/freescale/dpaa2/dpni*
+F:     drivers/net/ethernet/freescale/dpaa2/dpkg.h
+F:     drivers/net/ethernet/freescale/dpaa2/Makefile
+F:     drivers/net/ethernet/freescale/dpaa2/Kconfig
 
 DPAA2 ETHERNET SWITCH DRIVER
 M:     Ioana Radulescu <ruxandra.radulescu@nxp.com>
@@ -4541,9 +4544,10 @@ F:       drivers/staging/fsl-dpaa2/ethsw
 
 DPAA2 PTP CLOCK DRIVER
 M:     Yangbo Lu <yangbo.lu@nxp.com>
-L:     linux-kernel@vger.kernel.org
+L:     netdev@vger.kernel.org
 S:     Maintained
-F:     drivers/staging/fsl-dpaa2/rtc
+F:     drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp*
+F:     drivers/net/ethernet/freescale/dpaa2/dprtc*
 
 DPT_I2O SCSI RAID DRIVER
 M:     Adaptec OEM Raid Solutions <aacraid@microsemi.com>
@@ -5470,7 +5474,8 @@ S:        Odd Fixes
 F:     drivers/net/ethernet/agere/
 
 ETHERNET BRIDGE
-M:     Stephen Hemminger <stephen@networkplumber.org>
+M:     Roopa Prabhu <roopa@cumulusnetworks.com>
+M:     Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
 L:     bridge@lists.linux-foundation.org (moderated for non-subscribers)
 L:     netdev@vger.kernel.org
 W:     http://www.linuxfoundation.org/en/Net:Bridge
@@ -8190,7 +8195,7 @@ S:        Maintained
 F:     net/dsa/tag_gswip.c
 F:     drivers/net/ethernet/lantiq_xrx200.c
 F:     drivers/net/dsa/lantiq_pce.h
-F:     drivers/net/dsa/intel_gswip.c
+F:     drivers/net/dsa/lantiq_gswip.c
 
 LANTIQ MIPS ARCHITECTURE
 M:     John Crispin <john@phrozen.org>
@@ -8607,7 +8612,6 @@ F:        include/linux/spinlock*.h
 F:     arch/*/include/asm/spinlock*.h
 F:     include/linux/rwlock*.h
 F:     include/linux/mutex*.h
-F:     arch/*/include/asm/mutex*.h
 F:     include/linux/rwsem*.h
 F:     arch/*/include/asm/rwsem.h
 F:     include/linux/seqlock.h
@@ -8753,7 +8757,7 @@ M:        Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/dsa/mv88e6xxx/
-F:     linux/platform_data/mv88e6xxx.h
+F:     include/linux/platform_data/mv88e6xxx.h
 F:     Documentation/devicetree/bindings/net/dsa/marvell.txt
 
 MARVELL ARMADA DRM SUPPORT
@@ -9725,13 +9729,6 @@ Q:       http://patchwork.linuxtv.org/project/linux-media/list/
 S:     Maintained
 F:     drivers/media/dvb-frontends/mn88473*
 
-PCI DRIVER FOR MOBIVEIL PCIE IP
-M:     Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
-L:     linux-pci@vger.kernel.org
-S:     Supported
-F:     Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
-F:     drivers/pci/controller/pcie-mobiveil.c
-
 MODULE SUPPORT
 M:     Jessica Yu <jeyu@kernel.org>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
@@ -10958,7 +10955,7 @@ M:      Willy Tarreau <willy@haproxy.com>
 M:     Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
 S:     Odd Fixes
 F:     Documentation/auxdisplay/lcd-panel-cgram.txt
-F:     drivers/misc/panel.c
+F:     drivers/auxdisplay/panel.c
 
 PARALLEL PORT SUBSYSTEM
 M:     Sudip Mukherjee <sudipm.mukherjee@gmail.com>
@@ -11146,6 +11143,13 @@ F:     include/uapi/linux/switchtec_ioctl.h
 F:     include/linux/switchtec.h
 F:     drivers/ntb/hw/mscc/
 
+PCI DRIVER FOR MOBIVEIL PCIE IP
+M:     Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+L:     linux-pci@vger.kernel.org
+S:     Supported
+F:     Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
+F:     drivers/pci/controller/pcie-mobiveil.c
+
 PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
 M:     Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 M:     Jason Cooper <jason@lakedaemon.net>
@@ -11212,8 +11216,14 @@ F:     tools/pci/
 
 PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC
 M:     Russell Currey <ruscur@russell.cc>
+M:     Sam Bobroff <sbobroff@linux.ibm.com>
+M:     Oliver O'Halloran <oohall@gmail.com>
 L:     linuxppc-dev@lists.ozlabs.org
 S:     Supported
+F:     Documentation/PCI/pci-error-recovery.txt
+F:     drivers/pci/pcie/aer.c
+F:     drivers/pci/pcie/dpc.c
+F:     drivers/pci/pcie/err.c
 F:     Documentation/powerpc/eeh-pci-error-recovery.txt
 F:     arch/powerpc/kernel/eeh*.c
 F:     arch/powerpc/platforms/*/eeh*.c
@@ -11982,7 +11992,7 @@ F:      Documentation/scsi/LICENSE.qla4xxx
 F:     drivers/scsi/qla4xxx/
 
 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M:     Harish Patil <harish.patil@cavium.com>
+M:     Shahed Shaikh <Shahed.Shaikh@cavium.com>
 M:     Manish Chopra <manish.chopra@cavium.com>
 M:     Dept-GELinuxNICDev@cavium.com
 L:     netdev@vger.kernel.org
@@ -11990,7 +12000,6 @@ S:      Supported
 F:     drivers/net/ethernet/qlogic/qlcnic/
 
 QLOGIC QLGE 10Gb ETHERNET DRIVER
-M:     Harish Patil <harish.patil@cavium.com>
 M:     Manish Chopra <manish.chopra@cavium.com>
 M:     Dept-GELinuxNICDev@cavium.com
 L:     netdev@vger.kernel.org
@@ -15398,7 +15407,7 @@ S:      Maintained
 UVESAFB DRIVER
 M:     Michal Januszewski <spock@gentoo.org>
 L:     linux-fbdev@vger.kernel.org
-W:     http://dev.gentoo.org/~spock/projects/uvesafb/
+W:     https://github.com/mjanusz/v86d
 S:     Maintained
 F:     Documentation/fb/uvesafb.txt
 F:     drivers/video/fbdev/uvesafb.*
index 0c90c435497921f581a04c56b5c73d51f4df7383..6c3da3e10f07c5a3f94d4ea3efa49d0287aadcc6 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
index b10dccd0958f335ce3d874aa8d9eb171336a882a..3b1baa8605a77e8f724724550e5ec123df608732 100644 (file)
@@ -11,6 +11,7 @@
 #include "sama5d2-pinfunc.h"
 #include <dt-bindings/mfd/atmel-flexcom.h>
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/at91.h>
 
 / {
        model = "Atmel SAMA5D2 PTC EK";
                                                         <PIN_PA30__NWE_NANDWE>,
                                                         <PIN_PB2__NRD_NANDOE>;
                                                bias-pull-up;
+                                               atmel,drive-strength = <ATMEL_PIO_DRVSTR_ME>;
                                        };
 
                                        ale_cle_rdy_cs {
index 43ee992ccdcf70230cf1f50a33c3c51a6b483f2f..6df61518776f7e45ef8a290fd1920ab675ca649c 100644 (file)
                global_timer: timer@1e200 {
                        compatible = "arm,cortex-a9-global-timer";
                        reg = <0x1e200 0x20>;
-                       interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
                        clocks = <&axi_clk>;
                };
 
                local_timer: local-timer@1e600 {
                        compatible = "arm,cortex-a9-twd-timer";
                        reg = <0x1e600 0x20>;
-                       interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
+                                                 IRQ_TYPE_EDGE_RISING)>;
                        clocks = <&axi_clk>;
                };
 
                twd_watchdog: watchdog@1e620 {
                        compatible = "arm,cortex-a9-twd-wdt";
                        reg = <0x1e620 0x20>;
-                       interrupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
+                                                 IRQ_TYPE_LEVEL_HIGH)>;
                };
 
                armpll: armpll {
                serial0: serial@600 {
                        compatible = "brcm,bcm6345-uart";
                        reg = <0x600 0x1b>;
-                       interrupts = <GIC_SPI 32 0>;
+                       interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&periph_clk>;
                        clock-names = "periph";
                        status = "disabled";
                serial1: serial@620 {
                        compatible = "brcm,bcm6345-uart";
                        reg = <0x620 0x1b>;
-                       interrupts = <GIC_SPI 33 0>;
+                       interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&periph_clk>;
                        clock-names = "periph";
                        status = "disabled";
                        reg = <0x2000 0x600>, <0xf0 0x10>;
                        reg-names = "nand", "nand-int-base";
                        status = "disabled";
-                       interrupts = <GIC_SPI 38 0>;
+                       interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "nand";
                };
 
index 661be948ab7424759ebfdb2d1c780822d17f38b0..185541a5b69fb58127136284f86341845b963af3 100644 (file)
                        interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&rcc SPI6_K>;
                        resets = <&rcc SPI6_R>;
-                       dmas = <&mdma1 34 0x0 0x40008 0x0 0x0 0>,
-                              <&mdma1 35 0x0 0x40002 0x0 0x0 0>;
+                       dmas = <&mdma1 34 0x0 0x40008 0x0 0x0>,
+                              <&mdma1 35 0x0 0x40002 0x0 0x0>;
                        dma-names = "rx", "tx";
                        status = "disabled";
                };
index ffd9f00f74a46da89d88040db9178c8ef01a37d4..5f547c161bafd23a3054b6c084599955d753314b 100644 (file)
                };
 
                hdmi_phy: hdmi-phy@1ef0000 {
-                       compatible = "allwinner,sun8i-r40-hdmi-phy",
-                                    "allwinner,sun50i-a64-hdmi-phy";
+                       compatible = "allwinner,sun8i-r40-hdmi-phy";
                        reg = <0x01ef0000 0x10000>;
                        clocks = <&ccu CLK_BUS_HDMI1>, <&ccu CLK_HDMI_SLOW>,
                                 <&ccu 7>, <&ccu 16>;
index fc91205ff46cebd2218940214c12e6ab84355b21..5bf9443cfbaa63108f8bc11f4356774591e3e29c 100644 (file)
@@ -473,7 +473,7 @@ void pci_ioremap_set_mem_type(int mem_type)
 
 int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
 {
-       BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
+       BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);
 
        return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
                                  PCI_IO_VIRT_BASE + offset + SZ_64K,
index fbc74b5fa3ed26a4bc56657afb3691372aea431e..8edf93b4490fad24e4ef0c4195b6a535b4a87a94 100644 (file)
 396    common  pkey_free               sys_pkey_free
 397    common  statx                   sys_statx
 398    common  rseq                    sys_rseq
+399    common  io_pgetevents           sys_io_pgetevents
index 07256b08226c0c935d7ced6530a4a7a85ee4c276..a6c9fbaeaefcdd71d0ea70c8eeb89c55692f8b66 100644 (file)
@@ -57,6 +57,45 @@ static u64 core_reg_offset_from_id(u64 id)
        return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 }
 
+static int validate_core_offset(const struct kvm_one_reg *reg)
+{
+       u64 off = core_reg_offset_from_id(reg->id);
+       int size;
+
+       switch (off) {
+       case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+            KVM_REG_ARM_CORE_REG(regs.regs[30]):
+       case KVM_REG_ARM_CORE_REG(regs.sp):
+       case KVM_REG_ARM_CORE_REG(regs.pc):
+       case KVM_REG_ARM_CORE_REG(regs.pstate):
+       case KVM_REG_ARM_CORE_REG(sp_el1):
+       case KVM_REG_ARM_CORE_REG(elr_el1):
+       case KVM_REG_ARM_CORE_REG(spsr[0]) ...
+            KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
+               size = sizeof(__u64);
+               break;
+
+       case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+            KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+               size = sizeof(__uint128_t);
+               break;
+
+       case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+       case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+               size = sizeof(__u32);
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       if (KVM_REG_SIZE(reg->id) == size &&
+           IS_ALIGNED(off, size / sizeof(__u32)))
+               return 0;
+
+       return -EINVAL;
+}
+
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
        /*
@@ -76,6 +115,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
            (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
                return -ENOENT;
 
+       if (validate_core_offset(reg))
+               return -EINVAL;
+
        if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
                return -EFAULT;
 
@@ -98,6 +140,9 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
            (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
                return -ENOENT;
 
+       if (validate_core_offset(reg))
+               return -EINVAL;
+
        if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
                return -EINVAL;
 
@@ -107,17 +152,25 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        }
 
        if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
-               u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK;
+               u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
                switch (mode) {
                case PSR_AA32_MODE_USR:
+                       if (!system_supports_32bit_el0())
+                               return -EINVAL;
+                       break;
                case PSR_AA32_MODE_FIQ:
                case PSR_AA32_MODE_IRQ:
                case PSR_AA32_MODE_SVC:
                case PSR_AA32_MODE_ABT:
                case PSR_AA32_MODE_UND:
+                       if (!vcpu_el1_is_32bit(vcpu))
+                               return -EINVAL;
+                       break;
                case PSR_MODE_EL0t:
                case PSR_MODE_EL1t:
                case PSR_MODE_EL1h:
+                       if (vcpu_el1_is_32bit(vcpu))
+                               return -EINVAL;
                        break;
                default:
                        err = -EINVAL;
index 192b3ba070755f70d41f13d3c68eaa18b2b7f17d..f58ea503ad014fda52fbab06e6edc743551a4b6c 100644 (file)
@@ -117,11 +117,14 @@ static pte_t get_clear_flush(struct mm_struct *mm,
 
                /*
                 * If HW_AFDBM is enabled, then the HW could turn on
-                * the dirty bit for any page in the set, so check
-                * them all.  All hugetlb entries are already young.
+                * the dirty or accessed bit for any page in the set,
+                * so check them all.
                 */
                if (pte_dirty(pte))
                        orig_pte = pte_mkdirty(orig_pte);
+
+               if (pte_young(pte))
+                       orig_pte = pte_mkyoung(orig_pte);
        }
 
        if (valid) {
@@ -320,11 +323,40 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
        return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
 }
 
+/*
+ * huge_ptep_set_access_flags will update access flags (dirty, accessed)
+ * and write permission.
+ *
+ * For a contiguous huge pte range, write permission only needs to be
+ * checked on the first pte in the set. Then, for all the contiguous
+ * ptes, we need to check whether there is a discrepancy in the dirty
+ * or young state.
+ */
+static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
+{
+       int i;
+
+       if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
+               return 1;
+
+       for (i = 0; i < ncontig; i++) {
+               pte_t orig_pte = huge_ptep_get(ptep + i);
+
+               if (pte_dirty(pte) != pte_dirty(orig_pte))
+                       return 1;
+
+               if (pte_young(pte) != pte_young(orig_pte))
+                       return 1;
+       }
+
+       return 0;
+}
+
 int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                               unsigned long addr, pte_t *ptep,
                               pte_t pte, int dirty)
 {
-       int ncontig, i, changed = 0;
+       int ncontig, i;
        size_t pgsize = 0;
        unsigned long pfn = pte_pfn(pte), dpfn;
        pgprot_t hugeprot;
@@ -336,19 +368,23 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
        ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
        dpfn = pgsize >> PAGE_SHIFT;
 
+       if (!__cont_access_flags_changed(ptep, pte, ncontig))
+               return 0;
+
        orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
-       if (!pte_same(orig_pte, pte))
-               changed = 1;
 
-       /* Make sure we don't lose the dirty state */
+       /* Make sure we don't lose the dirty or young state */
        if (pte_dirty(orig_pte))
                pte = pte_mkdirty(pte);
 
+       if (pte_young(orig_pte))
+               pte = pte_mkyoung(pte);
+
        hugeprot = pte_pgprot(pte);
        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
                set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
 
-       return changed;
+       return 1;
 }
 
 void huge_ptep_set_wrprotect(struct mm_struct *mm,
index f7eb612b46ba81348aa8311088af817dc2df5d08..8ce317c5b9edcf6ad665534ba327be0cfe3f15d7 100644 (file)
                        reg = <0x1010000 0x10000>,
                              <0x1030000 0x10000>,
                              <0x1080000 0x100>,
-                             <0x10d0000 0x10000>,
                              <0x11e0000 0x100>,
                              <0x11f0000 0x100>,
                              <0x1200000 0x100>,
                              <0x1280000 0x100>,
                              <0x1800000 0x80000>,
                              <0x1880000 0x10000>;
-                       reg-names = "sys", "rew", "qs", "hsio", "port0",
-                                   "port1", "port2", "port3", "port4", "port5",
-                                   "port6", "port7", "port8", "port9", "port10",
-                                   "qsys", "ana";
+                       reg-names = "sys", "rew", "qs", "port0", "port1",
+                                   "port2", "port3", "port4", "port5", "port6",
+                                   "port7", "port8", "port9", "port10", "qsys",
+                                   "ana";
                        interrupts = <21 22>;
                        interrupt-names = "xtr", "inj";
 
                        pinctrl-0 = <&miim1>;
                        status = "disabled";
                };
+
+               hsio: syscon@10d0000 {
+                       compatible = "mscc,ocelot-hsio", "syscon", "simple-mfd";
+                       reg = <0x10d0000 0x10000>;
+
+                       serdes: serdes {
+                               compatible = "mscc,vsc7514-serdes";
+                               #phy-cells = <2>;
+                       };
+               };
        };
 };
index 1a951b00465d739f0b7268803d9c1a25e92e947f..1fffbba8d6a5e64a5fefdb06a6ecab29f4ec66e5 100644 (file)
@@ -9,6 +9,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
 
 extern unsigned int rtas_data;
 extern unsigned long long memory_limit;
+extern bool init_mem_is_free;
 extern unsigned long klimit;
 extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
 
index ea04dfb8c0927f71e1a89937526f3e1f2d7fd241..2d8fc8c9da7a1f210816bd9734c3d8453d8fc04e 100644 (file)
@@ -1314,9 +1314,7 @@ EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
 
 #ifdef CONFIG_PPC_DENORMALISATION
        mfspr   r10,SPRN_HSRR1
-       mfspr   r11,SPRN_HSRR0          /* save HSRR0 */
        andis.  r10,r10,(HSRR1_DENORM)@h /* denorm? */
-       addi    r11,r11,-4              /* HSRR0 is next instruction */
        bne+    denorm_assist
 #endif
 
@@ -1382,6 +1380,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
  */
        XVCPSGNDP32(32)
 denorm_done:
+       mfspr   r11,SPRN_HSRR0
+       subi    r11,r11,4
        mtspr   SPRN_HSRR0,r11
        mtcrf   0x80,r9
        ld      r9,PACA_EXGEN+EX_R9(r13)
index 6bffbc5affe76ba7847ceb74b69e16cc53ac4178..7716374786bd97c7e56390ea587e967d75c68a2e 100644 (file)
@@ -176,13 +176,27 @@ _GLOBAL(tm_reclaim)
        std     r1, PACATMSCRATCH(r13)
        ld      r1, PACAR1(r13)
 
-       /* Store the PPR in r11 and reset to decent value */
        std     r11, GPR11(r1)                  /* Temporary stash */
 
+       /*
+        * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
+        * clobbered by an exception once we turn on MSR_RI below.
+        */
+       ld      r11, PACATMSCRATCH(r13)
+       std     r11, GPR1(r1)
+
+       /*
+        * Store r13 away so we can free up the scratch SPR for the SLB fault
+        * handler (needed once we start accessing the thread_struct).
+        */
+       GET_SCRATCH0(r11)
+       std     r11, GPR13(r1)
+
        /* Reset MSR RI so we can take SLB faults again */
        li      r11, MSR_RI
        mtmsrd  r11, 1
 
+       /* Store the PPR in r11 and reset to decent value */
        mfspr   r11, SPRN_PPR
        HMT_MEDIUM
 
@@ -207,11 +221,11 @@ _GLOBAL(tm_reclaim)
        SAVE_GPR(8, r7)                         /* user r8 */
        SAVE_GPR(9, r7)                         /* user r9 */
        SAVE_GPR(10, r7)                        /* user r10 */
-       ld      r3, PACATMSCRATCH(r13)          /* user r1 */
+       ld      r3, GPR1(r1)                    /* user r1 */
        ld      r4, GPR7(r1)                    /* user r7 */
        ld      r5, GPR11(r1)                   /* user r11 */
        ld      r6, GPR12(r1)                   /* user r12 */
-       GET_SCRATCH0(8)                         /* user r13 */
+       ld      r8, GPR13(r1)                   /* user r13 */
        std     r3, GPR1(r7)
        std     r4, GPR7(r7)
        std     r5, GPR11(r7)
index 933c574e1cf795d65855b60d763c62edf4d1996a..998f8d089ac7ea840341f0a741df3989d30542bc 100644 (file)
@@ -646,6 +646,16 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
         */
        local_irq_disable();
        ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+       /*
+        * If the PTE disappeared temporarily due to a THP
+        * collapse, just return and let the guest try again.
+        */
+       if (!ptep) {
+               local_irq_enable();
+               if (page)
+                       put_page(page);
+               return RESUME_GUEST;
+       }
        pte = *ptep;
        local_irq_enable();
 
index 886ed94b9c13307f5fc739e899274704faa89a15..d05c8af4ac51fe4c696469664e4bf6ceb9dd7d64 100644 (file)
@@ -443,6 +443,9 @@ _GLOBAL(csum_ipv6_magic)
        addc    r0, r8, r9
        ld      r10, 0(r4)
        ld      r11, 8(r4)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+       rotldi  r5, r5, 8
+#endif
        adde    r0, r0, r10
        add     r5, r5, r7
        adde    r0, r0, r11
index 850f3b8f4da5e55346afbbe939987b0a0642aa7d..6ae2777c220d4062860ecd64c567d754bf00da8a 100644 (file)
@@ -28,6 +28,12 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
 {
        int err;
 
+       /* Make sure we aren't patching a freed init section */
+       if (init_mem_is_free && init_section_contains(exec_addr, 4)) {
+               pr_debug("Skipping init section patching addr: 0x%px\n", exec_addr);
+               return 0;
+       }
+
        __put_user_size(instr, patch_addr, 4, err);
        if (err)
                return err;
index 5c8530d0c611898f012e2cc8f300a7112715c351..04ccb274a6205bba58357d5897105ada90f81c0f 100644 (file)
@@ -63,6 +63,7 @@
 #endif
 
 unsigned long long memory_limit;
+bool init_mem_is_free;
 
 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
@@ -396,6 +397,7 @@ void free_initmem(void)
 {
        ppc_md.progress = ppc_printk_progress;
        mark_initmem_nx();
+       init_mem_is_free = true;
        free_initmem_default(POISON_FREE_INITMEM);
 }
 
index 35ac5422903a0ee5494c92b637bcc64604c7cba3..59d07bd5374a968f938135b6c0691b0245dd7da0 100644 (file)
@@ -1204,7 +1204,9 @@ int find_and_online_cpu_nid(int cpu)
        int new_nid;
 
        /* Use associativity from first thread for all siblings */
-       vphn_get_associativity(cpu, associativity);
+       if (vphn_get_associativity(cpu, associativity))
+               return cpu_to_node(cpu);
+
        new_nid = associativity_to_nid(associativity);
        if (new_nid < 0 || !node_possible(new_nid))
                new_nid = first_online_node;
@@ -1452,7 +1454,8 @@ static struct timer_list topology_timer;
 
 static void reset_topology_timer(void)
 {
-       mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
+       if (vphn_enabled)
+               mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
 }
 
 #ifdef CONFIG_SMP
index 333b1f80c435435cbf703a477e4bb60f6181a058..b271b283c785e3a07589ea81c6b8e40e7def5a69 100644 (file)
@@ -45,7 +45,7 @@ static void scan_pkey_feature(void)
         * Since any pkey can be used for data or execute, we will just treat
         * all keys as equal and track them as one entity.
         */
-       pkeys_total = be32_to_cpu(vals[0]);
+       pkeys_total = vals[0];
        pkeys_devtree_defined = true;
 }
 
index 6c5db1acbe8dffaba711faf55d13b2baa6c6a965..fe9691040f54c26561949c469738f277c90069e6 100644 (file)
@@ -276,7 +276,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
        level_shift = entries_shift + 3;
        level_shift = max_t(unsigned int, level_shift, PAGE_SHIFT);
 
-       if ((level_shift - 3) * levels + page_shift >= 60)
+       if ((level_shift - 3) * levels + page_shift >= 55)
                return -EINVAL;
 
        /* Allocate TCE table */
diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
new file mode 100644 (file)
index 0000000..c9fecd1
--- /dev/null
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_PROTOTYPES_H
+#define _ASM_RISCV_PROTOTYPES_H
+
+#include <linux/ftrace.h>
+#include <asm-generic/asm-prototypes.h>
+
+#endif /* _ASM_RISCV_PROTOTYPES_H */
index aee6031230306a934747c64edb4b61f6928e9e8b..b2d26d9d8489c8e8b6bba01adee0c573fd6564f8 100644 (file)
@@ -186,7 +186,7 @@ static void __init setup_bootmem(void)
        BUG_ON(mem_size == 0);
 
        set_max_mapnr(PFN_DOWN(mem_size));
-       max_low_pfn = pfn_base + PFN_DOWN(mem_size);
+       max_low_pfn = memblock_end_of_DRAM();
 
 #ifdef CONFIG_BLK_DEV_INITRD
        setup_initrd();
index eaa843a52907ffd8a166c09e98c1594627aff4ac..a480356e0ed886006749d69488c5af625828bf6d 100644 (file)
@@ -25,20 +25,6 @@ ENTRY(get_sev_encryption_bit)
        push    %ebx
        push    %ecx
        push    %edx
-       push    %edi
-
-       /*
-        * RIP-relative addressing is needed to access the encryption bit
-        * variable. Since we are running in 32-bit mode we need this call/pop
-        * sequence to get the proper relative addressing.
-        */
-       call    1f
-1:     popl    %edi
-       subl    $1b, %edi
-
-       movl    enc_bit(%edi), %eax
-       cmpl    $0, %eax
-       jge     .Lsev_exit
 
        /* Check if running under a hypervisor */
        movl    $1, %eax
@@ -69,15 +55,12 @@ ENTRY(get_sev_encryption_bit)
 
        movl    %ebx, %eax
        andl    $0x3f, %eax             /* Return the encryption bit location */
-       movl    %eax, enc_bit(%edi)
        jmp     .Lsev_exit
 
 .Lno_sev:
        xor     %eax, %eax
-       movl    %eax, enc_bit(%edi)
 
 .Lsev_exit:
-       pop     %edi
        pop     %edx
        pop     %ecx
        pop     %ebx
@@ -113,8 +96,6 @@ ENTRY(set_sev_encryption_mask)
 ENDPROC(set_sev_encryption_mask)
 
        .data
-enc_bit:
-       .int    0xffffffff
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
        .balign 8
index fa3f439f0a9200321a96efd2ba874dbda4824cff..141d415a8c8098e9bd9747c94ee84e4de843c9f8 100644 (file)
@@ -68,7 +68,13 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
        $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
        -fno-omit-frame-pointer -foptimize-sibling-calls \
-       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
+       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
+
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_VDSO_CFLAGS),)
+  CFL += $(RETPOLINE_VDSO_CFLAGS)
+endif
+endif
 
 $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
 
@@ -138,7 +144,13 @@ KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
 KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_VDSO_CFLAGS),)
+  KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+endif
+endif
+
 $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
 
 $(obj)/vdso32.so.dbg: FORCE \
index f19856d95c60919c92d1679e0037d9339c4c2a65..e48ca3afa0912cc8bb03bd6dba84b0999abe1982 100644 (file)
@@ -43,8 +43,9 @@ extern u8 hvclock_page
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
        long ret;
-       asm("syscall" : "=a" (ret) :
-           "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+       asm ("syscall" : "=a" (ret), "=m" (*ts) :
+            "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
+            "memory", "rcx", "r11");
        return ret;
 }
 
@@ -52,8 +53,9 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
        long ret;
 
-       asm("syscall" : "=a" (ret) :
-           "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+       asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
+            "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
+            "memory", "rcx", "r11");
        return ret;
 }
 
@@ -64,13 +66,13 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
        long ret;
 
-       asm(
+       asm (
                "mov %%ebx, %%edx \n"
-               "mov %2, %%ebx \n"
+               "mov %[clock], %%ebx \n"
                "call __kernel_vsyscall \n"
                "mov %%edx, %%ebx \n"
-               : "=a" (ret)
-               : "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
+               : "=a" (ret), "=m" (*ts)
+               : "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
                : "memory", "edx");
        return ret;
 }
@@ -79,13 +81,13 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
        long ret;
 
-       asm(
+       asm (
                "mov %%ebx, %%edx \n"
-               "mov %2, %%ebx \n"
+               "mov %[tv], %%ebx \n"
                "call __kernel_vsyscall \n"
                "mov %%edx, %%ebx \n"
-               : "=a" (ret)
-               : "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
+               : "=a" (ret), "=m" (*tv), "=m" (*tz)
+               : "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
                : "memory", "edx");
        return ret;
 }
index 981ba5e8241ba2ece923ef22f162ac3820c684c4..8671de126eac09e0a63358d72305ce0a5e9f4f31 100644 (file)
@@ -36,6 +36,7 @@
 
 static int num_counters_llc;
 static int num_counters_nb;
+static bool l3_mask;
 
 static HLIST_HEAD(uncore_unused_list);
 
@@ -209,6 +210,13 @@ static int amd_uncore_event_init(struct perf_event *event)
        hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
        hwc->idx = -1;
 
+       /*
+        * SliceMask and ThreadMask need to be set for certain L3 events in
+        * Family 17h. For other events, the two fields do not affect the count.
+        */
+       if (l3_mask)
+               hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);
+
        if (event->cpu < 0)
                return -EINVAL;
 
@@ -525,6 +533,7 @@ static int __init amd_uncore_init(void)
                amd_llc_pmu.name          = "amd_l3";
                format_attr_event_df.show = &event_show_df;
                format_attr_event_l3.show = &event_show_l3;
+               l3_mask                   = true;
        } else {
                num_counters_nb           = NUM_COUNTERS_NB;
                num_counters_llc          = NUM_COUNTERS_L2;
@@ -532,6 +541,7 @@ static int __init amd_uncore_init(void)
                amd_llc_pmu.name          = "amd_l2";
                format_attr_event_df      = format_attr_event;
                format_attr_event_l3      = format_attr_event;
+               l3_mask                   = false;
        }
 
        amd_nb_pmu.attr_groups  = amd_uncore_attr_groups_df;
index 51d7c117e3c705f82136f422c553164f72e83c8c..c07bee31abe859c61c53c499e9aabcbe11f1f07b 100644 (file)
@@ -3061,7 +3061,7 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {
 
 void bdx_uncore_cpu_init(void)
 {
-       int pkg = topology_phys_to_logical_pkg(0);
+       int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
 
        if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
@@ -3931,16 +3931,16 @@ static const struct pci_device_id skx_uncore_pci_ids[] = {
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
        },
        { /* M3UPI0 Link 0 */
-               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
-               .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0),
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
        },
        { /* M3UPI0 Link 1 */
-               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
-               .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1),
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
        },
        { /* M3UPI1 Link 2 */
-               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
-               .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2),
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
        },
        { /* end: all zeroes */ }
 };
index 12f54082f4c8ec35fbad10a0270d0846a6b0e8c5..78241b736f2a04aa4ccac261727ecd8798042cda 100644 (file)
 #define INTEL_ARCH_EVENT_MASK  \
        (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
 
+#define AMD64_L3_SLICE_SHIFT                           48
+#define AMD64_L3_SLICE_MASK                            \
+       ((0xFULL) << AMD64_L3_SLICE_SHIFT)
+
+#define AMD64_L3_THREAD_SHIFT                          56
+#define AMD64_L3_THREAD_MASK                           \
+       ((0xFFULL) << AMD64_L3_THREAD_SHIFT)
+
 #define X86_RAW_EVENT_MASK             \
        (ARCH_PERFMON_EVENTSEL_EVENT |  \
         ARCH_PERFMON_EVENTSEL_UMASK |  \
index a80c0673798fe760f6ce86ebcaf1c9ce9c75e7a6..e60c45fd3679bf900a8e1d53d2cb731c316d519f 100644 (file)
@@ -10,8 +10,13 @@ struct cpumask;
 struct mm_struct;
 
 #ifdef CONFIG_X86_UV
+#include <linux/efi.h>
 
 extern enum uv_system_type get_uv_system_type(void);
+static inline bool is_early_uv_system(void)
+{
+       return !((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || !efi.uv_systab);
+}
 extern int is_uv_system(void);
 extern int is_uv_hubless(void);
 extern void uv_cpu_init(void);
@@ -23,6 +28,7 @@ extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 #else  /* X86_UV */
 
 static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
+static inline bool is_early_uv_system(void)    { return 0; }
 static inline int is_uv_system(void)   { return 0; }
 static inline int is_uv_hubless(void)  { return 0; }
 static inline void uv_cpu_init(void)   { }
index 22ab408177b2cd2b09c687cfda07436c4c30b3e5..eeea634bee0a73291a6f879706ef2e280f8e0d4f 100644 (file)
@@ -922,7 +922,7 @@ static void init_amd(struct cpuinfo_x86 *c)
 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
        /* AMD errata T13 (order #21922) */
-       if ((c->x86 == 6)) {
+       if (c->x86 == 6) {
                /* Duron Rev A0 */
                if (c->x86_model == 3 && c->x86_stepping == 0)
                        size = 64;
index 6490f618e09696a7a407859037a0b1635cbb6f9f..b52bd2b6cdb443ba0c89d78aaa52b02b82a10b6e 100644 (file)
@@ -26,6 +26,7 @@
 #include <asm/apic.h>
 #include <asm/intel-family.h>
 #include <asm/i8259.h>
+#include <asm/uv/uv.h>
 
 unsigned int __read_mostly cpu_khz;    /* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -1433,6 +1434,9 @@ void __init tsc_early_init(void)
 {
        if (!boot_cpu_has(X86_FEATURE_TSC))
                return;
+       /* Don't change UV TSC multi-chassis synchronization */
+       if (is_early_uv_system())
+               return;
        if (!determine_cpu_tsc_frequencies(true))
                return;
        loops_per_jiffy = get_loops_per_jiffy();
index d7e9bce6ff61c74d3d36fe6c9d4981e7136e0405..51b953ad9d4efe0e10a032228cc1418ff22167fe 100644 (file)
@@ -249,6 +249,17 @@ static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
  */
 static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
 
+/*
+ * In some cases, we need to preserve the GFN of a non-present or reserved
+ * SPTE when we usurp the upper five bits of the physical address space to
+ * defend against L1TF, e.g. for MMIO SPTEs.  To preserve the GFN, we'll
+ * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
+ * left into the reserved bits, i.e. the GFN in the SPTE will be split into
+ * high and low parts.  This mask covers the lower bits of the GFN.
+ */
+static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
+
+
 static void mmu_spte_set(u64 *sptep, u64 spte);
 static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
@@ -357,9 +368,7 @@ static bool is_mmio_spte(u64 spte)
 
 static gfn_t get_mmio_spte_gfn(u64 spte)
 {
-       u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
-                  shadow_nonpresent_or_rsvd_mask;
-       u64 gpa = spte & ~mask;
+       u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
 
        gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
               & shadow_nonpresent_or_rsvd_mask;
@@ -423,6 +432,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
 static void kvm_mmu_reset_all_pte_masks(void)
 {
+       u8 low_phys_bits;
+
        shadow_user_mask = 0;
        shadow_accessed_mask = 0;
        shadow_dirty_mask = 0;
@@ -437,12 +448,17 @@ static void kvm_mmu_reset_all_pte_masks(void)
         * appropriate mask to guard against L1TF attacks. Otherwise, it is
         * assumed that the CPU is not vulnerable to L1TF.
         */
+       low_phys_bits = boot_cpu_data.x86_phys_bits;
        if (boot_cpu_data.x86_phys_bits <
-           52 - shadow_nonpresent_or_rsvd_mask_len)
+           52 - shadow_nonpresent_or_rsvd_mask_len) {
                shadow_nonpresent_or_rsvd_mask =
                        rsvd_bits(boot_cpu_data.x86_phys_bits -
                                  shadow_nonpresent_or_rsvd_mask_len,
                                  boot_cpu_data.x86_phys_bits - 1);
+               low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
+       }
+       shadow_nonpresent_or_rsvd_lower_gfn_mask =
+               GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
 }
 
 static int is_cpuid_PSE36(void)
index 06412ba46aa36eaca6cd1f111a6b8df6d795d969..612fd17be6351c48544abc36884df1c7669727da 100644 (file)
@@ -121,7 +121,6 @@ module_param_named(pml, enable_pml, bool, S_IRUGO);
 
 #define MSR_BITMAP_MODE_X2APIC         1
 #define MSR_BITMAP_MODE_X2APIC_APICV   2
-#define MSR_BITMAP_MODE_LM             4
 
 #define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL
 
@@ -857,6 +856,7 @@ struct nested_vmx {
 
        /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
        u64 vmcs01_debugctl;
+       u64 vmcs01_guest_bndcfgs;
 
        u16 vpid02;
        u16 last_vpid;
@@ -2899,8 +2899,7 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
                vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
        }
 
-       if (is_long_mode(&vmx->vcpu))
-               wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+       wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 #else
        savesegment(fs, fs_sel);
        savesegment(gs, gs_sel);
@@ -2951,8 +2950,7 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
        vmx->loaded_cpu_state = NULL;
 
 #ifdef CONFIG_X86_64
-       if (is_long_mode(&vmx->vcpu))
-               rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+       rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 #endif
        if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
                kvm_load_ldt(host_state->ldt_sel);
@@ -2980,24 +2978,19 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
 #ifdef CONFIG_X86_64
 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
 {
-       if (is_long_mode(&vmx->vcpu)) {
-               preempt_disable();
-               if (vmx->loaded_cpu_state)
-                       rdmsrl(MSR_KERNEL_GS_BASE,
-                              vmx->msr_guest_kernel_gs_base);
-               preempt_enable();
-       }
+       preempt_disable();
+       if (vmx->loaded_cpu_state)
+               rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+       preempt_enable();
        return vmx->msr_guest_kernel_gs_base;
 }
 
 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
 {
-       if (is_long_mode(&vmx->vcpu)) {
-               preempt_disable();
-               if (vmx->loaded_cpu_state)
-                       wrmsrl(MSR_KERNEL_GS_BASE, data);
-               preempt_enable();
-       }
+       preempt_disable();
+       if (vmx->loaded_cpu_state)
+               wrmsrl(MSR_KERNEL_GS_BASE, data);
+       preempt_enable();
        vmx->msr_guest_kernel_gs_base = data;
 }
 #endif
@@ -3533,9 +3526,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
                VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
                VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
 
-       if (kvm_mpx_supported())
-               msrs->exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
-
        /* We support free control of debug control saving. */
        msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
 
@@ -3552,8 +3542,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
                VM_ENTRY_LOAD_IA32_PAT;
        msrs->entry_ctls_high |=
                (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
-       if (kvm_mpx_supported())
-               msrs->entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
 
        /* We support free control of debug control loading. */
        msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
@@ -3601,12 +3589,12 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
                msrs->secondary_ctls_high);
        msrs->secondary_ctls_low = 0;
        msrs->secondary_ctls_high &=
-               SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
                SECONDARY_EXEC_DESC |
                SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
                SECONDARY_EXEC_APIC_REGISTER_VIRT |
                SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
                SECONDARY_EXEC_WBINVD_EXITING;
+
        /*
         * We can emulate "VMCS shadowing," even if the hardware
         * doesn't support it.
@@ -3663,6 +3651,10 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
                msrs->secondary_ctls_high |=
                        SECONDARY_EXEC_UNRESTRICTED_GUEST;
 
+       if (flexpriority_enabled)
+               msrs->secondary_ctls_high |=
+                       SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+
        /* miscellaneous data */
        rdmsr(MSR_IA32_VMX_MISC,
                msrs->misc_low,
@@ -5073,19 +5065,6 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
        if (!msr)
                return;
 
-       /*
-        * MSR_KERNEL_GS_BASE is not intercepted when the guest is in
-        * 64-bit mode as a 64-bit kernel may frequently access the
-        * MSR.  This means we need to manually save/restore the MSR
-        * when switching between guest and host state, but only if
-        * the guest is in 64-bit mode.  Sync our cached value if the
-        * guest is transitioning to 32-bit mode and the CPU contains
-        * guest state, i.e. the cache is stale.
-        */
-#ifdef CONFIG_X86_64
-       if (!(efer & EFER_LMA))
-               (void)vmx_read_guest_kernel_gs_base(vmx);
-#endif
        vcpu->arch.efer = efer;
        if (efer & EFER_LMA) {
                vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
@@ -6078,9 +6057,6 @@ static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
                        mode |= MSR_BITMAP_MODE_X2APIC_APICV;
        }
 
-       if (is_long_mode(vcpu))
-               mode |= MSR_BITMAP_MODE_LM;
-
        return mode;
 }
 
@@ -6121,9 +6097,6 @@ static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
        if (!changed)
                return;
 
-       vmx_set_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW,
-                                 !(mode & MSR_BITMAP_MODE_LM));
-
        if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
                vmx_update_msr_bitmap_x2apic(msr_bitmap, mode);
 
@@ -6189,6 +6162,11 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
        nested_mark_vmcs12_pages_dirty(vcpu);
 }
 
+static u8 vmx_get_rvi(void)
+{
+       return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
+}
+
 static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6201,7 +6179,7 @@ static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
                WARN_ON_ONCE(!vmx->nested.virtual_apic_page))
                return false;
 
-       rvi = vmcs_read16(GUEST_INTR_STATUS) & 0xff;
+       rvi = vmx_get_rvi();
 
        vapic_page = kmap(vmx->nested.virtual_apic_page);
        vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
@@ -10245,15 +10223,16 @@ static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
        if (!lapic_in_kernel(vcpu))
                return;
 
+       if (!flexpriority_enabled &&
+           !cpu_has_vmx_virtualize_x2apic_mode())
+               return;
+
        /* Postpone execution until vmcs01 is the current VMCS. */
        if (is_guest_mode(vcpu)) {
                to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true;
                return;
        }
 
-       if (!cpu_need_tpr_shadow(vcpu))
-               return;
-
        sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
        sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
                              SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
@@ -10375,6 +10354,14 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
        return max_irr;
 }
 
+static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
+{
+       u8 rvi = vmx_get_rvi();
+       u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
+
+       return ((rvi & 0xf0) > (vppr & 0xf0));
+}
+
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 {
        if (!kvm_vcpu_apicv_active(vcpu))
@@ -11264,6 +11251,23 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
 #undef cr4_fixed1_update
 }
 
+static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+       if (kvm_mpx_supported()) {
+               bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX);
+
+               if (mpx_enabled) {
+                       vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
+                       vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
+               } else {
+                       vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS;
+                       vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS;
+               }
+       }
+}
+
 static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -11280,8 +11284,10 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
                to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
                        ~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
 
-       if (nested_vmx_allowed(vcpu))
+       if (nested_vmx_allowed(vcpu)) {
                nested_vmx_cr_fixed1_bits_update(vcpu);
+               nested_vmx_entry_exit_ctls_update(vcpu);
+       }
 }
 
 static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
@@ -12049,8 +12055,13 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
        set_cr4_guest_host_mask(vmx);
 
-       if (vmx_mpx_supported())
-               vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+       if (kvm_mpx_supported()) {
+               if (vmx->nested.nested_run_pending &&
+                       (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+                       vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+               else
+                       vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
+       }
 
        if (enable_vpid) {
                if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
@@ -12595,15 +12606,21 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
        bool from_vmentry = !!exit_qual;
        u32 dummy_exit_qual;
-       u32 vmcs01_cpu_exec_ctrl;
+       bool evaluate_pending_interrupts;
        int r = 0;
 
-       vmcs01_cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+       evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
+               (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
+       if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
+               evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
 
        enter_guest_mode(vcpu);
 
        if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
                vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
+       if (kvm_mpx_supported() &&
+               !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+               vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
 
        vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
        vmx_segment_cache_clear(vmx);
@@ -12643,16 +12660,14 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
         * to L1 or delivered directly to L2 (e.g. In case L1 don't
         * intercept EXTERNAL_INTERRUPT).
         *
-        * Usually this would be handled by L0 requesting a
-        * IRQ/NMI window by setting VMCS accordingly. However,
-        * this setting was done on VMCS01 and now VMCS02 is active
-        * instead. Thus, we force L0 to perform pending event
-        * evaluation by requesting a KVM_REQ_EVENT.
-        */
-       if (vmcs01_cpu_exec_ctrl &
-               (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING)) {
+        * Usually this would be handled by the processor noticing an
+        * IRQ/NMI window request, or checking RVI during evaluation of
+        * pending virtual interrupts.  However, this setting was done
+        * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
+        * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
+        */
+       if (unlikely(evaluate_pending_interrupts))
                kvm_make_request(KVM_REQ_EVENT, vcpu);
-       }
 
        /*
         * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
index edbf00ec56b34f7765551280e2872b28d1cb0740..ca717737347e670d25ede51b1c84118fd43a3d9c 100644 (file)
@@ -4698,7 +4698,7 @@ static void kvm_init_msr_list(void)
                 */
                switch (msrs_to_save[i]) {
                case MSR_IA32_BNDCFGS:
-                       if (!kvm_x86_ops->mpx_supported())
+                       if (!kvm_mpx_supported())
                                continue;
                        break;
                case MSR_TSC_AUX:
index 94e1ed667b6ea383a99f1cd76d6917af0d2a1ba6..41317c50a44628e9ef4930e9f17ad6d8297c9190 100644 (file)
@@ -322,16 +322,11 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 
        /*
         * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
-        * queue_hw_ctx after freeze the queue. So we could use q_usage_counter
-        * to avoid race with it. __blk_mq_update_nr_hw_queues will users
-        * synchronize_rcu to ensure all of the users go out of the critical
-        * section below and see zeroed q_usage_counter.
+        * queue_hw_ctx after freezing the queue, so we use q_usage_counter
+        * to avoid racing with it.
         */
-       rcu_read_lock();
-       if (percpu_ref_is_zero(&q->q_usage_counter)) {
-               rcu_read_unlock();
+       if (!percpu_ref_tryget(&q->q_usage_counter))
                return;
-       }
 
        queue_for_each_hw_ctx(q, hctx, i) {
                struct blk_mq_tags *tags = hctx->tags;
@@ -347,7 +342,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                        bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
                bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
        }
-       rcu_read_unlock();
+       blk_queue_exit(q);
 }
 
 static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
index 85a1c1a59c72716ce2e31c280d7fd43d5c6e61e9..e3c39ea8e17b04b0787e53959cd4f68cb1a43f3d 100644 (file)
@@ -1628,7 +1628,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                BUG_ON(!rq->q);
                if (rq->mq_ctx != this_ctx) {
                        if (this_ctx) {
-                               trace_block_unplug(this_q, depth, from_schedule);
+                               trace_block_unplug(this_q, depth, !from_schedule);
                                blk_mq_sched_insert_requests(this_q, this_ctx,
                                                                &ctx_list,
                                                                from_schedule);
@@ -1648,7 +1648,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
         * on 'ctx_list'. Do those.
         */
        if (this_ctx) {
-               trace_block_unplug(this_q, depth, from_schedule);
+               trace_block_unplug(this_q, depth, !from_schedule);
                blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
                                                from_schedule);
        }
index 6a06b5d040e5dd8ffab230a3f3ae74bfa6935233..fae58b2f906fc5e0352c3f3194780abe13369784 100644 (file)
@@ -609,7 +609,7 @@ void elv_drain_elevator(struct request_queue *q)
 
        while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
                ;
-       if (q->nr_sorted && printed++ < 10) {
+       if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->type->elevator_name, q->nr_sorted);
index cbec9adc01c768e95cf8a3ad000697019f38f65c..ae4aa02e4dc6e41e9e658b416cff91cb1fb266ba 100644 (file)
@@ -2689,11 +2689,10 @@ static void ns_poll(struct timer_list *unused)
        PRINTK("nicstar: Entering ns_poll().\n");
        for (i = 0; i < num_cards; i++) {
                card = cards[i];
-               if (spin_is_locked(&card->int_lock)) {
+               if (!spin_trylock_irqsave(&card->int_lock, flags)) {
                        /* Probably it isn't worth spinning */
                        continue;
                }
-               spin_lock_irqsave(&card->int_lock, flags);
 
                stat_w = 0;
                stat_r = readl(card->membase + STAT);
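
The nicstar change removes a check-then-act race: spin_is_locked() can go stale before spin_lock_irqsave() runs, and on uniprocessor builds it can be compile-time false, whereas spin_trylock_irqsave() tests and acquires in one atomic step. The shape, sketched over a hypothetical card lock:

        /* Sketch: atomic test-and-acquire instead of check-then-lock. */
        static void demo_poll_one(struct demo_card *card)
        {
                unsigned long flags;

                if (!spin_trylock_irqsave(&card->lock, flags))
                        return;         /* contended: skip this pass */
                /* ... poll the hardware under the lock ... */
                spin_unlock_irqrestore(&card->lock, flags);
        }
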
index 3f68e2919dc5da70bb29fb1468038c76e3d61d8c..a690fd40026051453ba138d4919811b726b9789b 100644 (file)
@@ -1713,8 +1713,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 
        dpm_wait_for_subordinate(dev, async);
 
-       if (async_error)
+       if (async_error) {
+               dev->power.direct_complete = false;
                goto Complete;
+       }
 
        /*
         * If a device configured to wake up the system from sleep states
@@ -1726,6 +1728,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
                pm_wakeup_event(dev, 0);
 
        if (pm_wakeup_pending()) {
+               dev->power.direct_complete = false;
                async_error = -EBUSY;
                goto Complete;
        }
index a71d817e900ddc07ff45d240f0ae290ff408b6cd..429d20131c7e228f81bcbd6dd72ed8a21290c14f 100644 (file)
@@ -2670,8 +2670,8 @@ static void purge_persistent_grants(struct blkfront_info *info)
                        list_del(&gnt_list_entry->node);
                        gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
                        rinfo->persistent_gnts_c--;
-                       __free_page(gnt_list_entry->page);
-                       kfree(gnt_list_entry);
+                       gnt_list_entry->gref = GRANT_INVALID_REF;
+                       list_add_tail(&gnt_list_entry->node, &rinfo->grants);
                }
 
                spin_unlock_irqrestore(&rinfo->ring_lock, flags);
index 3d7a5c149af331dce43990e41a595467f00ac3d4..1ad4991753bb6c8f213f6cb77cfe9f7f56e03a4c 100644 (file)
@@ -203,10 +203,11 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
        { }     /* Terminating entry */
 };
 
-static inline void ath3k_log_failed_loading(int err, int len, int size)
+static inline void ath3k_log_failed_loading(int err, int len, int size,
+                                           int count)
 {
-       BT_ERR("Error in firmware loading err = %d, len = %d, size = %d",
-                       err, len, size);
+       BT_ERR("Firmware loading err = %d, len = %d, size = %d, count = %d",
+              err, len, size, count);
 }
 
 #define USB_REQ_DFU_DNLOAD     1
@@ -257,7 +258,7 @@ static int ath3k_load_firmware(struct usb_device *udev,
                                        &len, 3000);
 
                if (err || (len != size)) {
-                       ath3k_log_failed_loading(err, len, size);
+                       ath3k_log_failed_loading(err, len, size, count);
                        goto error;
                }
 
@@ -356,7 +357,7 @@ static int ath3k_load_fwfile(struct usb_device *udev,
                err = usb_bulk_msg(udev, pipe, send_buf, size,
                                        &len, 3000);
                if (err || (len != size)) {
-                       ath3k_log_failed_loading(err, len, size);
+                       ath3k_log_failed_loading(err, len, size, count);
                        kfree(send_buf);
                        return err;
                }
index 25b0cf952b91757fcfa9abb85ab9cc2a74545218..54713833951a9ddd6fa34780a263942863e242a2 100644 (file)
@@ -448,7 +448,7 @@ static int bt3c_load_firmware(struct bt3c_info *info,
 {
        char *ptr = (char *) firmware;
        char b[9];
-       unsigned int iobase, tmp;
+       unsigned int iobase, tmp, tn;
        unsigned long size, addr, fcs;
        int i, err = 0;
 
@@ -490,7 +490,9 @@ static int bt3c_load_firmware(struct bt3c_info *info,
                memset(b, 0, sizeof(b));
                for (tmp = 0, i = 0; i < size; i++) {
                        memcpy(b, ptr + (i * 2) + 2, 2);
-                       tmp += simple_strtol(b, NULL, 16);
+                       if (kstrtouint(b, 16, &tn))
+                               return -EINVAL;
+                       tmp += tn;
                }
 
                if (((tmp + fcs) & 0xff) != 0xff) {
@@ -505,7 +507,8 @@ static int bt3c_load_firmware(struct bt3c_info *info,
                        memset(b, 0, sizeof(b));
                        for (i = 0; i < (size - 4) / 2; i++) {
                                memcpy(b, ptr + (i * 4) + 12, 4);
-                               tmp = simple_strtoul(b, NULL, 16);
+                               if (kstrtouint(b, 16, &tmp))
+                                       return -EINVAL;
                                bt3c_put(iobase, tmp);
                        }
                }
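
The bt3c conversion is about error visibility: simple_strtol()/simple_strtoul() parse as far as they can and silently yield 0 for garbage (the callers passed a NULL end pointer), while kstrtouint() rejects malformed or out-of-range input. A minimal sketch of the checked parse:

        /* Sketch: parse a NUL-terminated hex field with error reporting. */
        static int demo_parse_hex(const char *buf, unsigned int *val)
        {
                return kstrtouint(buf, 16, val); /* 0, -EINVAL or -ERANGE */
        }
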
index 99cde1f9467d4edae71cc9820b35c00a75016650..e3e4d929e74f52566b385d56731c8e2d8fbf3daa 100644 (file)
@@ -324,6 +324,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
        { 0x4103, "BCM4330B1"   },      /* 002.001.003 */
        { 0x410e, "BCM43341B0"  },      /* 002.001.014 */
        { 0x4406, "BCM4324B3"   },      /* 002.004.006 */
+       { 0x6109, "BCM4335C0"   },      /* 003.001.009 */
        { 0x610c, "BCM4354"     },      /* 003.001.012 */
        { 0x2122, "BCM4343A0"   },      /* 001.001.034 */
        { 0x2209, "BCM43430A1"  },      /* 001.002.009 */
index 60d1419590babb9f275e460756250b90b9234ab7..3951f7b23840447030eaabe617d125b97c25495e 100644 (file)
@@ -21,8 +21,9 @@
 #include <net/rsi_91x.h>
 #include <net/genetlink.h>
 
-#define RSI_HEADROOM_FOR_BT_HAL        16
+#define RSI_DMA_ALIGN  8
 #define RSI_FRAME_DESC_SIZE    16
+#define RSI_HEADROOM_FOR_BT_HAL        (RSI_FRAME_DESC_SIZE + RSI_DMA_ALIGN)
 
 struct rsi_hci_adapter {
        void *priv;
@@ -70,6 +71,16 @@ static int rsi_hci_send_pkt(struct hci_dev *hdev, struct sk_buff *skb)
                bt_cb(new_skb)->pkt_type = hci_skb_pkt_type(skb);
                kfree_skb(skb);
                skb = new_skb;
+               if (!IS_ALIGNED((unsigned long)skb->data, RSI_DMA_ALIGN)) {
+                       u8 *skb_data = skb->data;
+                       int skb_len = skb->len;
+
+                       skb_push(skb, RSI_DMA_ALIGN);
+                       skb_pull(skb, PTR_ALIGN(skb->data,
+                                               RSI_DMA_ALIGN) - skb->data);
+                       memmove(skb->data, skb_data, skb_len);
+                       skb_trim(skb, skb_len);
+               }
        }
 
        return h_adapter->proto_ops->coex_send_pkt(h_adapter->priv, skb,
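
The headroom and alignment hunks work as a pair: RSI_HEADROOM_FOR_BT_HAL now reserves RSI_DMA_ALIGN spare bytes, and when skb->data lands unaligned the code borrows that headroom to shift the payload onto an aligned boundary in place. The idiom, sketched under the assumption that the caller has already checked !IS_ALIGNED() and that 'align' (a power of two) bytes of headroom exist:

        /* Sketch: realign skb->data to 'align' using headroom only. */
        static void demo_skb_realign(struct sk_buff *skb, int align)
        {
                u8 *old_data = skb->data;
                int old_len = skb->len;

                skb_push(skb, align);                   /* borrow headroom */
                skb_pull(skb, PTR_ALIGN(skb->data, align) - skb->data);
                memmove(skb->data, old_data, old_len);  /* slide payload */
                skb_trim(skb, old_len);                 /* restore length */
        }
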
index 7f9ea8e4c1b227b56182ba83751a2f03bc670a07..41405de27d66570a1311ba7d9e1389d2d2cb5668 100644 (file)
@@ -138,6 +138,13 @@ static const struct id_table ic_id_table[] = {
          .fw_name  = "rtl_bt/rtl8761a_fw.bin",
          .cfg_name = "rtl_bt/rtl8761a_config" },
 
+       /* 8822C with USB interface */
+       { IC_INFO(RTL_ROM_LMP_8822B, 0xc),
+         .config_needed = false,
+         .has_rom_version = true,
+         .fw_name  = "rtl_bt/rtl8822cu_fw.bin",
+         .cfg_name = "rtl_bt/rtl8822cu_config" },
+
        /* 8822B */
        { IC_INFO(RTL_ROM_LMP_8822B, 0xb),
          .config_needed = true,
@@ -206,7 +213,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
                                struct btrtl_device_info *btrtl_dev,
                                unsigned char **_buf)
 {
-       const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
+       static const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
        struct rtl_epatch_header *epatch_info;
        unsigned char *buf;
        int i, len;
@@ -228,6 +235,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
                { RTL_ROM_LMP_8822B, 8 },
                { RTL_ROM_LMP_8723B, 9 },       /* 8723D */
                { RTL_ROM_LMP_8821A, 10 },      /* 8821C */
+               { RTL_ROM_LMP_8822B, 13 },      /* 8822C */
        };
 
        min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
index cd2e5cf14ea5b9a6bba979bd0d822a46812d574f..61cde1a7ec1be7fe25d3e1bc8a0f4366f1d309dd 100644 (file)
@@ -3096,6 +3096,7 @@ static int btusb_probe(struct usb_interface *intf,
                hdev->set_diag = btintel_set_diag;
                hdev->set_bdaddr = btintel_set_bdaddr;
                set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+               set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
                set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
        }
 
index e182f6019f68abeadac89ce14405a3e27efb7a49..d98ed04422017f9f9140c19a5324323e67003fec 100644 (file)
@@ -167,7 +167,8 @@ struct qca_serdev {
 };
 
 static int qca_power_setup(struct hci_uart *hu, bool on);
-static void qca_power_shutdown(struct hci_dev *hdev);
+static void qca_power_shutdown(struct hci_uart *hu);
+static int qca_power_off(struct hci_dev *hdev);
 
 static void __serial_clock_on(struct tty_struct *tty)
 {
@@ -499,7 +500,6 @@ static int qca_open(struct hci_uart *hu)
        hu->priv = qca;
 
        if (hu->serdev) {
-               serdev_device_open(hu->serdev);
 
                qcadev = serdev_device_get_drvdata(hu->serdev);
                if (qcadev->btsoc_type != QCA_WCN3990) {
@@ -609,11 +609,10 @@ static int qca_close(struct hci_uart *hu)
        if (hu->serdev) {
                qcadev = serdev_device_get_drvdata(hu->serdev);
                if (qcadev->btsoc_type == QCA_WCN3990)
-                       qca_power_shutdown(hu->hdev);
+                       qca_power_shutdown(hu);
                else
                        gpiod_set_value_cansleep(qcadev->bt_en, 0);
 
-               serdev_device_close(hu->serdev);
        }
 
        kfree_skb(qca->rx_skb);
@@ -1101,8 +1100,26 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
 static int qca_wcn3990_init(struct hci_uart *hu)
 {
        struct hci_dev *hdev = hu->hdev;
+       struct qca_serdev *qcadev;
        int ret;
 
+       /* Check the vregs status; a previous hci down may have turned
+        * off the voltage regulators.
+        */
+       qcadev = serdev_device_get_drvdata(hu->serdev);
+       if (!qcadev->bt_power->vregs_on) {
+               serdev_device_close(hu->serdev);
+               ret = qca_power_setup(hu, true);
+               if (ret)
+                       return ret;
+
+               ret = serdev_device_open(hu->serdev);
+               if (ret) {
+                       bt_dev_err(hu->hdev, "failed to open port");
+                       return ret;
+               }
+       }
+
        /* Forcefully enable wcn3990 to enter into boot mode. */
        host_set_baudrate(hu, 2400);
        ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE);
@@ -1154,6 +1171,12 @@ static int qca_setup(struct hci_uart *hu)
 
        if (qcadev->btsoc_type == QCA_WCN3990) {
                bt_dev_info(hdev, "setting up wcn3990");
+
+               /* Enable the NON_PERSISTENT_SETUP quirk to ensure that
+                * setup runs on every hci up.
+                */
+               set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+               hu->hdev->shutdown = qca_power_off;
                ret = qca_wcn3990_init(hu);
                if (ret)
                        return ret;
@@ -1232,15 +1255,26 @@ static const struct qca_vreg_data qca_soc_data = {
        .num_vregs = 4,
 };
 
-static void qca_power_shutdown(struct hci_dev *hdev)
+static void qca_power_shutdown(struct hci_uart *hu)
 {
-       struct hci_uart *hu = hci_get_drvdata(hdev);
+       struct serdev_device *serdev = hu->serdev;
+       unsigned char cmd = QCA_WCN3990_POWEROFF_PULSE;
 
        host_set_baudrate(hu, 2400);
-       qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE);
+       hci_uart_set_flow_control(hu, true);
+       serdev_device_write_buf(serdev, &cmd, sizeof(cmd));
+       hci_uart_set_flow_control(hu, false);
        qca_power_setup(hu, false);
 }
 
+static int qca_power_off(struct hci_dev *hdev)
+{
+       struct hci_uart *hu = hci_get_drvdata(hdev);
+
+       qca_power_shutdown(hu);
+       return 0;
+}
+
 static int qca_enable_regulator(struct qca_vreg vregs,
                                struct regulator *regulator)
 {
@@ -1413,7 +1447,7 @@ static void qca_serdev_remove(struct serdev_device *serdev)
        struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
 
        if (qcadev->btsoc_type == QCA_WCN3990)
-               qca_power_shutdown(qcadev->serdev_hu.hdev);
+               qca_power_shutdown(&qcadev->serdev_hu);
        else
                clk_disable_unprepare(qcadev->susclk);
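
Taken together, the hci_qca hunks move port and power handling off open()/close() and onto the hci up/down path: the new hdev->shutdown hook powers the WCN3990 down on every hci down, and the NON_PERSISTENT_SETUP quirk makes the core re-run setup() on the next hci up, where qca_wcn3990_init() reopens the serdev and re-enables the regulators. The wiring, as a sketch (demo_wire_power_hooks() is illustrative; the flag, hook and callback come from the diff):

        static void demo_wire_power_hooks(struct hci_dev *hdev)
        {
                /* re-run ->setup() on every hci up ... */
                set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
                /* ... and power the SoC down on every hci down */
                hdev->shutdown = qca_power_off;
        }
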
 
index aa2543b3c286968fece4acee97953eb2c1fe0ba8..c445aa9ac511ed2e5c3a037d6c522ff91e196d12 100644 (file)
@@ -57,9 +57,10 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
 {
        struct sk_buff *skb = hu->tx_skb;
 
-       if (!skb)
-               skb = hu->proto->dequeue(hu);
-       else
+       if (!skb) {
+               if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
+                       skb = hu->proto->dequeue(hu);
+       } else
                hu->tx_skb = NULL;
 
        return skb;
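
This hci_serdev pair closes a race with teardown: TX work can still be queued while the protocol is being unregistered, so HCI_UART_PROTO_READY is cleared before hci_unregister_dev() and re-checked at the one place in the TX path that dereferences hu->proto. The gate, as a sketch:

        /* Sketch: flag-gated dequeue; stale TX work sees the cleared
         * bit and backs off instead of calling into a dying proto. */
        static struct sk_buff *demo_dequeue(struct hci_uart *hu)
        {
                if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
                        return NULL;
                return hu->proto->dequeue(hu);
        }
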
@@ -94,7 +95,7 @@ static void hci_uart_write_work(struct work_struct *work)
                        hci_uart_tx_complete(hu, hci_skb_pkt_type(skb));
                        kfree_skb(skb);
                }
-       } while(test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
+       } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
 
        clear_bit(HCI_UART_SENDING, &hu->tx_state);
 }
@@ -368,6 +369,7 @@ void hci_uart_unregister_device(struct hci_uart *hu)
 {
        struct hci_dev *hdev = hu->hdev;
 
+       clear_bit(HCI_UART_PROTO_READY, &hu->flags);
        hci_unregister_dev(hdev);
        hci_free_dev(hdev);
 
index ec8a4376f74fb4f9da1f369a968df457064315e2..2fab18fae4fcbbeeb44cfbc4f2517258cd3dc505 100644 (file)
@@ -180,26 +180,29 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
        data->base = of_iomap(node, 0);
        if (!data->base) {
                pr_err("Could not map PIT address\n");
-               return -ENXIO;
+               ret = -ENXIO;
+               goto exit;
        }
 
        data->mck = of_clk_get(node, 0);
        if (IS_ERR(data->mck)) {
                pr_err("Unable to get mck clk\n");
-               return PTR_ERR(data->mck);
+               ret = PTR_ERR(data->mck);
+               goto exit;
        }
 
        ret = clk_prepare_enable(data->mck);
        if (ret) {
                pr_err("Unable to enable mck\n");
-               return ret;
+               goto exit;
        }
 
        /* Get the interrupts property */
        data->irq = irq_of_parse_and_map(node, 0);
        if (!data->irq) {
                pr_err("Unable to get IRQ from DT\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto exit;
        }
 
        /*
@@ -227,7 +230,7 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
        ret = clocksource_register_hz(&data->clksrc, pit_rate);
        if (ret) {
                pr_err("Failed to register clocksource\n");
-               return ret;
+               goto exit;
        }
 
        /* Set up irq handler */
@@ -236,7 +239,8 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
                          "at91_tick", data);
        if (ret) {
                pr_err("Unable to setup IRQ\n");
-               return ret;
+               clocksource_unregister(&data->clksrc);
+               goto exit;
        }
 
        /* Set up and register clockevents */
@@ -254,6 +258,10 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
        clockevents_register_device(&data->clkevt);
 
        return 0;
+
+exit:
+       kfree(data);
+       return ret;
 }
 TIMER_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
                       at91sam926x_pit_dt_init);
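
The at91 conversion is the standard single-exit error pattern: every failure jumps to one label that frees the allocation made at the top, and a step that already succeeded (the clocksource registration) is unwound before the jump. A generic sketch, with all demo_*/step_* names hypothetical:

        static int demo_init(void)
        {
                struct demo_data *data;
                int ret;

                data = kzalloc(sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;

                ret = step_one(data);
                if (ret)
                        goto exit;              /* nothing else to unwind */

                ret = step_two(data);
                if (ret) {
                        undo_step_one(data);    /* roll back partial state */
                        goto exit;
                }
                return 0;

        exit:
                kfree(data);                    /* single cleanup point */
                return ret;
        }
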
index c020038ebfab2242ed844a143f3ce6706985685c..cf93f6419b5142e397747be406138dacf3278a5c 100644 (file)
@@ -130,13 +130,17 @@ static int fttmr010_timer_set_next_event(unsigned long cycles,
        cr &= ~fttmr010->t1_enable_val;
        writel(cr, fttmr010->base + TIMER_CR);
 
-       /* Setup the match register forward/backward in time */
-       cr = readl(fttmr010->base + TIMER1_COUNT);
-       if (fttmr010->count_down)
-               cr -= cycles;
-       else
-               cr += cycles;
-       writel(cr, fttmr010->base + TIMER1_MATCH1);
+       if (fttmr010->count_down) {
+               /*
+                * The ASPEED timer controller loads the TIMER1_LOAD register
+                * into the TIMER1_COUNT register when the timer is re-enabled.
+                */
+               writel(cycles, fttmr010->base + TIMER1_LOAD);
+       } else {
+               /* Setup the match register forward in time */
+               cr = readl(fttmr010->base + TIMER1_COUNT);
+               writel(cr + cycles, fttmr010->base + TIMER1_MATCH1);
+       }
 
        /* Start */
        cr = readl(fttmr010->base + TIMER_CR);
index 29e2e1a78a43372ee96e64bb9b93d6b21b5288f7..6949a9113dbb417aec69b444f365746220943e16 100644 (file)
@@ -97,6 +97,9 @@ static int __init ti_32k_timer_init(struct device_node *np)
                return -ENXIO;
        }
 
+       if (!of_machine_is_compatible("ti,am43"))
+               ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
+
        ti_32k_timer.counter = ti_32k_timer.base;
 
        /*
index a1830fa25fc5bd5eed0c627592c3a11c29e121da..2a3675c24032bc8059c4c591698d6a7b5218cf1d 100644 (file)
@@ -44,7 +44,7 @@ enum _msm8996_version {
 
 struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
 
-static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
+static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
 {
        size_t len;
        u32 *msm_id;
@@ -222,7 +222,7 @@ static int __init qcom_cpufreq_kryo_init(void)
 }
 module_init(qcom_cpufreq_kryo_init);
 
-static void __init qcom_cpufreq_kryo_exit(void)
+static void __exit qcom_cpufreq_kryo_exit(void)
 {
        platform_device_unregister(kryo_cpufreq_pdev);
        platform_driver_unregister(&qcom_cpufreq_kryo_driver);
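
The kryo annotations follow the section rules: __init code is discarded once boot finishes, so a helper that is also reachable from the probe path (which deferred probing may run long after boot) must not carry it, while the module-exit hook belongs in .exit.text via __exit. In miniature, as a sketch of a trivial module:

        #include <linux/module.h>

        static int  __init demo_init(void) { return 0; } /* freed after boot */
        static void __exit demo_exit(void) { }           /* only on rmmod */
        static int demo_probe(void) { return 0; }        /* no __init: may be
                                                            called after boot */

        module_init(demo_init);
        module_exit(demo_exit);
        MODULE_LICENSE("GPL");
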
index d67667970f7e21ae265076beecf24710e89fde67..ec40f991e6c63c4e6be98df8fc964312bc9afbf2 100644 (file)
@@ -1553,8 +1553,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
        edesc->src_nents = src_nents;
        edesc->dst_nents = dst_nents;
        edesc->sec4_sg_bytes = sec4_sg_bytes;
-       edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
-                        desc_bytes;
+       edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+                                                 desc_bytes);
        edesc->iv_dir = DMA_TO_DEVICE;
 
        /* Make sure IV is located in a DMAable area */
@@ -1757,8 +1757,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
        edesc->src_nents = src_nents;
        edesc->dst_nents = dst_nents;
        edesc->sec4_sg_bytes = sec4_sg_bytes;
-       edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
-                        desc_bytes;
+       edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+                                                 desc_bytes);
        edesc->iv_dir = DMA_FROM_DEVICE;
 
        /* Make sure IV is located in a DMAable area */
index 5c539af8ed6048c9687dae5f99653d57a17bc4c7..010bbf607797f26acaecd1dc542ffa2fb049bf7a 100644 (file)
@@ -367,7 +367,8 @@ static inline void dsgl_walk_init(struct dsgl_walk *walk,
        walk->to = (struct phys_sge_pairs *)(dsgl + 1);
 }
 
-static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
+static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
+                                int pci_chan_id)
 {
        struct cpl_rx_phys_dsgl *phys_cpl;
 
@@ -385,6 +386,7 @@ static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
        phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
        phys_cpl->rss_hdr_int.qid = htons(qid);
        phys_cpl->rss_hdr_int.hash_val = 0;
+       phys_cpl->rss_hdr_int.channel = pci_chan_id;
 }
 
 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
@@ -718,7 +720,7 @@ static inline void create_wreq(struct chcr_context *ctx,
                FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
                                !!lcb, ctx->tx_qidx);
 
-       chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
+       chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
                                                       qid);
        chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
                                     ((sizeof(chcr_req->wreq)) >> 4)));
@@ -1339,16 +1341,23 @@ static int chcr_device_init(struct chcr_context *ctx)
                                    adap->vres.ncrypto_fc);
                rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
                txq_perchan = ntxq / u_ctx->lldi.nchan;
-               rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
-               rxq_idx += id % rxq_perchan;
-               txq_idx = ctx->dev->tx_channel_id * txq_perchan;
-               txq_idx += id % txq_perchan;
                spin_lock(&ctx->dev->lock_chcr_dev);
-               ctx->rx_qidx = rxq_idx;
-               ctx->tx_qidx = txq_idx;
+               ctx->tx_chan_id = ctx->dev->tx_channel_id;
                ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
                ctx->dev->rx_channel_id = 0;
                spin_unlock(&ctx->dev->lock_chcr_dev);
+               rxq_idx = ctx->tx_chan_id * rxq_perchan;
+               rxq_idx += id % rxq_perchan;
+               txq_idx = ctx->tx_chan_id * txq_perchan;
+               txq_idx += id % txq_perchan;
+               ctx->rx_qidx = rxq_idx;
+               ctx->tx_qidx = txq_idx;
+               /* Channel ID used by the SGE to forward the packet to the
+                * host. The FW must use the same value in the cpl_fw6_pld
+                * RSS_CH field. The driver programs the PCI channel ID used
+                * by the FW at queue allocation, via "pi->tx_chan".
+                */
+               ctx->pci_chan_id = txq_idx / txq_perchan;
        }
 out:
        return err;
@@ -2503,6 +2512,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct dsgl_walk dsgl_walk;
        unsigned int authsize = crypto_aead_authsize(tfm);
+       struct chcr_context *ctx = a_ctx(tfm);
        u32 temp;
 
        dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2512,7 +2522,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
        dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
        temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
        dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
-       dsgl_walk_end(&dsgl_walk, qid);
+       dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }
 
 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
@@ -2544,6 +2554,8 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
                             unsigned short qid)
 {
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+       struct chcr_context *ctx = c_ctx(tfm);
        struct dsgl_walk dsgl_walk;
 
        dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2552,7 +2564,7 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
        reqctx->dstsg = dsgl_walk.last_sg;
        reqctx->dst_ofst = dsgl_walk.last_sg_len;
 
-       dsgl_walk_end(&dsgl_walk, qid);
+       dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }
 
 void chcr_add_hash_src_ent(struct ahash_request *req,
index 54835cb109e561ee4ce68f673eaceda38a39e35e..0d2c70c344f39bdf34751a1d64edb2efea426e3b 100644 (file)
@@ -255,6 +255,8 @@ struct chcr_context {
        struct chcr_dev *dev;
        unsigned char tx_qidx;
        unsigned char rx_qidx;
+       unsigned char tx_chan_id;
+       unsigned char pci_chan_id;
        struct __crypto_ctx crypto_ctx[0];
 };
 
index a10c418d4e5c60b7aba530f5ce7a9a0865930489..56bd28174f5251c11c8996a959fc7e96160ee6ac 100644 (file)
@@ -63,7 +63,7 @@ struct dcp {
        struct dcp_coherent_block       *coh;
 
        struct completion               completion[DCP_MAX_CHANS];
-       struct mutex                    mutex[DCP_MAX_CHANS];
+       spinlock_t                      lock[DCP_MAX_CHANS];
        struct task_struct              *thread[DCP_MAX_CHANS];
        struct crypto_queue             queue[DCP_MAX_CHANS];
 };
@@ -349,13 +349,20 @@ static int dcp_chan_thread_aes(void *data)
 
        int ret;
 
-       do {
-               __set_current_state(TASK_INTERRUPTIBLE);
+       while (!kthread_should_stop()) {
+               set_current_state(TASK_INTERRUPTIBLE);
 
-               mutex_lock(&sdcp->mutex[chan]);
+               spin_lock(&sdcp->lock[chan]);
                backlog = crypto_get_backlog(&sdcp->queue[chan]);
                arq = crypto_dequeue_request(&sdcp->queue[chan]);
-               mutex_unlock(&sdcp->mutex[chan]);
+               spin_unlock(&sdcp->lock[chan]);
+
+               if (!backlog && !arq) {
+                       schedule();
+                       continue;
+               }
+
+               set_current_state(TASK_RUNNING);
 
                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);
@@ -363,11 +370,8 @@ static int dcp_chan_thread_aes(void *data)
                if (arq) {
                        ret = mxs_dcp_aes_block_crypt(arq);
                        arq->complete(arq, ret);
-                       continue;
                }
-
-               schedule();
-       } while (!kthread_should_stop());
+       }
 
        return 0;
 }
@@ -409,9 +413,9 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
        rctx->ecb = ecb;
        actx->chan = DCP_CHAN_CRYPTO;
 
-       mutex_lock(&sdcp->mutex[actx->chan]);
+       spin_lock(&sdcp->lock[actx->chan]);
        ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-       mutex_unlock(&sdcp->mutex[actx->chan]);
+       spin_unlock(&sdcp->lock[actx->chan]);
 
        wake_up_process(sdcp->thread[actx->chan]);
 
@@ -640,13 +644,20 @@ static int dcp_chan_thread_sha(void *data)
        struct ahash_request *req;
        int ret, fini;
 
-       do {
-               __set_current_state(TASK_INTERRUPTIBLE);
+       while (!kthread_should_stop()) {
+               set_current_state(TASK_INTERRUPTIBLE);
 
-               mutex_lock(&sdcp->mutex[chan]);
+               spin_lock(&sdcp->lock[chan]);
                backlog = crypto_get_backlog(&sdcp->queue[chan]);
                arq = crypto_dequeue_request(&sdcp->queue[chan]);
-               mutex_unlock(&sdcp->mutex[chan]);
+               spin_unlock(&sdcp->lock[chan]);
+
+               if (!backlog && !arq) {
+                       schedule();
+                       continue;
+               }
+
+               set_current_state(TASK_RUNNING);
 
                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);
@@ -658,12 +669,8 @@ static int dcp_chan_thread_sha(void *data)
                        ret = dcp_sha_req_to_buf(arq);
                        fini = rctx->fini;
                        arq->complete(arq, ret);
-                       if (!fini)
-                               continue;
                }
-
-               schedule();
-       } while (!kthread_should_stop());
+       }
 
        return 0;
 }
@@ -721,9 +728,9 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
                rctx->init = 1;
        }
 
-       mutex_lock(&sdcp->mutex[actx->chan]);
+       spin_lock(&sdcp->lock[actx->chan]);
        ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-       mutex_unlock(&sdcp->mutex[actx->chan]);
+       spin_unlock(&sdcp->lock[actx->chan]);
 
        wake_up_process(sdcp->thread[actx->chan]);
        mutex_unlock(&actx->mutex);
@@ -997,7 +1004,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, sdcp);
 
        for (i = 0; i < DCP_MAX_CHANS; i++) {
-               mutex_init(&sdcp->mutex[i]);
+               spin_lock_init(&sdcp->lock[i]);
                init_completion(&sdcp->completion[i]);
                crypto_init_queue(&sdcp->queue[i], 50);
        }
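
The mxs-dcp rework fixes a sleep-state bug: the old loop set TASK_INTERRUPTIBLE and then took a mutex, which can itself sleep and clobber that state, so wakeups could be lost. The new shape sets the state first, checks for work under a spinlock (which never sleeps), and only then either runs or schedules; a wake_up_process() landing in between merely turns schedule() into a near no-op. The skeleton, with work_available()/do_work() hypothetical:

        static int demo_worker(void *data)
        {
                while (!kthread_should_stop()) {
                        set_current_state(TASK_INTERRUPTIBLE);

                        if (!work_available(data)) {
                                schedule();     /* sleep until woken */
                                continue;
                        }

                        set_current_state(TASK_RUNNING);
                        do_work(data);
                }
                return 0;
        }
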
index ba197f34c252a8d9acc57d400a478797577eeb2f..763c2166ee0ec970a03366c0897b11c9eea2e5a3 100644 (file)
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct adf_hw_device_data *hw_data;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
-       int ret, bar_mask;
+       unsigned long bar_mask;
+       int ret;
 
        switch (ent->device) {
        case ADF_C3XXX_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Find and map all the device's BARS */
        i = 0;
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-                        ADF_PCI_MAX_BARS * 2) {
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
                struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
                bar->base_addr = pci_resource_start(pdev, bar_nr);
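
The qat type change is more than cosmetic: for_each_set_bit() walks an array of unsigned long, so handing it a casted int pointer reads past the object on 64-bit kernels (and reads the wrong bytes on big-endian). Declaring the mask as unsigned long removes both the cast and the out-of-bounds access. Usage sketch, with handle_bar() hypothetical:

        static void demo_map_bars(struct pci_dev *pdev)
        {
                unsigned long mask = pci_select_bars(pdev, IORESOURCE_MEM);
                unsigned int bar;

                /* for_each_set_bit() expects unsigned long words */
                for_each_set_bit(bar, &mask, BITS_PER_LONG)
                        handle_bar(pdev, bar);
        }
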
index 24ec908eb26c25c0f26efbe55b15fc34f421dcf9..613c7d5644ced6d250adefdf3d49a7c4d0876938 100644 (file)
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct adf_hw_device_data *hw_data;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
-       int ret, bar_mask;
+       unsigned long bar_mask;
+       int ret;
 
        switch (ent->device) {
        case ADF_C3XXXIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Find and map all the device's BARS */
        i = 0;
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-                        ADF_PCI_MAX_BARS * 2) {
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
                struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
                bar->base_addr = pci_resource_start(pdev, bar_nr);
index 59a5a0df50b61ed310b7101df1874a14a632f83d..9cb832963357ddcf23686cc3bfec34960646d611 100644 (file)
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct adf_hw_device_data *hw_data;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
-       int ret, bar_mask;
+       unsigned long bar_mask;
+       int ret;
 
        switch (ent->device) {
        case ADF_C62X_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Find and map all the device's BARS */
        i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-                        ADF_PCI_MAX_BARS * 2) {
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
                struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
                bar->base_addr = pci_resource_start(pdev, bar_nr);
index b9f3e0e4fde97dbbcc81c5614b9c348aecf6f717..278452b8ef81c8a5e5aa296a38bb8caf6402fdd3 100644 (file)
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct adf_hw_device_data *hw_data;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
-       int ret, bar_mask;
+       unsigned long bar_mask;
+       int ret;
 
        switch (ent->device) {
        case ADF_C62XIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Find and map all the device's BARS */
        i = 0;
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-                        ADF_PCI_MAX_BARS * 2) {
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
                struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
                bar->base_addr = pci_resource_start(pdev, bar_nr);
index be5c5a988ca59bed894d52e0e2d734f291e8f0f1..3a9708ef4ce2147b0aa6d60aba27a56506779e3f 100644 (file)
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct adf_hw_device_data *hw_data;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
-       int ret, bar_mask;
+       unsigned long bar_mask;
+       int ret;
 
        switch (ent->device) {
        case ADF_DH895XCC_PCI_DEVICE_ID:
@@ -237,8 +238,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Find and map all the device's BARS */
        i = 0;
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-                        ADF_PCI_MAX_BARS * 2) {
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
                struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
                bar->base_addr = pci_resource_start(pdev, bar_nr);
index 26ab17bfc6dabd0fb75d42284f3febef8e390e79..3da0f951cb590a555fea8a9d848c657888a305d9 100644 (file)
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct adf_hw_device_data *hw_data;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
-       int ret, bar_mask;
+       unsigned long bar_mask;
+       int ret;
 
        switch (ent->device) {
        case ADF_DH895XCCIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Find and map all the device's BARS */
        i = 0;
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-                        ADF_PCI_MAX_BARS * 2) {
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
                struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
                bar->base_addr = pci_resource_start(pdev, bar_nr);
index bbe4d72ca105b001e36b1d09d382ee9e3a89ee7c..948806e57cee33f74024adb442f398579319b89d 100644 (file)
@@ -535,6 +535,11 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
        return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
 }
 
+static const struct address_space_operations dev_dax_aops = {
+       .set_page_dirty         = noop_set_page_dirty,
+       .invalidatepage         = noop_invalidatepage,
+};
+
 static int dax_open(struct inode *inode, struct file *filp)
 {
        struct dax_device *dax_dev = inode_dax(inode);
@@ -544,6 +549,7 @@ static int dax_open(struct inode *inode, struct file *filp)
        dev_dbg(&dev_dax->dev, "trace\n");
        inode->i_mapping = __dax_inode->i_mapping;
        inode->i_mapping->host = __dax_inode;
+       inode->i_mapping->a_ops = &dev_dax_aops;
        filp->f_mapping = inode->i_mapping;
        filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
        filp->private_data = dev_dax;
index e8f8a199939350a97754031781d3733849a49525..a57300c1d649a36ef6ecbd207a2afd4e92b86513 100644 (file)
@@ -571,7 +571,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
                if (ret)
                        goto out_free_descs;
                lh->descs[i] = desc;
-               count = i;
+               count = i + 1;
 
                if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
                        set_bit(FLAG_ACTIVE_LOW, &desc->flags);
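
The gpiolib one-liner fixes rollback accounting: count drives the error-path loop that releases every descriptor requested so far, and count = i left the just-stored lh->descs[i] out of the cleanup, leaking it. The invariant, sketched with hypothetical acquire()/release():

        static int demo_acquire_all(struct obj *objs, int n)
        {
                int i, count = 0, ret = 0;

                for (i = 0; i < n; i++) {
                        ret = acquire(&objs[i]);
                        if (ret)
                                goto out_free;
                        count = i + 1;          /* objs[0..i] are live */
                }
                return 0;

        out_free:
                while (count--)
                        release(&objs[count]);
                return ret;
        }
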
index 0cc5190f4f36e4a1b192a17b274bbd4fe38c00b6..5f3f540738187c6db03a7975bced71ea4163c9e0 100644 (file)
@@ -258,6 +258,8 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
 {
        int i;
 
+       cancel_delayed_work_sync(&adev->vce.idle_work);
+
        if (adev->vce.vcpu_bo == NULL)
                return 0;
 
@@ -268,7 +270,6 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
        if (i == AMDGPU_MAX_VCE_HANDLES)
                return 0;
 
-       cancel_delayed_work_sync(&adev->vce.idle_work);
        /* TODO: suspending running encoding sessions isn't supported */
        return -EINVAL;
 }
index fd654a4406db964da6611f6c2d3d9db1fb283519..400fc74bbae27e878aebe4e6e27f6eaf22ca8e15 100644 (file)
@@ -153,11 +153,11 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
        unsigned size;
        void *ptr;
 
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        if (adev->vcn.vcpu_bo == NULL)
                return 0;
 
-       cancel_delayed_work_sync(&adev->vcn.idle_work);
-
        size = amdgpu_bo_size(adev->vcn.vcpu_bo);
        ptr = adev->vcn.cpu_addr;
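
Both the VCE and VCN hunks enforce the same ordering rule: cancel the idle worker before any early return, otherwise a suspend that bails out with "nothing to save" leaves a delayed work item free to rearm and touch powered-down hardware. The shape, with demo_* names illustrative:

        static int demo_suspend(struct demo_dev *dev)
        {
                /* quiesce deferred work before any early-out */
                cancel_delayed_work_sync(&dev->idle_work);

                if (!dev->vcpu_bo)
                        return 0;       /* nothing to save */

                /* ... snapshot firmware state ... */
                return 0;
        }
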
 
index ec0d62a16e538c305f631b831432df0566b051c8..4f22e745df51b4c2aad4ec842f04ac96b4070f68 100644 (file)
@@ -358,8 +358,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
                                        struct queue *q,
                                        struct qcm_process_device *qpd)
 {
-       int retval;
        struct mqd_manager *mqd_mgr;
+       int retval;
 
        mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
        if (!mqd_mgr)
@@ -387,8 +387,12 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
        if (!q->properties.is_active)
                return 0;
 
-       retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
-                       &q->properties, q->process->mm);
+       if (WARN(q->process->mm != current->mm,
+                "should only run in user thread"))
+               retval = -EFAULT;
+       else
+               retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+                                          &q->properties, current->mm);
        if (retval)
                goto out_uninit_mqd;
 
@@ -545,9 +549,15 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
                retval = map_queues_cpsch(dqm);
        else if (q->properties.is_active &&
                 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
-                 q->properties.type == KFD_QUEUE_TYPE_SDMA))
-               retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
-                                      &q->properties, q->process->mm);
+                 q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
+               if (WARN(q->process->mm != current->mm,
+                        "should only run in user thread"))
+                       retval = -EFAULT;
+               else
+                       retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
+                                                  q->pipe, q->queue,
+                                                  &q->properties, current->mm);
+       }
 
 out_unlock:
        dqm_unlock(dqm);
@@ -653,6 +663,7 @@ out:
 static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
                                          struct qcm_process_device *qpd)
 {
+       struct mm_struct *mm = NULL;
        struct queue *q;
        struct mqd_manager *mqd_mgr;
        struct kfd_process_device *pdd;
@@ -686,6 +697,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
                kfd_flush_tlb(pdd);
        }
 
+       /* Take a safe reference to the mm_struct, which may otherwise
+        * disappear even while the kfd_process is still referenced.
+        */
+       mm = get_task_mm(pdd->process->lead_thread);
+       if (!mm) {
+               retval = -EFAULT;
+               goto out;
+       }
+
        /* activate all active queues on the qpd */
        list_for_each_entry(q, &qpd->queues_list, list) {
                if (!q->properties.is_evicted)
@@ -700,14 +720,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
                q->properties.is_evicted = false;
                q->properties.is_active = true;
                retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
-                                      q->queue, &q->properties,
-                                      q->process->mm);
+                                      q->queue, &q->properties, mm);
                if (retval)
                        goto out;
                dqm->queue_count++;
        }
        qpd->evicted = 0;
 out:
+       if (mm)
+               mmput(mm);
        dqm_unlock(dqm);
        return retval;
 }
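
The kfd fixes all revolve around whose mm is current: load_mqd() ends up touching user memory, so it may only use current->mm when called from the owning user thread, and the restore path, which can run from another context, must pin the target process's mm explicitly. get_task_mm() takes that safe reference (NULL if the process already exited) and mmput() releases it. Sketch:

        static int demo_with_mm(struct task_struct *task)
        {
                struct mm_struct *mm = get_task_mm(task); /* NULL if gone */

                if (!mm)
                        return -EFAULT;
                do_mapped_work(mm);     /* hypothetical consumer */
                mmput(mm);              /* drop the reference */
                return 0;
        }
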
index 800f481a6995fce0129bb316e078d2dcd8ab9eeb..6903fe6c894ba053693c16d3dd23538d58026ea2 100644 (file)
@@ -641,6 +641,87 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
        return NULL;
 }
 
+static void emulated_link_detect(struct dc_link *link)
+{
+       struct dc_sink_init_data sink_init_data = { 0 };
+       struct display_sink_capability sink_caps = { 0 };
+       enum dc_edid_status edid_status;
+       struct dc_context *dc_ctx = link->ctx;
+       struct dc_sink *sink = NULL;
+       struct dc_sink *prev_sink = NULL;
+
+       link->type = dc_connection_none;
+       prev_sink = link->local_sink;
+
+       if (prev_sink != NULL)
+               dc_sink_retain(prev_sink);
+
+       switch (link->connector_signal) {
+       case SIGNAL_TYPE_HDMI_TYPE_A: {
+               sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+               sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+               break;
+       }
+
+       case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+               sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+               sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+               break;
+       }
+
+       case SIGNAL_TYPE_DVI_DUAL_LINK: {
+               sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+               sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+               break;
+       }
+
+       case SIGNAL_TYPE_LVDS: {
+               sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+               sink_caps.signal = SIGNAL_TYPE_LVDS;
+               break;
+       }
+
+       case SIGNAL_TYPE_EDP: {
+               sink_caps.transaction_type =
+                       DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+               sink_caps.signal = SIGNAL_TYPE_EDP;
+               break;
+       }
+
+       case SIGNAL_TYPE_DISPLAY_PORT: {
+               sink_caps.transaction_type =
+                       DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+               sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
+               break;
+       }
+
+       default:
+               DC_ERROR("Invalid connector type! signal:%d\n",
+                       link->connector_signal);
+               return;
+       }
+
+       sink_init_data.link = link;
+       sink_init_data.sink_signal = sink_caps.signal;
+
+       sink = dc_sink_create(&sink_init_data);
+       if (!sink) {
+               DC_ERROR("Failed to create sink!\n");
+               return;
+       }
+
+       link->local_sink = sink;
+
+       edid_status = dm_helpers_read_local_edid(
+                       link->ctx,
+                       link,
+                       sink);
+
+       if (edid_status != EDID_OK)
+               DC_ERROR("Failed to read EDID");
+
+}
+
 static int dm_resume(void *handle)
 {
        struct amdgpu_device *adev = handle;
@@ -654,6 +735,7 @@ static int dm_resume(void *handle)
        struct drm_plane *plane;
        struct drm_plane_state *new_plane_state;
        struct dm_plane_state *dm_new_plane_state;
+       enum dc_connection_type new_connection_type = dc_connection_none;
        int ret;
        int i;
 
@@ -684,7 +766,13 @@ static int dm_resume(void *handle)
                        continue;
 
                mutex_lock(&aconnector->hpd_lock);
-               dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+               if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+                       DRM_ERROR("KMS: Failed to detect connector\n");
+
+               if (aconnector->base.force && new_connection_type == dc_connection_none)
+                       emulated_link_detect(aconnector->dc_link);
+               else
+                       dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
 
                if (aconnector->fake_enable && aconnector->dc_link->local_sink)
                        aconnector->fake_enable = false;
@@ -922,6 +1010,7 @@ static void handle_hpd_irq(void *param)
        struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;
+       enum dc_connection_type new_connection_type = dc_connection_none;
 
        /* In case of failure or MST no need to update connector status or notify the OS
         * since (for MST case) MST does this in its own context.
@@ -931,7 +1020,21 @@ static void handle_hpd_irq(void *param)
        if (aconnector->fake_enable)
                aconnector->fake_enable = false;
 
-       if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
+       if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+               DRM_ERROR("KMS: Failed to detect connector\n");
+
+       if (aconnector->base.force && new_connection_type == dc_connection_none) {
+               emulated_link_detect(aconnector->dc_link);
+
+
+               drm_modeset_lock_all(dev);
+               dm_restore_drm_connector_state(dev, connector);
+               drm_modeset_unlock_all(dev);
+
+               if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+                       drm_kms_helper_hotplug_event(dev);
+
+       } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
                amdgpu_dm_update_connector_after_detect(aconnector);
 
 
@@ -1031,6 +1134,7 @@ static void handle_hpd_rx_irq(void *param)
        struct drm_device *dev = connector->dev;
        struct dc_link *dc_link = aconnector->dc_link;
        bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+       enum dc_connection_type new_connection_type = dc_connection_none;
 
        /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
         * conflict, after implement i2c helper, this mutex should be
@@ -1042,7 +1146,24 @@ static void handle_hpd_rx_irq(void *param)
        if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
                        !is_mst_root_connector) {
                /* Downstream Port status changed. */
-               if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+               if (!dc_link_detect_sink(dc_link, &new_connection_type))
+                       DRM_ERROR("KMS: Failed to detect connector\n");
+
+               if (aconnector->base.force && new_connection_type == dc_connection_none) {
+                       emulated_link_detect(dc_link);
+
+                       if (aconnector->fake_enable)
+                               aconnector->fake_enable = false;
+
+                       amdgpu_dm_update_connector_after_detect(aconnector);
+
+
+                       drm_modeset_lock_all(dev);
+                       dm_restore_drm_connector_state(dev, connector);
+                       drm_modeset_unlock_all(dev);
+
+                       drm_kms_helper_hotplug_event(dev);
+               } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
 
                        if (aconnector->fake_enable)
                                aconnector->fake_enable = false;
@@ -1433,6 +1554,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        uint32_t link_cnt;
        int32_t total_overlay_planes, total_primary_planes;
+       enum dc_connection_type new_connection_type = dc_connection_none;
 
        link_cnt = dm->dc->caps.max_links;
        if (amdgpu_dm_mode_config_init(dm->adev)) {
@@ -1499,7 +1621,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 
                link = dc_get_link_at_index(dm->dc, i);
 
-               if (dc_link_detect(link, DETECT_REASON_BOOT)) {
+               if (!dc_link_detect_sink(link, &new_connection_type))
+                       DRM_ERROR("KMS: Failed to detect connector\n");
+
+               if (aconnector->base.force && new_connection_type == dc_connection_none) {
+                       emulated_link_detect(link);
+                       amdgpu_dm_update_connector_after_detect(aconnector);
+
+               } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
                        amdgpu_dm_update_connector_after_detect(aconnector);
                        register_backlight_device(dm, link);
                }
@@ -2494,7 +2623,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        if (dm_state && dm_state->freesync_capable)
                stream->ignore_msa_timing_param = true;
 finish:
-       if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
+       if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
                dc_sink_release(sink);
 
        return stream;
@@ -4504,12 +4633,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
        }
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
-       /* Signal HW programming completion */
-       drm_atomic_helper_commit_hw_done(state);
 
        if (wait_for_vblank)
                drm_atomic_helper_wait_for_flip_done(dev, state);
 
+       /*
+        * FIXME:
+        * Delay hw_done() until flip_done() is signaled. This is to block
+        * another commit from freeing the CRTC state while we're still
+        * waiting on flip_done.
+        */
+       drm_atomic_helper_commit_hw_done(state);
+
        drm_atomic_helper_cleanup_planes(dev, state);
 
        /* Finally, drop a runtime PM reference for each newly disabled CRTC,
index 37eaf72ace549d6f132b9fc5933da434fc164396..fced3c1c2ef5f6ac117a993714de8bf24da2258a 100644 (file)
@@ -195,7 +195,7 @@ static bool program_hpd_filter(
        return result;
 }
 
-static bool detect_sink(struct dc_link *link, enum dc_connection_type *type)
+bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
 {
        uint32_t is_hpd_high = 0;
        struct gpio *hpd_pin;
@@ -604,7 +604,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
        if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
                return false;
 
-       if (false == detect_sink(link, &new_connection_type)) {
+       if (false == dc_link_detect_sink(link, &new_connection_type)) {
                BREAK_TO_DEBUGGER();
                return false;
        }
index d43cefbc43d3b195c40ad6f6829eadeaeb96daee..1b48ab9aea897cd28422a57c11909a769cd7a6a3 100644 (file)
@@ -215,6 +215,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
 
 bool dc_link_is_dp_sink_present(struct dc_link *link);
 
+bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
 /*
  * DPCD access interfaces
  */
index 14384d9675a8c4cc23fc67162f9f3f9cd7a17a22..b2f308766a9e8e5cfc6c34dea6e9b4515cf7fc8d 100644 (file)
@@ -2560,7 +2560,7 @@ static void pplib_apply_display_requirements(
        dc->prev_display_config = *pp_display_cfg;
 }
 
-void dce110_set_bandwidth(
+static void dce110_set_bandwidth(
                struct dc *dc,
                struct dc_state *context,
                bool decrease_allowed)
index e4c5db75c4c656b010e16ab490286801bb94bfb4..d6db3dbd90153ba4a3f9511eb494552b143b4255 100644 (file)
@@ -68,11 +68,6 @@ void dce110_fill_display_configs(
        const struct dc_state *context,
        struct dm_pp_display_configuration *pp_display_cfg);
 
-void dce110_set_bandwidth(
-               struct dc *dc,
-               struct dc_state *context,
-               bool decrease_allowed);
-
 uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
 
 void dp_receiver_power_ctrl(struct dc_link *link, bool on);
index 5853522a618298a6bdc20df629f1c494d13468ae..eb0f5f9a973b9b2f79793023d8f1cdf21aae787c 100644 (file)
@@ -244,17 +244,6 @@ static void dce120_update_dchub(
        dh_data->dchub_info_valid = false;
 }
 
-static void dce120_set_bandwidth(
-               struct dc *dc,
-               struct dc_state *context,
-               bool decrease_allowed)
-{
-       if (context->stream_count <= 0)
-               return;
-
-       dce110_set_bandwidth(dc, context, decrease_allowed);
-}
-
 void dce120_hw_sequencer_construct(struct dc *dc)
 {
        /* All registers used by dce11.2 match those in dce11 in offset and
@@ -263,6 +252,5 @@ void dce120_hw_sequencer_construct(struct dc *dc)
        dce110_hw_sequencer_construct(dc);
        dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
        dc->hwss.update_dchub = dce120_update_dchub;
-       dc->hwss.set_bandwidth = dce120_set_bandwidth;
 }
 
index 08b5bb219816ad38f929a23ccb69b378cba2101f..94d6dabec2dc80ee47794430d0642de6846fbfe4 100644 (file)
@@ -754,6 +754,7 @@ static int malidp_bind(struct device *dev)
        drm->irq_enabled = true;
 
        ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+       drm_crtc_vblank_reset(&malidp->crtc);
        if (ret < 0) {
                DRM_ERROR("failed to initialise vblank\n");
                goto vblank_fail;
index c94a4422e0e9100a607a8b817878c25942ce77bd..2781e462c1ed5dd7b275ad0d9b9cccc1cc258a1f 100644 (file)
@@ -384,7 +384,8 @@ static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev,
 
 static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
                                     dma_addr_t *addrs, s32 *pitches,
-                                    int num_planes, u16 w, u16 h, u32 fmt_id)
+                                    int num_planes, u16 w, u16 h, u32 fmt_id,
+                                    const s16 *rgb2yuv_coeffs)
 {
        u32 base = MALIDP500_SE_MEMWRITE_BASE;
        u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
@@ -416,6 +417,16 @@ static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
 
        malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h),
                        MALIDP500_SE_MEMWRITE_OUT_SIZE);
+
+       if (rgb2yuv_coeffs) {
+               int i;
+
+               for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
+                       malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
+                                       MALIDP500_SE_RGB_YUV_COEFFS + i * 4);
+               }
+       }
+
        malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL);
 
        return 0;
@@ -658,7 +669,8 @@ static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev,
 
 static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
                                     dma_addr_t *addrs, s32 *pitches,
-                                    int num_planes, u16 w, u16 h, u32 fmt_id)
+                                    int num_planes, u16 w, u16 h, u32 fmt_id,
+                                    const s16 *rgb2yuv_coeffs)
 {
        u32 base = MALIDP550_SE_MEMWRITE_BASE;
        u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
@@ -689,6 +701,15 @@ static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
        malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN,
                          MALIDP550_SE_CONTROL);
 
+       if (rgb2yuv_coeffs) {
+               int i;
+
+               for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
+                       malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
+                                       MALIDP550_SE_RGB_YUV_COEFFS + i * 4);
+               }
+       }
+
        return 0;
 }
 
index ad2e96915d44a253c8d77dc2e5d98c22c758fc5c..9fc94c08190f23184985e3beb40f4cc88dc2e430 100644 (file)
@@ -191,7 +191,8 @@ struct malidp_hw {
         * @param fmt_id - internal format ID of output buffer
         */
        int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs,
-                              s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id);
+                              s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id,
+                              const s16 *rgb2yuv_coeffs);
 
        /*
         * Disable the writing to memory of the next frame's content.
index ba6ae66387c9129063cc894ed99c5dc0854a4c07..91472e5e0c8b8a3b0c19a2fb5f860fac83ffcb5b 100644 (file)
@@ -26,6 +26,8 @@ struct malidp_mw_connector_state {
        s32 pitches[2];
        u8 format;
        u8 n_planes;
+       bool rgb2yuv_initialized;
+       const s16 *rgb2yuv_coeffs;
 };
 
 static int malidp_mw_connector_get_modes(struct drm_connector *connector)
@@ -84,7 +86,7 @@ static void malidp_mw_connector_destroy(struct drm_connector *connector)
 static struct drm_connector_state *
 malidp_mw_connector_duplicate_state(struct drm_connector *connector)
 {
-       struct malidp_mw_connector_state *mw_state;
+       struct malidp_mw_connector_state *mw_state, *mw_current_state;
 
        if (WARN_ON(!connector->state))
                return NULL;
@@ -93,7 +95,10 @@ malidp_mw_connector_duplicate_state(struct drm_connector *connector)
        if (!mw_state)
                return NULL;
 
-       /* No need to preserve any of our driver-local data */
+       mw_current_state = to_mw_state(connector->state);
+       mw_state->rgb2yuv_coeffs = mw_current_state->rgb2yuv_coeffs;
+       mw_state->rgb2yuv_initialized = mw_current_state->rgb2yuv_initialized;
+
        __drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base);
 
        return &mw_state->base;
@@ -108,6 +113,13 @@ static const struct drm_connector_funcs malidp_mw_connector_funcs = {
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
+static const s16 rgb2yuv_coeffs_bt709_limited[MALIDP_COLORADJ_NUM_COEFFS] = {
+       47,  157,   16,
+       -26,  -87,  112,
+       112, -102,  -10,
+       16,  128,  128
+};
+
 static int
 malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
                               struct drm_crtc_state *crtc_state,
@@ -157,6 +169,9 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
        }
        mw_state->n_planes = n_planes;
 
+       if (fb->format->is_yuv)
+               mw_state->rgb2yuv_coeffs = rgb2yuv_coeffs_bt709_limited;
+
        return 0;
 }
 
@@ -239,10 +254,12 @@ void malidp_mw_atomic_commit(struct drm_device *drm,
 
                drm_writeback_queue_job(mw_conn, conn_state->writeback_job);
                conn_state->writeback_job = NULL;
-
                hwdev->hw->enable_memwrite(hwdev, mw_state->addrs,
                                           mw_state->pitches, mw_state->n_planes,
-                                          fb->width, fb->height, mw_state->format);
+                                          fb->width, fb->height, mw_state->format,
+                                          !mw_state->rgb2yuv_initialized ?
+                                          mw_state->rgb2yuv_coeffs : NULL);
+               mw_state->rgb2yuv_initialized = !!mw_state->rgb2yuv_coeffs;
        } else {
                DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n");
                hwdev->hw->disable_memwrite(hwdev);
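
Note: the rgb2yuv_coeffs_bt709_limited table added above is the BT.709 limited-range RGB-to-YUV matrix in Q8 fixed point: the first three rows are multipliers scaled by 256 (47/256 ≈ 0.184, 157/256 ≈ 0.613, 16/256 ≈ 0.063 for luma) and the fourth row holds the per-channel output offsets (16, 128, 128). A minimal sketch of the arithmetic the SE hardware applies with these registers — rgb2yuv_q8() is a hypothetical helper, not driver code:

    /* Sketch only: illustrates the Q8 conversion implied by the table.
     * E.g. white (255,255,255): ((47+157+16)*255 >> 8) + 16 = 235, the
     * top of the limited (16..235) luma range.
     */
    static void rgb2yuv_q8(const s16 m[12], u8 r, u8 g, u8 b, u8 out[3])
    {
            int i;

            for (i = 0; i < 3; i++) {
                    int v = (m[3 * i] * r + m[3 * i + 1] * g +
                             m[3 * i + 2] * b) >> 8;

                    v += m[9 + i];             /* fourth row: offsets */
                    out[i] = clamp(v, 0, 255); /* clamp() from linux/kernel.h */
            }
    }
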
index 3579d36b2a717aedc436cd574b8639f9ea1d7d9b..6ffe849774f2edbec087aabe640ba0f61720b6f2 100644 (file)
 #define MALIDP500_SE_BASE              0x00c00
 #define MALIDP500_SE_CONTROL           0x00c0c
 #define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c
+#define MALIDP500_SE_RGB_YUV_COEFFS    0x00C74
 #define MALIDP500_SE_MEMWRITE_BASE     0x00e00
 #define MALIDP500_DC_IRQ_BASE          0x00f00
 #define MALIDP500_CONFIG_VALID         0x00f00
 #define MALIDP550_SE_CONTROL           0x08010
 #define   MALIDP550_SE_MEMWRITE_ONESHOT        (1 << 7)
 #define MALIDP550_SE_MEMWRITE_OUT_SIZE 0x08030
+#define MALIDP550_SE_RGB_YUV_COEFFS    0x08078
 #define MALIDP550_SE_MEMWRITE_BASE     0x08100
 #define MALIDP550_DC_BASE              0x0c000
 #define MALIDP550_DC_CONTROL           0x0c010
index baff50a4c2349dfec2ae8df1f14c0b5714f29020..df31c3815092b33f6fbbd6db1cebf341642e1b66 100644 (file)
@@ -63,20 +63,21 @@ static void drm_client_close(struct drm_client_dev *client)
 EXPORT_SYMBOL(drm_client_close);
 
 /**
- * drm_client_new - Create a DRM client
+ * drm_client_init - Initialise a DRM client
  * @dev: DRM device
  * @client: DRM client
  * @name: Client name
  * @funcs: DRM client functions (optional)
  *
+ * This initialises the client and opens a &drm_file. Use drm_client_add() to complete the process.
  * The caller needs to hold a reference on @dev before calling this function.
  * The client is freed when the &drm_device is unregistered. See drm_client_release().
  *
  * Returns:
  * Zero on success or negative error code on failure.
  */
-int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
-                  const char *name, const struct drm_client_funcs *funcs)
+int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
+                   const char *name, const struct drm_client_funcs *funcs)
 {
        int ret;
 
@@ -95,10 +96,6 @@ int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
        if (ret)
                goto err_put_module;
 
-       mutex_lock(&dev->clientlist_mutex);
-       list_add(&client->list, &dev->clientlist);
-       mutex_unlock(&dev->clientlist_mutex);
-
        drm_dev_get(dev);
 
        return 0;
@@ -109,13 +106,33 @@ err_put_module:
 
        return ret;
 }
-EXPORT_SYMBOL(drm_client_new);
+EXPORT_SYMBOL(drm_client_init);
+
+/**
+ * drm_client_add - Add client to the device list
+ * @client: DRM client
+ *
+ * Add the client to the &drm_device client list to activate its callbacks.
+ * @client must be initialized by a call to drm_client_init(). After
+ * drm_client_add() it is no longer permissible to call drm_client_release()
+ * directly (outside the unregister callback), instead cleanup will happen
+ * automatically on driver unload.
+ */
+void drm_client_add(struct drm_client_dev *client)
+{
+       struct drm_device *dev = client->dev;
+
+       mutex_lock(&dev->clientlist_mutex);
+       list_add(&client->list, &dev->clientlist);
+       mutex_unlock(&dev->clientlist_mutex);
+}
+EXPORT_SYMBOL(drm_client_add);
 
 /**
  * drm_client_release - Release DRM client resources
  * @client: DRM client
  *
- * Releases resources by closing the &drm_file that was opened by drm_client_new().
+ * Releases resources by closing the &drm_file that was opened by drm_client_init().
  * It is called automatically if the &drm_client_funcs.unregister callback is _not_ set.
  *
  * This function should only be called from the unregister callback. An exception
index 9da36a6271d3a24380e6a1221ae027701eba6e52..9ac1f2e0f064cb72528834ed6c21a82daf37be45 100644 (file)
@@ -160,7 +160,7 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
 
        fb_helper = &fbdev_cma->fb_helper;
 
-       ret = drm_client_new(dev, &fb_helper->client, "fbdev", NULL);
+       ret = drm_client_init(dev, &fb_helper->client, "fbdev", NULL);
        if (ret)
                goto err_free;
 
@@ -169,6 +169,8 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
        if (ret)
                goto err_client_put;
 
+       drm_client_add(&fb_helper->client);
+
        return fbdev_cma;
 
 err_client_put:
index 16ec93b75dbfaf87fca050d84dfe8ee292226424..515a7aec57acc48505f195f462a1c46c69da7cbb 100644 (file)
@@ -3218,12 +3218,14 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
        if (!fb_helper)
                return -ENOMEM;
 
-       ret = drm_client_new(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
+       ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
        if (ret) {
                kfree(fb_helper);
                return ret;
        }
 
+       drm_client_add(&fb_helper->client);
+
        fb_helper->preferred_bpp = preferred_bpp;
 
        drm_fbdev_client_hotplug(&fb_helper->client);
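
Note: taken together, the drm_client changes above split client setup into two phases — drm_client_init() opens the &drm_file and prepares the client, and drm_client_add() publishes it on the device's client list only once the caller's own setup is complete, so no callback can observe a half-initialised client. A minimal sketch of the resulting pattern, using hypothetical names (my_client, my_client_setup):

    /* Sketch: "my_client"/"my_client_setup" are illustrative names. */
    struct my_client {
            struct drm_client_dev client;
    };

    static int my_client_setup(struct drm_device *dev)
    {
            struct my_client *mc;
            int ret;

            mc = kzalloc(sizeof(*mc), GFP_KERNEL);
            if (!mc)
                    return -ENOMEM;

            ret = drm_client_init(dev, &mc->client, "my-client", NULL);
            if (ret) {
                    kfree(mc);      /* nothing published yet */
                    return ret;
            }

            /* ... driver-private setup; no client callbacks can run yet ... */

            drm_client_add(&mc->client);    /* now visible on dev->clientlist */
            return 0;
    }
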
index b54fb78a283c642e8541370482c627ea9567dc8e..b82da96ded5c85d847c9c7566236f1deaf48ffc2 100644 (file)
@@ -566,14 +566,14 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
        lessee_priv->is_master = 1;
        lessee_priv->authenticated = 1;
 
-       /* Hook up the fd */
-       fd_install(fd, lessee_file);
-
        /* Pass fd back to userspace */
        DRM_DEBUG_LEASE("Returning fd %d id %d\n", fd, lessee->lessee_id);
        cl->fd = fd;
        cl->lessee_id = lessee->lessee_id;
 
+       /* Hook up the fd */
+       fd_install(fd, lessee_file);
+
        DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
        return 0;
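
Note: the reordering above follows the general rule for fd_install(): installing a file descriptor publishes it to userspace immediately and cannot be undone, so it must come after every step that can still fail or that fills in data returned to the caller. Sketched below with a hypothetical constructor (make_backing_file):

    /* Sketch of the publish-last pattern; names are illustrative. */
    int fd = get_unused_fd_flags(O_CLOEXEC);
    if (fd < 0)
            return fd;

    file = make_backing_file();
    if (IS_ERR(file)) {
            put_unused_fd(fd);      /* safe: fd was never installed */
            return PTR_ERR(file);
    }

    out->fd = fd;                   /* fill in everything the caller reads... */
    fd_install(fd, file);           /* ...then publish; no unwinding after this */
    return 0;
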
 
index b902361dee6e1db300c10ce5798de7bada296b65..1d9a9d2fe0e098c4f00b59816b92d262c9620073 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/err.h>
 #include <linux/module.h>
 
-#include <drm/drm_device.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_panel.h>
 
@@ -105,13 +104,6 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
        if (panel->connector)
                return -EBUSY;
 
-       panel->link = device_link_add(connector->dev->dev, panel->dev, 0);
-       if (!panel->link) {
-               dev_err(panel->dev, "failed to link panel to %s\n",
-                       dev_name(connector->dev->dev));
-               return -EINVAL;
-       }
-
        panel->connector = connector;
        panel->drm = connector->dev;
 
@@ -133,8 +125,6 @@ EXPORT_SYMBOL(drm_panel_attach);
  */
 int drm_panel_detach(struct drm_panel *panel)
 {
-       device_link_del(panel->link);
-
        panel->connector = NULL;
        panel->drm = NULL;
 
index adb3cb27d31e6fa6aa1f0c3102c7d8ede5599169..759278fef35ae6ee4f9889b09c0dfffef2af8ca8 100644 (file)
@@ -97,6 +97,8 @@ static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
 {
        int ret;
 
+       WARN_ON(*fence);
+
        *fence = drm_syncobj_fence_get(syncobj);
        if (*fence)
                return 1;
@@ -743,6 +745,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 
        if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
                for (i = 0; i < count; ++i) {
+                       if (entries[i].fence)
+                               continue;
+
                        drm_syncobj_fence_get_or_add_callback(syncobjs[i],
                                                              &entries[i].fence,
                                                              &entries[i].syncobj_cb,
index 9b2720b41571f245a1ba5ad677bb0566d95ca207..83c1f46670bfea9dcbe95e42da598c8665576b03 100644 (file)
@@ -592,8 +592,6 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct component_match *match = NULL;
 
-       dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-
        if (!dev->platform_data) {
                struct device_node *core_node;
 
@@ -655,13 +653,30 @@ static int __init etnaviv_init(void)
        for_each_compatible_node(np, NULL, "vivante,gc") {
                if (!of_device_is_available(np))
                        continue;
-               pdev = platform_device_register_simple("etnaviv", -1,
-                                                      NULL, 0);
-               if (IS_ERR(pdev)) {
-                       ret = PTR_ERR(pdev);
+
+               pdev = platform_device_alloc("etnaviv", -1);
+               if (!pdev) {
+                       ret = -ENOMEM;
+                       of_node_put(np);
+                       goto unregister_platform_driver;
+               }
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+               /*
+                * Apply the same DMA configuration to the virtual etnaviv
+                * device as the GPU we found. This assumes that all Vivante
+                * GPUs in the system share the same DMA constraints.
+                */
+               of_dma_configure(&pdev->dev, np, true);
+
+               ret = platform_device_add(pdev);
+               if (ret) {
+                       platform_device_put(pdev);
                        of_node_put(np);
                        goto unregister_platform_driver;
                }
+
                etnaviv_drm = pdev;
                of_node_put(np);
                break;
index 87f6b5672e1193a1df76bf3c09eb69abfd966e55..797d9ee5f15a75c5aee52966d0145b7023e94574 100644 (file)
@@ -55,37 +55,12 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
 static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
                                        unsigned long start, unsigned long size)
 {
-       struct iommu_domain *domain;
-       int ret;
-
-       domain = iommu_domain_alloc(priv->dma_dev->bus);
-       if (!domain)
-               return -ENOMEM;
-
-       ret = iommu_get_dma_cookie(domain);
-       if (ret)
-               goto free_domain;
-
-       ret = iommu_dma_init_domain(domain, start, size, NULL);
-       if (ret)
-               goto put_cookie;
-
-       priv->mapping = domain;
+       priv->mapping = iommu_get_domain_for_dev(priv->dma_dev);
        return 0;
-
-put_cookie:
-       iommu_put_dma_cookie(domain);
-free_domain:
-       iommu_domain_free(domain);
-       return ret;
 }
 
 static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
 {
-       struct iommu_domain *domain = priv->mapping;
-
-       iommu_put_dma_cookie(domain);
-       iommu_domain_free(domain);
        priv->mapping = NULL;
 }
 
@@ -94,7 +69,9 @@ static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
 {
        struct iommu_domain *domain = priv->mapping;
 
-       return iommu_attach_device(domain, dev);
+       if (dev != priv->dma_dev)
+               return iommu_attach_device(domain, dev);
+       return 0;
 }
 
 static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
@@ -102,7 +79,8 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
 {
        struct iommu_domain *domain = priv->mapping;
 
-       iommu_detach_device(domain, dev);
+       if (dev != priv->dma_dev)
+               iommu_detach_device(domain, dev);
 }
 #else
 #error Unsupported architecture and IOMMU/DMA-mapping glue code
index 5d2f0d548469e1dd4808d8be5dfe8be62edfef0b..250b5e02a314a493fd0c41477237bf44f7208225 100644 (file)
@@ -191,7 +191,8 @@ static irqreturn_t tda9950_irq(int irq, void *data)
                        break;
                }
                /* TDA9950 executes all retries for us */
-               tx_status |= CEC_TX_STATUS_MAX_RETRIES;
+               if (tx_status != CEC_TX_STATUS_OK)
+                       tx_status |= CEC_TX_STATUS_MAX_RETRIES;
                cec_transmit_done(priv->adap, tx_status, arb_lost_cnt,
                                  nack_cnt, 0, err_cnt);
                break;
@@ -310,7 +311,7 @@ static void tda9950_release(struct tda9950_priv *priv)
        /* Wait up to .5s for it to signal non-busy */
        do {
                csr = tda9950_read(client, REG_CSR);
-               if (!(csr & CSR_BUSY) || --timeout)
+               if (!(csr & CSR_BUSY) || !--timeout)
                        break;
                msleep(10);
        } while (1);
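
Note: the one-character fix above deserves spelling out. With `|| --timeout`, the decremented counter is still non-zero on the first pass, so the condition is true and the loop breaks immediately — the busy-wait never actually waited. With `|| !--timeout`, the loop breaks only once the budget is exhausted. A generic sketch of the corrected bounded-poll pattern (device_busy() is hypothetical):

    /* Sketch: poll a busy flag for up to ~500 ms in 10 ms steps. */
    int timeout = 50;

    do {
            if (!device_busy())     /* hypothetical status read */
                    break;          /* done: no longer busy */
            if (!--timeout)
                    break;          /* budget exhausted: give up */
            msleep(10);
    } while (1);
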
index f7f2aa71d8d99f1c4fa4e4d4632adeb48ecd50f3..a262a64f562565d80642cf887788ac261991336e 100644 (file)
@@ -232,6 +232,20 @@ static bool compress_init(struct compress *c)
        return true;
 }
 
+static void *compress_next_page(struct drm_i915_error_object *dst)
+{
+       unsigned long page;
+
+       if (dst->page_count >= dst->num_pages)
+               return ERR_PTR(-ENOSPC);
+
+       page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+       if (!page)
+               return ERR_PTR(-ENOMEM);
+
+       return dst->pages[dst->page_count++] = (void *)page;
+}
+
 static int compress_page(struct compress *c,
                         void *src,
                         struct drm_i915_error_object *dst)
@@ -245,19 +259,14 @@ static int compress_page(struct compress *c,
 
        do {
                if (zstream->avail_out == 0) {
-                       unsigned long page;
-
-                       page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
-                       if (!page)
-                               return -ENOMEM;
+                       zstream->next_out = compress_next_page(dst);
+                       if (IS_ERR(zstream->next_out))
+                               return PTR_ERR(zstream->next_out);
 
-                       dst->pages[dst->page_count++] = (void *)page;
-
-                       zstream->next_out = (void *)page;
                        zstream->avail_out = PAGE_SIZE;
                }
 
-               if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK)
+               if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
                        return -EIO;
        } while (zstream->avail_in);
 
@@ -268,19 +277,42 @@ static int compress_page(struct compress *c,
        return 0;
 }
 
-static void compress_fini(struct compress *c,
+static int compress_flush(struct compress *c,
                          struct drm_i915_error_object *dst)
 {
        struct z_stream_s *zstream = &c->zstream;
 
-       if (dst) {
-               zlib_deflate(zstream, Z_FINISH);
-               dst->unused = zstream->avail_out;
-       }
+       do {
+               switch (zlib_deflate(zstream, Z_FINISH)) {
+               case Z_OK: /* more space requested */
+                       zstream->next_out = compress_next_page(dst);
+                       if (IS_ERR(zstream->next_out))
+                               return PTR_ERR(zstream->next_out);
+
+                       zstream->avail_out = PAGE_SIZE;
+                       break;
+
+               case Z_STREAM_END:
+                       goto end;
+
+               default: /* any error */
+                       return -EIO;
+               }
+       } while (1);
+
+end:
+       memset(zstream->next_out, 0, zstream->avail_out);
+       dst->unused = zstream->avail_out;
+       return 0;
+}
+
+static void compress_fini(struct compress *c,
+                         struct drm_i915_error_object *dst)
+{
+       struct z_stream_s *zstream = &c->zstream;
 
        zlib_deflateEnd(zstream);
        kfree(zstream->workspace);
-
        if (c->tmp)
                free_page((unsigned long)c->tmp);
 }
@@ -319,6 +351,12 @@ static int compress_page(struct compress *c,
        return 0;
 }
 
+static int compress_flush(struct compress *c,
+                         struct drm_i915_error_object *dst)
+{
+       return 0;
+}
+
 static void compress_fini(struct compress *c,
                          struct drm_i915_error_object *dst)
 {
@@ -917,6 +955,7 @@ i915_error_object_create(struct drm_i915_private *i915,
        unsigned long num_pages;
        struct sgt_iter iter;
        dma_addr_t dma;
+       int ret;
 
        if (!vma)
                return NULL;
@@ -930,6 +969,7 @@ i915_error_object_create(struct drm_i915_private *i915,
 
        dst->gtt_offset = vma->node.start;
        dst->gtt_size = vma->node.size;
+       dst->num_pages = num_pages;
        dst->page_count = 0;
        dst->unused = 0;
 
@@ -938,28 +978,26 @@ i915_error_object_create(struct drm_i915_private *i915,
                return NULL;
        }
 
+       ret = -EINVAL;
        for_each_sgt_dma(dma, iter, vma->pages) {
                void __iomem *s;
-               int ret;
 
                ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
 
                s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
                ret = compress_page(&compress, (void  __force *)s, dst);
                io_mapping_unmap_atomic(s);
-
                if (ret)
-                       goto unwind;
+                       break;
        }
-       goto out;
 
-unwind:
-       while (dst->page_count--)
-               free_page((unsigned long)dst->pages[dst->page_count]);
-       kfree(dst);
-       dst = NULL;
+       if (ret || compress_flush(&compress, dst)) {
+               while (dst->page_count--)
+                       free_page((unsigned long)dst->pages[dst->page_count]);
+               kfree(dst);
+               dst = NULL;
+       }
 
-out:
        compress_fini(&compress, dst);
        ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
        return dst;
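
Note: the new compress_flush() above implements the standard zlib termination sequence — once all input has been fed with Z_NO_FLUSH, deflate must be called repeatedly with Z_FINISH, supplying fresh output space whenever it returns Z_OK, until it returns Z_STREAM_END; the old code issued a single Z_FINISH and could truncate the stream. A minimal userspace analogue of the same loop, using the ordinary zlib API rather than the kernel's zlib_deflate wrappers (consume() is a hypothetical sink):

    #include <zlib.h>

    #define CHUNK 16384

    /* Sketch (zpipe.c style): drain a deflate stream to completion. */
    static int finish_stream(z_stream *strm)
    {
            unsigned char out[CHUNK];
            int ret;

            strm->avail_in = 0;                     /* no input left */
            do {
                    strm->next_out = out;
                    strm->avail_out = CHUNK;
                    ret = deflate(strm, Z_FINISH);  /* Z_OK: more space needed */
                    if (ret == Z_STREAM_ERROR)
                            return -1;
                    consume(out, CHUNK - strm->avail_out);
            } while (ret != Z_STREAM_END);

            return 0;
    }
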
index f893a4e8b7831d7b214cf60ab7bcf9b058070714..8710fb18ed746cface7e9a7b2d6d6ac7cd06b2b4 100644 (file)
@@ -135,6 +135,7 @@ struct i915_gpu_state {
                struct drm_i915_error_object {
                        u64 gtt_offset;
                        u64 gtt_size;
+                       int num_pages;
                        int page_count;
                        int unused;
                        u32 *pages[0];
index 90628a47ae17f81312dff51ddbc89aff4af55654..29877969310dae65ff8f0a035285deb463c0d342 100644 (file)
@@ -3091,36 +3091,27 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
        spin_unlock(&i915->irq_lock);
 }
 
-static void
-gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl,
-                     u32 *iir)
+static u32
+gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
 {
        void __iomem * const regs = dev_priv->regs;
+       u32 iir;
 
        if (!(master_ctl & GEN11_GU_MISC_IRQ))
-               return;
+               return 0;
+
+       iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
+       if (likely(iir))
+               raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
 
-       *iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
-       if (likely(*iir))
-               raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir);
+       return iir;
 }
 
 static void
-gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv,
-                         const u32 master_ctl, const u32 iir)
+gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
 {
-       if (!(master_ctl & GEN11_GU_MISC_IRQ))
-               return;
-
-       if (unlikely(!iir)) {
-               DRM_ERROR("GU_MISC iir blank!\n");
-               return;
-       }
-
        if (iir & GEN11_GU_MISC_GSE)
                intel_opregion_asle_intr(dev_priv);
-       else
-               DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir);
 }
 
 static irqreturn_t gen11_irq_handler(int irq, void *arg)
@@ -3157,12 +3148,12 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
                enable_rpm_wakeref_asserts(i915);
        }
 
-       gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir);
+       gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
 
        /* Acknowledge and enable interrupts. */
        raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
 
-       gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir);
+       gen11_gu_misc_irq_handler(i915, gu_misc_iir);
 
        return IRQ_HANDLED;
 }
index 6a4d1388ad2d39b2f972e0d1e1270bab8d3ce8df..1df3ce134cd0086de8b05cc7dc0fce4de4e91926 100644 (file)
@@ -592,7 +592,6 @@ static const struct intel_device_info intel_cannonlake_info = {
        GEN10_FEATURES, \
        GEN(11), \
        .ddb_size = 2048, \
-       .has_csr = 0, \
        .has_logical_ring_elsq = 1
 
 static const struct intel_device_info intel_icelake_11_info = {
index 5146ee029db4bd6c35bb3a15cdd25b6caf54c64e..bc49909aba8e664b6675fcac13921128c661dce9 100644 (file)
 #define USB_DEVICE_ID_SIS817_TOUCH     0x0817
 #define USB_DEVICE_ID_SIS_TS           0x1013
 #define USB_DEVICE_ID_SIS1030_TOUCH    0x1030
-#define USB_DEVICE_ID_SIS10FB_TOUCH    0x10fb
 
 #define USB_VENDOR_ID_SKYCABLE                 0x1223
 #define        USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER       0x3F07
index f3076659361abcb0567804c298af25e278de8fa9..4e3592e7a3f7217f86fe0fba59d3ea73551dcab2 100644 (file)
@@ -47,7 +47,7 @@
 /* quirks to control the device */
 #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV       BIT(0)
 #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET       BIT(1)
-#define I2C_HID_QUIRK_RESEND_REPORT_DESCR      BIT(2)
+#define I2C_HID_QUIRK_NO_RUNTIME_PM            BIT(2)
 
 /* flags */
 #define I2C_HID_STARTED                0
@@ -169,9 +169,8 @@ static const struct i2c_hid_quirks {
        { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
                I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
        { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
-               I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
-       { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
-               I2C_HID_QUIRK_RESEND_REPORT_DESCR },
+               I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
+               I2C_HID_QUIRK_NO_RUNTIME_PM },
        { 0, 0 }
 };
 
@@ -1105,7 +1104,9 @@ static int i2c_hid_probe(struct i2c_client *client,
                goto err_mem_free;
        }
 
-       pm_runtime_put(&client->dev);
+       if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
+               pm_runtime_put(&client->dev);
+
        return 0;
 
 err_mem_free:
@@ -1130,7 +1131,8 @@ static int i2c_hid_remove(struct i2c_client *client)
        struct i2c_hid *ihid = i2c_get_clientdata(client);
        struct hid_device *hid;
 
-       pm_runtime_get_sync(&client->dev);
+       if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
+               pm_runtime_get_sync(&client->dev);
        pm_runtime_disable(&client->dev);
        pm_runtime_set_suspended(&client->dev);
        pm_runtime_put_noidle(&client->dev);
@@ -1236,22 +1238,13 @@ static int i2c_hid_resume(struct device *dev)
 
        /* Instead of resetting the device, simply power it on. This
         * solves "incomplete reports" on Raydium devices 2386:3118 and
-        * 2386:4B33
+        * 2386:4B33 and fixes various SIS touchscreens that stop sending
+        * data after a suspend/resume.
         */
        ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
        if (ret)
                return ret;
 
-       /* Some devices need to re-send report descr cmd
-        * after resume, after this it will be back normal.
-        * otherwise it issues too many incomplete reports.
-        */
-       if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) {
-               ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0);
-               if (ret)
-                       return ret;
-       }
-
        if (hid->driver && hid->driver->reset_resume) {
                ret = hid->driver->reset_resume(hid);
                return ret;
index da133716bed05b63dadef22e072f05b4e7b0f5da..08a8327dfd224852cb81599959eeac09fd0c5a9a 100644 (file)
@@ -29,6 +29,7 @@
 #define CNL_Ax_DEVICE_ID       0x9DFC
 #define GLK_Ax_DEVICE_ID       0x31A2
 #define CNL_H_DEVICE_ID                0xA37C
+#define ICL_MOBILE_DEVICE_ID   0x34FC
 #define SPT_H_DEVICE_ID                0xA135
 
 #define        REVISION_ID_CHT_A0      0x6
index a1125a5c7965a255f8b5480f47cc8a53b534b76f..256b3016116cecca6ece2f8ae2d94422cd875251 100644 (file)
@@ -38,6 +38,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
        {0, }
 };
index 0bee1f4b914e751d9893875665d0b588d7e3258d..3208ad6ad54014776cd333f4ac068c56cc737484 100644 (file)
@@ -337,6 +337,39 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
        return 0;
 }
 
+/**
+ * del_gid - Delete GID table entry
+ *
+ * @ib_dev:    IB device whose GID entry is to be deleted
+ * @port:      Port number of the IB device
+ * @table:     GID table of the IB device for a port
+ * @ix:                GID entry index to delete
+ *
+ */
+static void del_gid(struct ib_device *ib_dev, u8 port,
+                   struct ib_gid_table *table, int ix)
+{
+       struct ib_gid_table_entry *entry;
+
+       lockdep_assert_held(&table->lock);
+
+       pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
+                ib_dev->name, port, ix,
+                table->data_vec[ix]->attr.gid.raw);
+
+       write_lock_irq(&table->rwlock);
+       entry = table->data_vec[ix];
+       entry->state = GID_TABLE_ENTRY_PENDING_DEL;
+       /*
+        * For non-RoCE protocols, the GID entry slot is ready to use.
+        */
+       if (!rdma_protocol_roce(ib_dev, port))
+               table->data_vec[ix] = NULL;
+       write_unlock_irq(&table->rwlock);
+
+       put_gid_entry_locked(entry);
+}
+
 /**
  * add_modify_gid - Add or modify GID table entry
  *
@@ -358,7 +391,7 @@ static int add_modify_gid(struct ib_gid_table *table,
         * this index.
         */
        if (is_gid_entry_valid(table->data_vec[attr->index]))
-               put_gid_entry(table->data_vec[attr->index]);
+               del_gid(attr->device, attr->port_num, table, attr->index);
 
        /*
         * Some HCA's report multiple GID entries with only one valid GID, and
@@ -386,39 +419,6 @@ done:
        return ret;
 }
 
-/**
- * del_gid - Delete GID table entry
- *
- * @ib_dev:    IB device whose GID entry to be deleted
- * @port:      Port number of the IB device
- * @table:     GID table of the IB device for a port
- * @ix:                GID entry index to delete
- *
- */
-static void del_gid(struct ib_device *ib_dev, u8 port,
-                   struct ib_gid_table *table, int ix)
-{
-       struct ib_gid_table_entry *entry;
-
-       lockdep_assert_held(&table->lock);
-
-       pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
-                ib_dev->name, port, ix,
-                table->data_vec[ix]->attr.gid.raw);
-
-       write_lock_irq(&table->rwlock);
-       entry = table->data_vec[ix];
-       entry->state = GID_TABLE_ENTRY_PENDING_DEL;
-       /*
-        * For non RoCE protocol, GID entry slot is ready to use.
-        */
-       if (!rdma_protocol_roce(ib_dev, port))
-               table->data_vec[ix] = NULL;
-       write_unlock_irq(&table->rwlock);
-
-       put_gid_entry_locked(entry);
-}
-
 /* rwlock should be read locked, or lock should be held */
 static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
                    const struct ib_gid_attr *val, bool default_gid,
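
Note: the relocated del_gid() above encodes a two-phase delete — under the table's write lock the entry is only marked GID_TABLE_ENTRY_PENDING_DEL (and, for non-RoCE ports, its slot is cleared for reuse), while the memory is freed later when the last reference drops, so concurrent readers holding a reference never see a dangling pointer. A generic sketch of that mark-then-unreference pattern, with hypothetical types:

    /* Sketch: two-phase removal of a refcounted slot entry.
     * "my_table"/"my_entry" are hypothetical stand-ins.
     */
    static void table_del_entry(struct my_table *t, int ix)
    {
            struct my_entry *e;

            write_lock_irq(&t->rwlock);
            e = t->slots[ix];
            e->state = ENTRY_PENDING_DEL;   /* phase 1: lookups now fail */
            t->slots[ix] = NULL;            /* slot becomes reusable */
            write_unlock_irq(&t->rwlock);

            kref_put(&e->kref, my_entry_release);   /* phase 2: freed on last put */
    }
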
index 5f437d1570fb02d516b1ee60f0a4cee42053da50..21863ddde63e3040b285d9decd8a2ee1c47534b8 100644 (file)
@@ -1759,6 +1759,8 @@ static int ucma_close(struct inode *inode, struct file *filp)
                mutex_lock(&mut);
                if (!ctx->closing) {
                        mutex_unlock(&mut);
+                       ucma_put_ctx(ctx);
+                       wait_for_completion(&ctx->comp);
                        /* rdma_destroy_id ensures that no event handlers are
                         * inflight for that id before releasing it.
                         */
index a21d5214afc367b260fd445642c55e6040d02dfb..e012ca80f9d196ddbb8723691ec4087a55c0d863 100644 (file)
@@ -2027,33 +2027,55 @@ static int modify_qp(struct ib_uverbs_file *file,
 
        if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
            cmd->base.cur_qp_state > IB_QPS_ERR) ||
-           cmd->base.qp_state > IB_QPS_ERR) {
+           (cmd->base.attr_mask & IB_QP_STATE &&
+           cmd->base.qp_state > IB_QPS_ERR)) {
                ret = -EINVAL;
                goto release_qp;
        }
 
-       attr->qp_state            = cmd->base.qp_state;
-       attr->cur_qp_state        = cmd->base.cur_qp_state;
-       attr->path_mtu            = cmd->base.path_mtu;
-       attr->path_mig_state      = cmd->base.path_mig_state;
-       attr->qkey                = cmd->base.qkey;
-       attr->rq_psn              = cmd->base.rq_psn;
-       attr->sq_psn              = cmd->base.sq_psn;
-       attr->dest_qp_num         = cmd->base.dest_qp_num;
-       attr->qp_access_flags     = cmd->base.qp_access_flags;
-       attr->pkey_index          = cmd->base.pkey_index;
-       attr->alt_pkey_index      = cmd->base.alt_pkey_index;
-       attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
-       attr->max_rd_atomic       = cmd->base.max_rd_atomic;
-       attr->max_dest_rd_atomic  = cmd->base.max_dest_rd_atomic;
-       attr->min_rnr_timer       = cmd->base.min_rnr_timer;
-       attr->port_num            = cmd->base.port_num;
-       attr->timeout             = cmd->base.timeout;
-       attr->retry_cnt           = cmd->base.retry_cnt;
-       attr->rnr_retry           = cmd->base.rnr_retry;
-       attr->alt_port_num        = cmd->base.alt_port_num;
-       attr->alt_timeout         = cmd->base.alt_timeout;
-       attr->rate_limit          = cmd->rate_limit;
+       if (cmd->base.attr_mask & IB_QP_STATE)
+               attr->qp_state = cmd->base.qp_state;
+       if (cmd->base.attr_mask & IB_QP_CUR_STATE)
+               attr->cur_qp_state = cmd->base.cur_qp_state;
+       if (cmd->base.attr_mask & IB_QP_PATH_MTU)
+               attr->path_mtu = cmd->base.path_mtu;
+       if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
+               attr->path_mig_state = cmd->base.path_mig_state;
+       if (cmd->base.attr_mask & IB_QP_QKEY)
+               attr->qkey = cmd->base.qkey;
+       if (cmd->base.attr_mask & IB_QP_RQ_PSN)
+               attr->rq_psn = cmd->base.rq_psn;
+       if (cmd->base.attr_mask & IB_QP_SQ_PSN)
+               attr->sq_psn = cmd->base.sq_psn;
+       if (cmd->base.attr_mask & IB_QP_DEST_QPN)
+               attr->dest_qp_num = cmd->base.dest_qp_num;
+       if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS)
+               attr->qp_access_flags = cmd->base.qp_access_flags;
+       if (cmd->base.attr_mask & IB_QP_PKEY_INDEX)
+               attr->pkey_index = cmd->base.pkey_index;
+       if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
+               attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
+       if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
+               attr->max_rd_atomic = cmd->base.max_rd_atomic;
+       if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+               attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
+       if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER)
+               attr->min_rnr_timer = cmd->base.min_rnr_timer;
+       if (cmd->base.attr_mask & IB_QP_PORT)
+               attr->port_num = cmd->base.port_num;
+       if (cmd->base.attr_mask & IB_QP_TIMEOUT)
+               attr->timeout = cmd->base.timeout;
+       if (cmd->base.attr_mask & IB_QP_RETRY_CNT)
+               attr->retry_cnt = cmd->base.retry_cnt;
+       if (cmd->base.attr_mask & IB_QP_RNR_RETRY)
+               attr->rnr_retry = cmd->base.rnr_retry;
+       if (cmd->base.attr_mask & IB_QP_ALT_PATH) {
+               attr->alt_port_num = cmd->base.alt_port_num;
+               attr->alt_timeout = cmd->base.alt_timeout;
+               attr->alt_pkey_index = cmd->base.alt_pkey_index;
+       }
+       if (cmd->base.attr_mask & IB_QP_RATE_LIMIT)
+               attr->rate_limit = cmd->rate_limit;
 
        if (cmd->base.attr_mask & IB_QP_AV)
                copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
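
Note: the rewrite above stops copying every field of the user command unconditionally — each attribute is now read only when its bit is set in attr_mask, so uninitialised fields in the userspace struct can no longer leak into the kernel's view of the QP, and the state-range checks are gated the same way. The underlying pattern is a mask-guarded copy; a generic sketch with hypothetical names:

    /* Sketch: only fields flagged in ->mask are considered valid. */
    #define MY_ATTR_A BIT(0)
    #define MY_ATTR_B BIT(1)

    struct my_cmd  { u32 mask; u32 a; u32 b; };
    struct my_attr { u32 a; u32 b; };

    static void copy_masked(struct my_attr *dst, const struct my_cmd *cmd)
    {
            if (cmd->mask & MY_ATTR_A)
                    dst->a = cmd->a;        /* read only when declared valid */
            if (cmd->mask & MY_ATTR_B)
                    dst->b = cmd->b;
            /* unflagged fields keep their current values */
    }
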
index 6d974e2363df249c4291a3331d9d16f0a14232a6..50152c1b100452f7a4c8a9f733739ac25cbe777d 100644 (file)
@@ -440,6 +440,7 @@ static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
                        list_del(&entry->obj_list);
                kfree(entry);
        }
+       file->ev_queue.is_closed = 1;
        spin_unlock_irq(&file->ev_queue.lock);
 
        uverbs_close_fd(filp);
index 73ea6f0db88fb5c2b2bbd698be684baf34dc5331..be854628a7c63149c05ed74f9f001a39fb1bc59d 100644 (file)
@@ -248,6 +248,7 @@ void uverbs_destroy_api(struct uverbs_api *uapi)
                kfree(rcu_dereference_protected(*slot, true));
                radix_tree_iter_delete(&uapi->radix, &iter, slot);
        }
+       kfree(uapi);
 }
 
 struct uverbs_api *uverbs_alloc_api(
index 20b9f31052bf974fe43d335730daa4268ad614de..85cd1a3593d610132ded3796b5b90384bdb0342c 100644 (file)
@@ -78,7 +78,7 @@ static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
 /* Mutex to protect the list of bnxt_re devices added */
 static DEFINE_MUTEX(bnxt_re_dev_lock);
 static struct workqueue_struct *bnxt_re_wq;
-static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait);
+static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev);
 
 /* SR-IOV helper functions */
 
@@ -182,7 +182,7 @@ static void bnxt_re_shutdown(void *p)
        if (!rdev)
                return;
 
-       bnxt_re_ib_unreg(rdev, false);
+       bnxt_re_ib_unreg(rdev);
 }
 
 static void bnxt_re_stop_irq(void *handle)
@@ -251,7 +251,7 @@ static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
 /* Driver registration routines used to let the networking driver (bnxt_en)
  * to know that the RoCE driver is now installed
  */
-static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait)
+static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
 {
        struct bnxt_en_dev *en_dev;
        int rc;
@@ -260,14 +260,9 @@ static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait)
                return -EINVAL;
 
        en_dev = rdev->en_dev;
-       /* Acquire rtnl lock if it is not invokded from netdev event */
-       if (lock_wait)
-               rtnl_lock();
 
        rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
                                                    BNXT_ROCE_ULP);
-       if (lock_wait)
-               rtnl_unlock();
        return rc;
 }
 
@@ -281,14 +276,12 @@ static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
 
        en_dev = rdev->en_dev;
 
-       rtnl_lock();
        rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
                                                  &bnxt_re_ulp_ops, rdev);
-       rtnl_unlock();
        return rc;
 }
 
-static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait)
+static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
 {
        struct bnxt_en_dev *en_dev;
        int rc;
@@ -298,13 +291,9 @@ static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait)
 
        en_dev = rdev->en_dev;
 
-       if (lock_wait)
-               rtnl_lock();
 
        rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);
 
-       if (lock_wait)
-               rtnl_unlock();
        return rc;
 }
 
@@ -320,7 +309,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
 
        num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());
 
-       rtnl_lock();
        num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
                                                         rdev->msix_entries,
                                                         num_msix_want);
@@ -335,7 +323,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
        }
        rdev->num_msix = num_msix_got;
 done:
-       rtnl_unlock();
        return rc;
 }
 
@@ -358,24 +345,18 @@ static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
        fw_msg->timeout = timeout;
 }
 
-static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id,
-                                bool lock_wait)
+static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id)
 {
        struct bnxt_en_dev *en_dev = rdev->en_dev;
        struct hwrm_ring_free_input req = {0};
        struct hwrm_ring_free_output resp;
        struct bnxt_fw_msg fw_msg;
-       bool do_unlock = false;
        int rc = -EINVAL;
 
        if (!en_dev)
                return rc;
 
        memset(&fw_msg, 0, sizeof(fw_msg));
-       if (lock_wait) {
-               rtnl_lock();
-               do_unlock = true;
-       }
 
        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
        req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
@@ -386,8 +367,6 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id,
        if (rc)
                dev_err(rdev_to_dev(rdev),
                        "Failed to free HW ring:%d :%#x", req.ring_id, rc);
-       if (do_unlock)
-               rtnl_unlock();
        return rc;
 }
 
@@ -405,7 +384,6 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
                return rc;
 
        memset(&fw_msg, 0, sizeof(fw_msg));
-       rtnl_lock();
        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
        req.enables = 0;
        req.page_tbl_addr =  cpu_to_le64(dma_arr[0]);
@@ -426,27 +404,21 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
        if (!rc)
                *fw_ring_id = le16_to_cpu(resp.ring_id);
 
-       rtnl_unlock();
        return rc;
 }
 
 static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
-                                     u32 fw_stats_ctx_id, bool lock_wait)
+                                     u32 fw_stats_ctx_id)
 {
        struct bnxt_en_dev *en_dev = rdev->en_dev;
        struct hwrm_stat_ctx_free_input req = {0};
        struct bnxt_fw_msg fw_msg;
-       bool do_unlock = false;
        int rc = -EINVAL;
 
        if (!en_dev)
                return rc;
 
        memset(&fw_msg, 0, sizeof(fw_msg));
-       if (lock_wait) {
-               rtnl_lock();
-               do_unlock = true;
-       }
 
        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
        req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
@@ -457,8 +429,6 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
                dev_err(rdev_to_dev(rdev),
                        "Failed to free HW stats context %#x", rc);
 
-       if (do_unlock)
-               rtnl_unlock();
        return rc;
 }
 
@@ -478,7 +448,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
                return rc;
 
        memset(&fw_msg, 0, sizeof(fw_msg));
-       rtnl_lock();
 
        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
        req.update_period_ms = cpu_to_le32(1000);
@@ -490,7 +459,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
        if (!rc)
                *fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);
 
-       rtnl_unlock();
        return rc;
 }
 
@@ -929,19 +897,19 @@ fail:
        return rc;
 }
 
-static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev, bool lock_wait)
+static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
 {
        int i;
 
        for (i = 0; i < rdev->num_msix - 1; i++) {
-               bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, lock_wait);
+               bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id);
                bnxt_qplib_free_nq(&rdev->nq[i]);
        }
 }
 
-static void bnxt_re_free_res(struct bnxt_re_dev *rdev, bool lock_wait)
+static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
 {
-       bnxt_re_free_nq_res(rdev, lock_wait);
+       bnxt_re_free_nq_res(rdev);
 
        if (rdev->qplib_res.dpi_tbl.max) {
                bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
@@ -1219,7 +1187,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
        return 0;
 }
 
-static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait)
+static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
 {
        int i, rc;
 
@@ -1234,28 +1202,27 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait)
                cancel_delayed_work(&rdev->worker);
 
        bnxt_re_cleanup_res(rdev);
-       bnxt_re_free_res(rdev, lock_wait);
+       bnxt_re_free_res(rdev);
 
        if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
                rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
                if (rc)
                        dev_warn(rdev_to_dev(rdev),
                                 "Failed to deinitialize RCFW: %#x", rc);
-               bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id,
-                                          lock_wait);
+               bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
                bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
                bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
-               bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, lock_wait);
+               bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
                bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
        }
        if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
-               rc = bnxt_re_free_msix(rdev, lock_wait);
+               rc = bnxt_re_free_msix(rdev);
                if (rc)
                        dev_warn(rdev_to_dev(rdev),
                                 "Failed to free MSI-X vectors: %#x", rc);
        }
        if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
-               rc = bnxt_re_unregister_netdev(rdev, lock_wait);
+               rc = bnxt_re_unregister_netdev(rdev);
                if (rc)
                        dev_warn(rdev_to_dev(rdev),
                                 "Failed to unregister with netdev: %#x", rc);
@@ -1276,6 +1243,12 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 {
        int i, j, rc;
 
+       bool locked;
+
+       /* Acquire the rtnl lock throughout this function */
+       rtnl_lock();
+       locked = true;
+
        /* Registered a new RoCE device instance to netdev */
        rc = bnxt_re_register_netdev(rdev);
        if (rc) {
@@ -1374,12 +1347,16 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
                schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
        }
 
+       rtnl_unlock();
+       locked = false;
+
        /* Register ib dev */
        rc = bnxt_re_register_ib(rdev);
        if (rc) {
                pr_err("Failed to register with IB: %#x\n", rc);
                goto fail;
        }
+       set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
        dev_info(rdev_to_dev(rdev), "Device registered successfully");
        for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) {
                rc = device_create_file(&rdev->ibdev.dev,
@@ -1395,7 +1372,6 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
                        goto fail;
                }
        }
-       set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
        ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
                         &rdev->active_width);
        set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
@@ -1404,17 +1380,21 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 
        return 0;
 free_sctx:
-       bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id, true);
+       bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
 free_ctx:
        bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
 disable_rcfw:
        bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
 free_ring:
-       bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, true);
+       bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
 free_rcfw:
        bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
 fail:
-       bnxt_re_ib_unreg(rdev, true);
+       if (!locked)
+               rtnl_lock();
+       bnxt_re_ib_unreg(rdev);
+       rtnl_unlock();
+
        return rc;
 }
 
@@ -1567,7 +1547,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
                 */
                if (atomic_read(&rdev->sched_count) > 0)
                        goto exit;
-               bnxt_re_ib_unreg(rdev, false);
+               bnxt_re_ib_unreg(rdev);
                bnxt_re_remove_one(rdev);
                bnxt_re_dev_unreg(rdev);
                break;
@@ -1646,7 +1626,10 @@ static void __exit bnxt_re_mod_exit(void)
                 */
                flush_workqueue(bnxt_re_wq);
                bnxt_re_dev_stop(rdev);
-               bnxt_re_ib_unreg(rdev, true);
+               /* Acquire the rtnl_lock as the L2 resources are freed here */
+               rtnl_lock();
+               bnxt_re_ib_unreg(rdev);
+               rtnl_unlock();
                bnxt_re_remove_one(rdev);
                bnxt_re_dev_unreg(rdev);
        }
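
Note: the net effect of the bnxt_re changes above is a locking-discipline cleanup — instead of every helper taking rtnl conditionally via a lock_wait flag, the lock is acquired once at the top-level entry points (bnxt_re_ib_reg(), the netdev notifier path, module exit) and the helpers assume it is held. That removes error-prone flag threading and makes the lock order auditable at the call sites. A minimal sketch of the resulting shape, with hypothetical names:

    /* Sketch: helpers assume rtnl is held; only entry points take it. */
    static int helper(struct my_dev *d)     /* was: helper(d, bool lock_wait) */
    {
            ASSERT_RTNL();                  /* document the precondition */
            do_work(d);                     /* hypothetical */
            return 0;
    }

    static void entry_point(struct my_dev *d)
    {
            rtnl_lock();                    /* one lock site, obvious ordering */
            helper(d);
            rtnl_unlock();
    }
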
index 2c19bf772451bfef693eaad1fa5a678e5cc9e067..e1668bcc2d13d71aba2f35021ec25cd693e79682 100644 (file)
@@ -6733,6 +6733,7 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
        struct hfi1_devdata *dd = ppd->dd;
        struct send_context *sc;
        int i;
+       int sc_flags;
 
        if (flags & FREEZE_SELF)
                write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
@@ -6743,11 +6744,13 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
        /* notify all SDMA engines that they are going into a freeze */
        sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
 
+       sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
+                                             SCF_LINK_DOWN : 0);
        /* do halt pre-handling on all enabled send contexts */
        for (i = 0; i < dd->num_send_contexts; i++) {
                sc = dd->send_contexts[i].sc;
                if (sc && (sc->flags & SCF_ENABLED))
-                       sc_stop(sc, SCF_FROZEN | SCF_HALTED);
+                       sc_stop(sc, sc_flags);
        }
 
        /* Send context are frozen. Notify user space */
@@ -10674,6 +10677,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
                add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
 
                handle_linkup_change(dd, 1);
+               pio_kernel_linkup(dd);
 
                /*
                 * After link up, a new link width will have been set.
index c2c1cba5b23be440bc292a205ca523d6962c4233..752057647f091734368f998c0173234d8a1f2dd0 100644 (file)
@@ -86,6 +86,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
        unsigned long flags;
        int write = 1;  /* write sendctrl back */
        int flush = 0;  /* re-read sendctrl to make sure it is flushed */
+       int i;
 
        spin_lock_irqsave(&dd->sendctrl_lock, flags);
 
@@ -95,9 +96,13 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
                reg |= SEND_CTRL_SEND_ENABLE_SMASK;
        /* Fall through */
        case PSC_DATA_VL_ENABLE:
+               mask = 0;
+               for (i = 0; i < ARRAY_SIZE(dd->vld); i++)
+                       if (!dd->vld[i].mtu)
+                               mask |= BIT_ULL(i);
                /* Disallow sending on VLs not enabled */
-               mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
-                               SEND_CTRL_UNSUPPORTED_VL_SHIFT;
+               mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
+                       SEND_CTRL_UNSUPPORTED_VL_SHIFT;
                reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
                break;
        case PSC_GLOBAL_DISABLE:
@@ -921,20 +926,18 @@ void sc_free(struct send_context *sc)
 void sc_disable(struct send_context *sc)
 {
        u64 reg;
-       unsigned long flags;
        struct pio_buf *pbuf;
 
        if (!sc)
                return;
 
        /* do all steps, even if already disabled */
-       spin_lock_irqsave(&sc->alloc_lock, flags);
+       spin_lock_irq(&sc->alloc_lock);
        reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
        reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
        sc->flags &= ~SCF_ENABLED;
        sc_wait_for_packet_egress(sc, 1);
        write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
-       spin_unlock_irqrestore(&sc->alloc_lock, flags);
 
        /*
         * Flush any waiters.  Once the context is disabled,
@@ -944,7 +947,7 @@ void sc_disable(struct send_context *sc)
         * proceed with the flush.
         */
        udelay(1);
-       spin_lock_irqsave(&sc->release_lock, flags);
+       spin_lock(&sc->release_lock);
        if (sc->sr) {   /* this context has a shadow ring */
                while (sc->sr_tail != sc->sr_head) {
                        pbuf = &sc->sr[sc->sr_tail].pbuf;
@@ -955,7 +958,8 @@ void sc_disable(struct send_context *sc)
                                sc->sr_tail = 0;
                }
        }
-       spin_unlock_irqrestore(&sc->release_lock, flags);
+       spin_unlock(&sc->release_lock);
+       spin_unlock_irq(&sc->alloc_lock);
 }
 
 /* return SendEgressCtxtStatus.PacketOccupancy */
@@ -1178,11 +1182,39 @@ void pio_kernel_unfreeze(struct hfi1_devdata *dd)
                sc = dd->send_contexts[i].sc;
                if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
                        continue;
+               if (sc->flags & SCF_LINK_DOWN)
+                       continue;
 
                sc_enable(sc);  /* will clear the sc frozen flag */
        }
 }
 
+/**
+ * pio_kernel_linkup() - Re-enable send contexts after linkup event
+ * @dd: valid devive data
+ *
+ * When the link goes down, the freeze path is taken.  However, a link down
+ * event is different from a freeze because, if the send context is
+ * re-enabled, whoever is sending data will start sending again, which
+ * will hang any QP that is sending data.
+ *
+ * The freeze path now looks at the type of event that occurs and takes this
+ * path for a link down event.
+ */
+void pio_kernel_linkup(struct hfi1_devdata *dd)
+{
+       struct send_context *sc;
+       int i;
+
+       for (i = 0; i < dd->num_send_contexts; i++) {
+               sc = dd->send_contexts[i].sc;
+               if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER)
+                       continue;
+
+               sc_enable(sc);  /* will clear the sc link down flag */
+       }
+}
+
 /*
  * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
  * Returns:
@@ -1382,11 +1414,10 @@ void sc_stop(struct send_context *sc, int flag)
 {
        unsigned long flags;
 
-       /* mark the context */
-       sc->flags |= flag;
-
        /* stop buffer allocations */
        spin_lock_irqsave(&sc->alloc_lock, flags);
+       /* mark the context */
+       sc->flags |= flag;
        sc->flags &= ~SCF_ENABLED;
        spin_unlock_irqrestore(&sc->alloc_lock, flags);
        wake_up(&sc->halt_wait);
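
Note: the pio_send_control() hunk above changes how the unsupported-VL mask is built — rather than assuming VLs 0..num_vls-1 are contiguous and enabled, a VL is now marked unsupported exactly when it has no MTU configured, which handles sparse VL configurations. The bit arithmetic, worked through on a small hypothetical example:

    /* Sketch: 4 VLs, where VLs 0, 1 and 3 have an MTU but VL 2 does not. */
    u64 mask = 0;
    int i;

    for (i = 0; i < 4; i++)
            if (!vld[i].mtu)            /* hypothetical per-VL MTU table */
                    mask |= BIT_ULL(i); /* ends up as 0b0100: only VL2 blocked */

    /* old scheme: (~0ull << num_vls) could only disable a trailing range */
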
index 058b08f459ab7947e53aa5ad676febc9999f7ade..aaf372c3e5d6a3cc0de82aaf9819c02b97bd195f 100644 (file)
@@ -139,6 +139,7 @@ struct send_context {
 #define SCF_IN_FREE 0x02
 #define SCF_HALTED  0x04
 #define SCF_FROZEN  0x08
+#define SCF_LINK_DOWN 0x10
 
 struct send_context_info {
        struct send_context *sc;        /* allocated working context */
@@ -306,6 +307,7 @@ void set_pio_integrity(struct send_context *sc);
 void pio_reset_all(struct hfi1_devdata *dd);
 void pio_freeze(struct hfi1_devdata *dd);
 void pio_kernel_unfreeze(struct hfi1_devdata *dd);
+void pio_kernel_linkup(struct hfi1_devdata *dd);
 
 /* global PIO send control operations */
 #define PSC_GLOBAL_ENABLE 0
index a3a7b33196d64158cd069cdf5d32c429f79f7b5d..5c88706121c1cb0faf2da79dae014113f7d0a0d9 100644 (file)
@@ -828,7 +828,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
                        if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
                                if (++req->iov_idx == req->data_iovs) {
                                        ret = -EFAULT;
-                                       goto free_txreq;
+                                       goto free_tx;
                                }
                                iovec = &req->iovs[req->iov_idx];
                                WARN_ON(iovec->offset);
index 13374c727b142d61dc3f6bdb0603f958c6a1a0ca..a7c586a5589d642524f7e659c6a4dfa512eed4e2 100644 (file)
@@ -1582,6 +1582,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
        struct hfi1_pportdata *ppd;
        struct hfi1_devdata *dd;
        u8 sc5;
+       u8 sl;
 
        if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) &&
            !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
@@ -1590,8 +1591,13 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
        /* test the mapping for validity */
        ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
        ppd = ppd_from_ibp(ibp);
-       sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
        dd = dd_from_ppd(ppd);
+
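+       /* bounds-check the SL before using it to index sl_to_sc[] */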
+       sl = rdma_ah_get_sl(ah_attr);
+       if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
+               return -EINVAL;
+
+       sc5 = ibp->sl_to_sc[sl];
        if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
                return -EINVAL;
        return 0;
index ac116d63e4661adf03662bdfb2cd598cc179ce3c..f2f11e652dcd2a751d10397c8c65d6be8a53b53e 100644 (file)
@@ -723,6 +723,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
                attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
        struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
        struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
        struct devx_obj *obj;
        int err;
 
@@ -754,10 +755,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
 
        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
        if (err)
-               goto obj_free;
+               goto obj_destroy;
 
        return 0;
 
+obj_destroy:
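+       /* destroy the firmware object so a failed copy does not leak it */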
+       mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
 obj_free:
        kfree(obj);
        return err;
index 444d16520506a1773f01dc0b89087091d867e939..0b34e909505f5fa4c6a1404227a137ae94e54aa9 100644 (file)
@@ -2951,7 +2951,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
 {
        struct srp_target_port *target = host_to_target(scmnd->device->host);
        struct srp_rdma_ch *ch;
-       int i;
+       int i, j;
        u8 status;
 
        shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -2965,8 +2965,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
 
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
-               for (i = 0; i < target->req_ring_size; ++i) {
-                       struct srp_request *req = &ch->req_ring[i];
+               for (j = 0; j < target->req_ring_size; ++j) {
+                       struct srp_request *req = &ch->req_ring[j];
 
                        srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
                }
index 6f62da2909ec0f07eb7cd7d1a76122bc11ed13a7..6caee807cafabf6955053ee817c30f70571d3ef5 100644 (file)
@@ -75,8 +75,7 @@ MODULE_LICENSE("GPL");
  */
 
 
-static unsigned char atakbd_keycode[0x72] = {  /* American layout */
-       [0]      = KEY_GRAVE,
+static unsigned char atakbd_keycode[0x73] = {  /* American layout */
        [1]      = KEY_ESC,
        [2]      = KEY_1,
        [3]      = KEY_2,
@@ -117,9 +116,9 @@ static unsigned char atakbd_keycode[0x72] = {       /* American layout */
        [38]     = KEY_L,
        [39]     = KEY_SEMICOLON,
        [40]     = KEY_APOSTROPHE,
-       [41]     = KEY_BACKSLASH,       /* FIXME, '#' */
+       [41]     = KEY_GRAVE,
        [42]     = KEY_LEFTSHIFT,
-       [43]     = KEY_GRAVE,           /* FIXME: '~' */
+       [43]     = KEY_BACKSLASH,
        [44]     = KEY_Z,
        [45]     = KEY_X,
        [46]     = KEY_C,
@@ -145,45 +144,34 @@ static unsigned char atakbd_keycode[0x72] = {     /* American layout */
        [66]     = KEY_F8,
        [67]     = KEY_F9,
        [68]     = KEY_F10,
-       [69]     = KEY_ESC,
-       [70]     = KEY_DELETE,
-       [71]     = KEY_KP7,
-       [72]     = KEY_KP8,
-       [73]     = KEY_KP9,
+       [71]     = KEY_HOME,
+       [72]     = KEY_UP,
        [74]     = KEY_KPMINUS,
-       [75]     = KEY_KP4,
-       [76]     = KEY_KP5,
-       [77]     = KEY_KP6,
+       [75]     = KEY_LEFT,
+       [77]     = KEY_RIGHT,
        [78]     = KEY_KPPLUS,
-       [79]     = KEY_KP1,
-       [80]     = KEY_KP2,
-       [81]     = KEY_KP3,
-       [82]     = KEY_KP0,
-       [83]     = KEY_KPDOT,
-       [90]     = KEY_KPLEFTPAREN,
-       [91]     = KEY_KPRIGHTPAREN,
-       [92]     = KEY_KPASTERISK,      /* FIXME */
-       [93]     = KEY_KPASTERISK,
-       [94]     = KEY_KPPLUS,
-       [95]     = KEY_HELP,
+       [80]     = KEY_DOWN,
+       [82]     = KEY_INSERT,
+       [83]     = KEY_DELETE,
        [96]     = KEY_102ND,
-       [97]     = KEY_KPASTERISK,      /* FIXME */
-       [98]     = KEY_KPSLASH,
+       [97]     = KEY_UNDO,
+       [98]     = KEY_HELP,
        [99]     = KEY_KPLEFTPAREN,
        [100]    = KEY_KPRIGHTPAREN,
        [101]    = KEY_KPSLASH,
        [102]    = KEY_KPASTERISK,
-       [103]    = KEY_UP,
-       [104]    = KEY_KPASTERISK,      /* FIXME */
-       [105]    = KEY_LEFT,
-       [106]    = KEY_RIGHT,
-       [107]    = KEY_KPASTERISK,      /* FIXME */
-       [108]    = KEY_DOWN,
-       [109]    = KEY_KPASTERISK,      /* FIXME */
-       [110]    = KEY_KPASTERISK,      /* FIXME */
-       [111]    = KEY_KPASTERISK,      /* FIXME */
-       [112]    = KEY_KPASTERISK,      /* FIXME */
-       [113]    = KEY_KPASTERISK       /* FIXME */
+       [103]    = KEY_KP7,
+       [104]    = KEY_KP8,
+       [105]    = KEY_KP9,
+       [106]    = KEY_KP4,
+       [107]    = KEY_KP5,
+       [108]    = KEY_KP6,
+       [109]    = KEY_KP1,
+       [110]    = KEY_KP2,
+       [111]    = KEY_KP3,
+       [112]    = KEY_KP0,
+       [113]    = KEY_KPDOT,
+       [114]    = KEY_KPENTER,
 };
 
 static struct input_dev *atakbd_dev;
@@ -191,21 +179,15 @@ static struct input_dev *atakbd_dev;
 static void atakbd_interrupt(unsigned char scancode, char down)
 {
 
-       if (scancode < 0x72) {          /* scancodes < 0xf2 are keys */
+       if (scancode < 0x73) {          /* scancodes < 0xf3 are keys */
 
                // report raw events here?
 
                scancode = atakbd_keycode[scancode];
 
-               if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */
-                       input_report_key(atakbd_dev, scancode, 1);
-                       input_report_key(atakbd_dev, scancode, 0);
-                       input_sync(atakbd_dev);
-               } else {
-                       input_report_key(atakbd_dev, scancode, down);
-                       input_sync(atakbd_dev);
-               }
-       } else                          /* scancodes >= 0xf2 are mouse data, most likely */
+               input_report_key(atakbd_dev, scancode, down);
+               input_sync(atakbd_dev);
+       } else                          /* scancodes >= 0xf3 are mouse data, most likely */
                printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode);
 
        return;
index 96a887f336982f7efc0d971e145ded52829b2dc8..eb14ddf693467b4619a9501aa5e712a9b45dfcdf 100644 (file)
@@ -410,7 +410,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
        min = abs->minimum;
        max = abs->maximum;
 
-       if ((min != 0 || max != 0) && max <= min) {
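+       /* a degenerate range (min == max) is valid; only max < min is rejected */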
+       if ((min != 0 || max != 0) && max < min) {
                printk(KERN_DEBUG
                       "%s: invalid abs[%02x] min:%d max:%d\n",
                       UINPUT_NAME, code, min, max);
index 44f57cf6675bbf10ed6fd5bc8b9900936e76a086..2d95e8d93cc761aefb102217473d940faf1e4d02 100644 (file)
@@ -1178,6 +1178,8 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
 static const char * const middle_button_pnp_ids[] = {
        "LEN2131", /* ThinkPad P52 w/ NFC */
        "LEN2132", /* ThinkPad P52 */
+       "LEN2133", /* ThinkPad P72 w/ NFC */
+       "LEN2134", /* ThinkPad P72 */
        NULL
 };
 
index 80e69bb8283e4b417c677637889654e0b1cadf30..83ac8c128192846f21a9aeb40d80b39869a1c245 100644 (file)
@@ -241,6 +241,9 @@ static int __maybe_unused egalax_ts_suspend(struct device *dev)
        struct i2c_client *client = to_i2c_client(dev);
        int ret;
 
+       if (device_may_wakeup(dev))
+               return enable_irq_wake(client->irq);
+
        ret = i2c_master_send(client, suspend_cmd, MAX_I2C_DATA_LEN);
        return ret > 0 ? 0 : ret;
 }
@@ -249,6 +252,9 @@ static int __maybe_unused egalax_ts_resume(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
 
+       if (device_may_wakeup(dev))
+               return disable_irq_wake(client->irq);
+
        return egalax_wake_up_device(client);
 }
 
index 4e04fff23977348877cce7a00d4ae394fadf91ef..bee0dfb7b93b1b219b393405d32870cab9964cf1 100644 (file)
@@ -246,7 +246,13 @@ static u16 get_alias(struct device *dev)
 
        /* The callers make sure that get_device_id() does not fail here */
        devid = get_device_id(dev);
+
+       /* For ACPI HID devices, we simply return the devid as such */
+       if (!dev_is_pci(dev))
+               return devid;
+
        ivrs_alias = amd_iommu_alias_table[devid];
+
        pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
 
        if (ivrs_alias == pci_alias)
@@ -3063,7 +3069,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
                return 0;
 
        offset_mask = pte_pgsize - 1;
-       __pte       = *pte & PM_ADDR_MASK;
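+       /* strip the SME encryption bit so the returned physical address is usable */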
+       __pte       = __sme_clr(*pte & PM_ADDR_MASK);
 
        return (__pte & ~offset_mask) | (iova & offset_mask);
 }
index 5f3f10cf9d9d0fecb1fc5747c60cbe9d4f9034b1..bedc801b06a0bf2c6745511acbab08e769a54eb2 100644 (file)
@@ -2540,9 +2540,9 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
        if (dev && dev_is_pci(dev) && info->pasid_supported) {
                ret = intel_pasid_alloc_table(dev);
                if (ret) {
-                       __dmar_remove_one_dev_info(info);
-                       spin_unlock_irqrestore(&device_domain_lock, flags);
-                       return NULL;
+                       pr_warn("No pasid table for %s, pasid disabled\n",
+                               dev_name(dev));
+                       info->pasid_supported = 0;
                }
        }
        spin_unlock_irqrestore(&device_domain_lock, flags);
index 1c05ed6fc5a596383c7ed94d7bac69de38d4554d..1fb5e12b029ac717379ec74544c83254ea0c0259 100644 (file)
@@ -11,7 +11,7 @@
 #define __INTEL_PASID_H
 
 #define PASID_MIN                      0x1
-#define PASID_MAX                      0x100000
+#define PASID_MAX                      0x20000
 
 struct pasid_entry {
        u64 val;
index 258115b10fa9e448129d84a0663ab99d51c64ee5..ad3e2b97469ed6de44f3524cafd677825d7afca5 100644 (file)
@@ -1241,6 +1241,12 @@ err_unprepare_clocks:
 
 static void rk_iommu_shutdown(struct platform_device *pdev)
 {
+       struct rk_iommu *iommu = platform_get_drvdata(pdev);
+       int i = 0, irq;
+
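+       /* release the IRQs so no handler can run once the IOMMU is suspended */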
+       while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
+               devm_free_irq(iommu->dev, irq, iommu);
+
        pm_runtime_force_suspend(&pdev->dev);
 }
 
index bc208557f7839853b85776a9a6b8acb6d556d41c..c0cbee06bc21164660d93fb32c4110e5866d5735 100644 (file)
@@ -65,7 +65,7 @@ static unsigned cmd_loop(unsigned numbytes, struct inbuf_t *inbuf)
                                cs->respdata[0] = 0;
                                break;
                        }
-                       /* --v-- fall through --v-- */
+                       /* fall through */
                case '\r':
                        /* end of message line, pass to response handler */
                        if (cbytes >= MAX_RESP_SIZE) {
@@ -100,7 +100,7 @@ static unsigned cmd_loop(unsigned numbytes, struct inbuf_t *inbuf)
                                goto exit;
                        }
                        /* quoted or not in DLE mode: treat as regular data */
-                       /* --v-- fall through --v-- */
+                       /* fall through */
                default:
                        /* append to line buffer if possible */
                        if (cbytes < MAX_RESP_SIZE)
index 1cfcea62aed995d701cf2f11bd882d8aae6e1407..182826e9d07c58437cd40a3368f2d177282e83c2 100644 (file)
@@ -1036,7 +1036,7 @@ static void handle_icall(struct cardstate *cs, struct bc_state *bcs,
                break;
        default:
                dev_err(cs->dev, "internal error: disposition=%d\n", retval);
-               /* --v-- fall through --v-- */
+               /* fall through */
        case ICALL_IGNORE:
        case ICALL_REJECT:
                /* hang up actively
@@ -1319,7 +1319,7 @@ static void do_action(int action, struct cardstate *cs,
                        cs->commands_pending = 1;
                        break;
                }
-               /* bad cid: fall through */
+               /* fall through - bad cid */
        case ACT_FAILCID:
                cs->cur_at_seq = SEQ_NONE;
                channel = cs->curchannel;
index 97e00118ccfe8ace07e83526651df27926f2f271..f9264ba0fe770b4fdc0ed364c9c94c9fe0b519c0 100644 (file)
@@ -906,7 +906,7 @@ static void cmd_loop(unsigned char *src, int numbytes, struct inbuf_t *inbuf)
                                cs->respdata[0] = 0;
                                break;
                        }
-                       /* --v-- fall through --v-- */
+                       /* fall through */
                case '\r':
                        /* end of message line, pass to response handler */
                        if (cbytes >= MAX_RESP_SIZE) {
index bb8e4b7e34ea8d7011d57812c90777d680066bdc..36eefaa3a7d9ac0a726b40bcc4dd32e5dfdb379b 100644 (file)
@@ -72,7 +72,7 @@ W6692_new_ph(struct IsdnCardState *cs)
        case (W_L1CMD_RST):
                ph_command(cs, W_L1CMD_DRC);
                l1_msg(cs, HW_RESET | INDICATION, NULL);
-               /* fallthru */
+               /* fall through */
        case (W_L1IND_CD):
                l1_msg(cs, HW_DEACTIVATE | CONFIRM, NULL);
                break;
index 83504dd8100ab2a80d7f0e737e50266de41add32..954dad29e6e8fca910b0ebd24171591f2acd0831 100644 (file)
@@ -965,6 +965,7 @@ void bch_prio_write(struct cache *ca);
 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
 
 extern struct workqueue_struct *bcache_wq;
+extern struct workqueue_struct *bch_journal_wq;
 extern struct mutex bch_register_lock;
 extern struct list_head bch_cache_sets;
 
index 6116bbf870d8ef9004717bf5d04803895d085bab..522c7426f3a05cee10df0e984521472849b9b707 100644 (file)
@@ -485,7 +485,7 @@ static void do_journal_discard(struct cache *ca)
 
                closure_get(&ca->set->cl);
                INIT_WORK(&ja->discard_work, journal_discard_work);
-               schedule_work(&ja->discard_work);
+               queue_work(bch_journal_wq, &ja->discard_work);
        }
 }
 
@@ -592,7 +592,7 @@ static void journal_write_done(struct closure *cl)
                : &j->w[0];
 
        __closure_wake_up(&w->wait);
-       continue_at_nobarrier(cl, journal_write, system_wq);
+       continue_at_nobarrier(cl, journal_write, bch_journal_wq);
 }
 
 static void journal_write_unlock(struct closure *cl)
@@ -627,7 +627,7 @@ static void journal_write_unlocked(struct closure *cl)
                spin_unlock(&c->journal.lock);
 
                btree_flush_write(c);
-               continue_at(cl, journal_write, system_wq);
+               continue_at(cl, journal_write, bch_journal_wq);
                return;
        }
 
index 94c756c66bd7216a6d83b67eaef891269f965467..30ba9aeb5ee8345ac192e34e51e67beae2127950 100644 (file)
@@ -47,6 +47,7 @@ static int bcache_major;
 static DEFINE_IDA(bcache_device_idx);
 static wait_queue_head_t unregister_wait;
 struct workqueue_struct *bcache_wq;
+struct workqueue_struct *bch_journal_wq;
 
 #define BTREE_MAX_PAGES                (256 * 1024 / PAGE_SIZE)
 /* limitation of partitions number on single bcache device */
@@ -2341,6 +2342,9 @@ static void bcache_exit(void)
                kobject_put(bcache_kobj);
        if (bcache_wq)
                destroy_workqueue(bcache_wq);
+       if (bch_journal_wq)
+               destroy_workqueue(bch_journal_wq);
+
        if (bcache_major)
                unregister_blkdev(bcache_major, "bcache");
        unregister_reboot_notifier(&reboot);
@@ -2370,6 +2374,10 @@ static int __init bcache_init(void)
        if (!bcache_wq)
                goto err;
 
+       bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
+       if (!bch_journal_wq)
+               goto err;
+
        bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
        if (!bcache_kobj)
                goto err;
index 69dddeab124c2e1ac075dc24c14ebe725647e3a3..5936de71883fb7f637b0192bf3144910d6e193d5 100644 (file)
@@ -1455,8 +1455,8 @@ static int __load_mappings(struct dm_cache_metadata *cmd,
                if (hints_valid) {
                        r = dm_array_cursor_next(&cmd->hint_cursor);
                        if (r) {
-                               DMERR("dm_array_cursor_next for hint failed");
-                               goto out;
+                               dm_array_cursor_end(&cmd->hint_cursor);
+                               hints_valid = false;
                        }
                }
 
index a534133717254a88eb91e025c7731e408fbbdfad..e13d991e9fb52eff6176e2a275c8a8d2342b6701 100644 (file)
@@ -3009,8 +3009,13 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
 
 static bool can_resize(struct cache *cache, dm_cblock_t new_size)
 {
-       if (from_cblock(new_size) > from_cblock(cache->cache_size))
-               return true;
+       if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
+               if (cache->sized) {
+                       DMERR("%s: unable to extend cache due to missing cache table reload",
+                             cache_device_name(cache));
+                       return false;
+               }
+       }
 
        /*
         * We can't drop a dirty block when shrinking the cache.
index d94ba6f72ff59e3723cc67337ff6bca21a472fda..419362c2d8aca1b95e745633f937ecd2a3325aed 100644 (file)
@@ -806,19 +806,19 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
 }
 
 static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
-                        const char *attached_handler_name, char **error)
+                        const char **attached_handler_name, char **error)
 {
        struct request_queue *q = bdev_get_queue(bdev);
        int r;
 
        if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
 retain:
-               if (attached_handler_name) {
+               if (*attached_handler_name) {
                        /*
                         * Clear any hw_handler_params associated with a
                         * handler that isn't already attached.
                         */
-                       if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
+                       if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
                                kfree(m->hw_handler_params);
                                m->hw_handler_params = NULL;
                        }
@@ -830,7 +830,8 @@ retain:
                         * handler instead of the original table passed in.
                         */
                        kfree(m->hw_handler_name);
-                       m->hw_handler_name = attached_handler_name;
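+                       /* take ownership; clearing the caller's copy prevents a double free */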
+                       m->hw_handler_name = *attached_handler_name;
+                       *attached_handler_name = NULL;
                }
        }
 
@@ -867,7 +868,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
        struct pgpath *p;
        struct multipath *m = ti->private;
        struct request_queue *q;
-       const char *attached_handler_name;
+       const char *attached_handler_name = NULL;
 
        /* we need at least a path arg */
        if (as->argc < 1) {
@@ -890,7 +891,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
        attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
        if (attached_handler_name || m->hw_handler_name) {
                INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
-               r = setup_scsi_dh(p->path.dev->bdev, m, attached_handler_name, &ti->error);
+               r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
                if (r) {
                        dm_put_device(ti, p->path.dev);
                        goto bad;
@@ -905,6 +906,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 
        return p;
  bad:
+       kfree(attached_handler_name);
        free_pgpath(p);
        return ERR_PTR(r);
 }
index 5ba067fa0c729bc89b7789bd35648b1189003b27..c44925e4e4813d246d0d208fef5587019e4afb17 100644 (file)
@@ -3353,7 +3353,7 @@ static const char *sync_str(enum sync_state state)
 };
 
 /* Return enum sync_state for @mddev derived from @recovery flags */
-static const enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
+static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
 {
        if (test_bit(MD_RECOVERY_FROZEN, &recovery))
                return st_frozen;
index 74f6770c70b12404e965345ec561318bf2225fcb..20b0776e39ef3307aa5c418016afff4758850a68 100644 (file)
@@ -832,10 +832,8 @@ static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
        if (r) {
                DMERR("could not get size of metadata device");
                pmd->metadata_reserve = max_blocks;
-       } else {
-               sector_div(total, 10);
-               pmd->metadata_reserve = min(max_blocks, total);
-       }
+       } else
+               pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
 }
 
 struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
index 127fe6eb91d9832289124a399b03b2d6c88715c3..a3ef1f50a4b3496dcfbe7cb4332a3bab9a3acc56 100644 (file)
@@ -115,14 +115,6 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
        if (sev == NULL)
                return;
 
-       /*
-        * If the event has been added to the fh->subscribed list, but its
-        * add op has not completed yet elems will be 0, treat this as
-        * not being subscribed.
-        */
-       if (!sev->elems)
-               return;
-
        /* Increase event sequence number on fh. */
        fh->sequence++;
 
@@ -208,6 +200,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
        struct v4l2_subscribed_event *sev, *found_ev;
        unsigned long flags;
        unsigned i;
+       int ret = 0;
 
        if (sub->type == V4L2_EVENT_ALL)
                return -EINVAL;
@@ -225,31 +218,36 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
        sev->flags = sub->flags;
        sev->fh = fh;
        sev->ops = ops;
+       sev->elems = elems;
+
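+       /* serialize subscribe/unsubscribe so add/del callbacks cannot race */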
+       mutex_lock(&fh->subscribe_lock);
 
        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
        found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
-       if (!found_ev)
-               list_add(&sev->list, &fh->subscribed);
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
        if (found_ev) {
+               /* Already listening */
                kvfree(sev);
-               return 0; /* Already listening */
+               goto out_unlock;
        }
 
        if (sev->ops && sev->ops->add) {
-               int ret = sev->ops->add(sev, elems);
+               ret = sev->ops->add(sev, elems);
                if (ret) {
-                       sev->ops = NULL;
-                       v4l2_event_unsubscribe(fh, sub);
-                       return ret;
+                       kvfree(sev);
+                       goto out_unlock;
                }
        }
 
-       /* Mark as ready for use */
-       sev->elems = elems;
+       spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+       list_add(&sev->list, &fh->subscribed);
+       spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
-       return 0;
+out_unlock:
+       mutex_unlock(&fh->subscribe_lock);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
 
@@ -288,6 +286,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
                return 0;
        }
 
+       mutex_lock(&fh->subscribe_lock);
+
        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 
        sev = v4l2_event_subscribed(fh, sub->type, sub->id);
@@ -305,6 +305,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
        if (sev && sev->ops && sev->ops->del)
                sev->ops->del(sev);
 
+       mutex_unlock(&fh->subscribe_lock);
+
        kvfree(sev);
 
        return 0;
index 3895999bf8805c3c208a4f042cb59b786a6ec44c..c91a7bd3ecfc7d14853b56a8de273d35ac0ff870 100644 (file)
@@ -45,6 +45,7 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
        INIT_LIST_HEAD(&fh->available);
        INIT_LIST_HEAD(&fh->subscribed);
        fh->sequence = -1;
+       mutex_init(&fh->subscribe_lock);
 }
 EXPORT_SYMBOL_GPL(v4l2_fh_init);
 
@@ -90,6 +91,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
                return;
        v4l_disable_media_source(fh->vdev);
        v4l2_event_unsubscribe_all(fh);
+       mutex_destroy(&fh->subscribe_lock);
        fh->vdev = NULL;
 }
 EXPORT_SYMBOL_GPL(v4l2_fh_exit);
index abf9e884386c4cc42edee4113bb1a989167dba55..f57f5de5420647619714c65861896252700d302c 100644 (file)
@@ -235,7 +235,7 @@ int mmc_of_parse(struct mmc_host *host)
                        host->caps |= MMC_CAP_NEEDS_POLL;
 
                ret = mmc_gpiod_request_cd(host, "cd", 0, true,
-                                          cd_debounce_delay_ms,
+                                          cd_debounce_delay_ms * 1000,
                                           &cd_gpio_invert);
                if (!ret)
                        dev_info(host->parent, "Got CD GPIO\n");
index 2a833686784b6b459d9744b366ef22cb5ca1279c..86803a3a04dc9609a0c55de2df03f3a9e8cb1341 100644 (file)
@@ -271,7 +271,7 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
        if (debounce) {
                ret = gpiod_set_debounce(desc, debounce);
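+               /* debounce is in microseconds here; on failure fall back to ms-based polling */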
                if (ret < 0)
-                       ctx->cd_debounce_delay_ms = debounce;
+                       ctx->cd_debounce_delay_ms = debounce / 1000;
        }
 
        if (gpio_invert)
index 890f192dedbdcc9cb693c2c4159c1b0727cad216..5389c48218820166209a7de463c01084366b1fe4 100644 (file)
@@ -498,7 +498,8 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = {
 
 static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
 {
-       if (of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible &&
+       if ((of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible ||
+           of_device_get_match_data(&pdev->dev) == &of_rcar_r8a7795_compatible) &&
            !soc_device_match(gen3_soc_whitelist))
                return -ENODEV;
 
index 0d87e11e7f1d84537fe43d95249b1bd3a2ce291d..ee28ec9e0abaddd13053da9fda5c0cefc722555d 100644 (file)
@@ -210,6 +210,7 @@ static void bond_get_stats(struct net_device *bond_dev,
 static void bond_slave_arr_handler(struct work_struct *work);
 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
                                  int mod);
+static void bond_netdev_notify_work(struct work_struct *work);
 
 /*---------------------------- General routines -----------------------------*/
 
@@ -1170,9 +1171,27 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
                }
        }
 
-       /* don't change skb->dev for link-local packets */
-       if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+       /* Link-local multicast packets should be passed to the
+        * stack on the link they arrive on, as well as to the
+        * bond-master device. These packets are mostly useful when
+        * the stack receives them on the link on which they arrive
+        * (e.g. LLDP), but they must also be available on the master.
+        * Use cases include (but are not limited to): LLDP agents
+        * that must be able to operate both on enslaved interfaces
+        * as well as on bonds themselves; linux bridges that must be
+        * able to process/pass BPDUs from attached bonds when any
+        * kind of STP version is enabled on the network.
+        */
+       if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
+               struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+               if (nskb) {
+                       nskb->dev = bond->dev;
+                       nskb->queue_mapping = 0;
+                       netif_rx(nskb);
+               }
                return RX_HANDLER_PASS;
+       }
        if (bond_should_deliver_exact_match(skb, slave, bond))
                return RX_HANDLER_EXACT;
 
@@ -1269,6 +1288,8 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
                        return NULL;
                }
        }
+       INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
+
        return slave;
 }
 
@@ -1276,6 +1297,7 @@ static void bond_free_slave(struct slave *slave)
 {
        struct bonding *bond = bond_get_bond_by_slave(slave);
 
+       cancel_delayed_work_sync(&slave->notify_work);
        if (BOND_MODE(bond) == BOND_MODE_8023AD)
                kfree(SLAVE_AD_INFO(slave));
 
@@ -1297,39 +1319,26 @@ static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
        info->link_failure_count = slave->link_failure_count;
 }
 
-static void bond_netdev_notify(struct net_device *dev,
-                              struct netdev_bonding_info *info)
-{
-       rtnl_lock();
-       netdev_bonding_info_change(dev, info);
-       rtnl_unlock();
-}
-
 static void bond_netdev_notify_work(struct work_struct *_work)
 {
-       struct netdev_notify_work *w =
-               container_of(_work, struct netdev_notify_work, work.work);
+       struct slave *slave = container_of(_work, struct slave,
+                                          notify_work.work);
+
+       if (rtnl_trylock()) {
+               struct netdev_bonding_info binfo;
 
-       bond_netdev_notify(w->dev, &w->bonding_info);
-       dev_put(w->dev);
-       kfree(w);
+               bond_fill_ifslave(slave, &binfo.slave);
+               bond_fill_ifbond(slave->bond, &binfo.master);
+               netdev_bonding_info_change(slave->dev, &binfo);
+               rtnl_unlock();
+       } else {
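+               /* rtnl is held elsewhere; retry shortly instead of blocking */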
+               queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
+       }
 }
 
 void bond_queue_slave_event(struct slave *slave)
 {
-       struct bonding *bond = slave->bond;
-       struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
-
-       if (!nnw)
-               return;
-
-       dev_hold(slave->dev);
-       nnw->dev = slave->dev;
-       bond_fill_ifslave(slave, &nnw->bonding_info.slave);
-       bond_fill_ifbond(bond, &nnw->bonding_info.master);
-       INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
-
-       queue_delayed_work(slave->bond->wq, &nnw->work, 0);
+       queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
 }
 
 void bond_lower_state_changed(struct slave *slave)
index e83ebfafd881fac51e2ae968936b2fc52604fb44..d32469283f972964542e2f605645507724d39970 100644 (file)
@@ -31,6 +31,7 @@ config B53_MMAP_DRIVER
 config B53_SRAB_DRIVER
        tristate "B53 SRAB connected switch driver"
        depends on B53 && HAS_IOMEM
+       depends on B53_SERDES || !B53_SERDES
        default ARCH_BCM_IPROC
        help
          Select to enable support for memory-mapped Switch Register Access
index 700d86dd5e130b46531cb2b5da6bfa353f310989..0e4bbdcc614f073c7ec7dbcbb3a83291dbeadbf0 100644 (file)
@@ -1291,7 +1291,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
                b53_get_vlan_entry(dev, vid, vl);
 
                vl->members |= BIT(port);
-               if (untagged)
+               if (untagged && !dsa_is_cpu_port(ds, port))
                        vl->untag |= BIT(port);
                else
                        vl->untag &= ~BIT(port);
@@ -1333,7 +1333,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
                                pvid = 0;
                }
 
-               if (untagged)
+               if (untagged && !dsa_is_cpu_port(ds, port))
                        vl->untag &= ~(BIT(port));
 
                b53_set_vlan_entry(dev, vid, vl);
index 62e486652e622074b1ca1615e02a6cf029cc667b..a5de9bffe5bec940a83186d731bfe77ac23f97d3 100644 (file)
@@ -658,11 +658,7 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port,
                        if (phydev->asym_pause)
                                rmt_adv |= LPA_PAUSE_ASYM;
 
-                       if (phydev->advertising & ADVERTISED_Pause)
-                               lcl_adv |= ADVERTISE_PAUSE_CAP;
-                       if (phydev->advertising & ADVERTISED_Asym_Pause)
-                               lcl_adv |= ADVERTISE_PAUSE_ASYM;
-
+                       lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
                        flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
 
                        if (flowctrl & FLOW_CTRL_TX)
index b2522e84f482564812808d587e197495555348d5..13eb6a4d98d516e716ea71260ddfa526b11f27ba 100644 (file)
@@ -2184,25 +2184,6 @@ error_drop_packet:
        return NETDEV_TX_OK;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ena_netpoll(struct net_device *netdev)
-{
-       struct ena_adapter *adapter = netdev_priv(netdev);
-       int i;
-
-       /* Dont schedule NAPI if the driver is in the middle of reset
-        * or netdev is down.
-        */
-
-       if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
-           test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
-               return;
-
-       for (i = 0; i < adapter->num_queues; i++)
-               napi_schedule(&adapter->ena_napi[i].napi);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
                            struct net_device *sb_dev,
                            select_queue_fallback_t fallback)
@@ -2368,9 +2349,6 @@ static const struct net_device_ops ena_netdev_ops = {
        .ndo_change_mtu         = ena_change_mtu,
        .ndo_set_mac_address    = NULL,
        .ndo_validate_addr      = eth_validate_addr,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ena_netpoll,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
 };
 
 static int ena_device_validate_params(struct ena_adapter *adapter,
index 29ebbf58201083df604b86b6daf2adead98e8823..9f23703dd509f84981596313a1dbb066efa73479 100644 (file)
@@ -1031,6 +1031,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
        int i, ret;
        unsigned long esar_base;
        unsigned char *esar;
+       const char *desc;
 
        if (dec_lance_debug && version_printed++ == 0)
                printk(version);
@@ -1216,19 +1217,20 @@ static int dec_lance_probe(struct device *bdev, const int type)
         */
        switch (type) {
        case ASIC_LANCE:
-               printk("%s: IOASIC onboard LANCE", name);
+               desc = "IOASIC onboard LANCE";
                break;
        case PMAD_LANCE:
-               printk("%s: PMAD-AA", name);
+               desc = "PMAD-AA";
                break;
        case PMAX_LANCE:
-               printk("%s: PMAX onboard LANCE", name);
+               desc = "PMAX onboard LANCE";
                break;
        }
        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = esar[i * 4];
 
-       printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
+       printk("%s: %s, addr = %pM, irq = %d\n",
+              name, desc, dev->dev_addr, dev->irq);
 
        dev->netdev_ops = &lance_netdev_ops;
        dev->watchdog_timeo = 5*HZ;
index 289129011b9fc3857bb2562a3a2e052c44ee0e18..151bdb629e8a3b6d9fa53727eeba279e7c6e890d 100644 (file)
@@ -878,8 +878,9 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
        phy_write(phy_data->phydev, 0x04, 0x0d01);
        phy_write(phy_data->phydev, 0x00, 0x9140);
 
-       phy_data->phydev->supported = PHY_GBIT_FEATURES;
-       phy_data->phydev->advertising = phy_data->phydev->supported;
+       phy_data->phydev->supported = PHY_10BT_FEATURES |
+                                     PHY_100BT_FEATURES |
+                                     PHY_1000BT_FEATURES;
        phy_support_asym_pause(phy_data->phydev);
 
        netif_dbg(pdata, drv, pdata->netdev,
@@ -950,8 +951,9 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
        reg = phy_read(phy_data->phydev, 0x00);
        phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
 
-       phy_data->phydev->supported = PHY_GBIT_FEATURES;
-       phy_data->phydev->advertising = phy_data->phydev->supported;
+       phy_data->phydev->supported = (PHY_10BT_FEATURES |
+                                      PHY_100BT_FEATURES |
+                                      PHY_1000BT_FEATURES);
        phy_support_asym_pause(phy_data->phydev);
 
        netif_dbg(pdata, drv, pdata->netdev,
@@ -1495,10 +1497,7 @@ static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata)
        if (!phy_data->phydev)
                return;
 
-       if (phy_data->phydev->advertising & ADVERTISED_Pause)
-               lcl_adv |= ADVERTISE_PAUSE_CAP;
-       if (phy_data->phydev->advertising & ADVERTISED_Asym_Pause)
-               lcl_adv |= ADVERTISE_PAUSE_ASYM;
+       lcl_adv = ethtool_adv_to_lcl_adv_t(phy_data->phydev->advertising);
 
        if (phy_data->phydev->pause) {
                XGBE_SET_LP_ADV(lks, Pause);
index 0dd59b09060bfc7c66540841be6a7837506f6170..7def1cb8ab9d0a620b58f880d7d6e3f7682c4e89 100644 (file)
@@ -870,8 +870,8 @@ err_exit:
        return err;
 }
 
-int aq_fw1x_set_power(struct aq_hw_s *self, unsigned int power_state,
-                     u8 *mac)
+static int aq_fw1x_set_power(struct aq_hw_s *self, unsigned int power_state,
+                            u8 *mac)
 {
        struct hw_atl_utils_fw_rpc *prpc = NULL;
        unsigned int rpc_size = 0U;
index 147045757b103309e2656f36bc3337acef7b383f..4122553e224b294d4eff1828201e467fcc5a60b9 100644 (file)
@@ -126,8 +126,8 @@ static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
 }
 
 /* Ethtool operations */
-static int bcm_sysport_set_rx_csum(struct net_device *dev,
-                                  netdev_features_t wanted)
+static void bcm_sysport_set_rx_csum(struct net_device *dev,
+                                   netdev_features_t wanted)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        u32 reg;
@@ -157,12 +157,10 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev,
                reg &= ~RXCHK_BRCM_TAG_EN;
 
        rxchk_writel(priv, reg, RXCHK_CONTROL);
-
-       return 0;
 }
 
-static int bcm_sysport_set_tx_csum(struct net_device *dev,
-                                  netdev_features_t wanted)
+static void bcm_sysport_set_tx_csum(struct net_device *dev,
+                                   netdev_features_t wanted)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        u32 reg;
@@ -177,23 +175,24 @@ static int bcm_sysport_set_tx_csum(struct net_device *dev,
        else
                reg &= ~tdma_control_bit(priv, TSB_EN);
        tdma_writel(priv, reg, TDMA_CONTROL);
-
-       return 0;
 }
 
 static int bcm_sysport_set_features(struct net_device *dev,
                                    netdev_features_t features)
 {
-       netdev_features_t changed = features ^ dev->features;
-       netdev_features_t wanted = dev->wanted_features;
-       int ret = 0;
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
 
-       if (changed & NETIF_F_RXCSUM)
-               ret = bcm_sysport_set_rx_csum(dev, wanted);
-       if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
-               ret = bcm_sysport_set_tx_csum(dev, wanted);
+       /* Read CRC forward */
+       if (!priv->is_lite)
+               priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
+       else
+               priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
+                                 GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
 
-       return ret;
+       bcm_sysport_set_rx_csum(dev, features);
+       bcm_sysport_set_tx_csum(dev, features);
+
+       return 0;
 }
 
 /* Hardware counters must be kept in sync because the order/offset
@@ -285,6 +284,8 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
        STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
        STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
        STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
+       STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb),
+       STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed),
        /* Per TX-queue statistics are dynamically appended */
 };
 
@@ -1069,9 +1070,6 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
 {
        u32 reg;
 
-       /* Stop monitoring MPD interrupt */
-       intrl2_0_mask_set(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
-
        /* Disable RXCHK, active filters and Broadcom tag matching */
        reg = rxchk_readl(priv, RXCHK_CONTROL);
        reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
@@ -1081,6 +1079,17 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
        /* Clear the MagicPacket detection logic */
        mpd_enable_set(priv, false);
 
+       reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
+       if (reg & INTRL2_0_MPD)
+               netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
+
+       if (reg & INTRL2_0_BRCM_MATCH_TAG) {
+               reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
+                                 RXCHK_BRCM_TAG_MATCH_MASK;
+               netdev_info(priv->netdev,
+                           "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
+       }
+
        netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
 }
 
@@ -1105,7 +1114,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct bcm_sysport_tx_ring *txr;
        unsigned int ring, ring_bit;
-       u32 reg;
 
        priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
                          ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
@@ -1131,16 +1139,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
        if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
                bcm_sysport_tx_reclaim_all(priv);
 
-       if (priv->irq0_stat & INTRL2_0_MPD)
-               netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
-
-       if (priv->irq0_stat & INTRL2_0_BRCM_MATCH_TAG) {
-               reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
-                                 RXCHK_BRCM_TAG_MATCH_MASK;
-               netdev_info(priv->netdev,
-                           "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
-       }
-
        if (!priv->is_lite)
                goto out;
 
@@ -1221,6 +1219,7 @@ static void bcm_sysport_poll_controller(struct net_device *dev)
 static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
                                              struct net_device *dev)
 {
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct sk_buff *nskb;
        struct bcm_tsb *tsb;
        u32 csum_info;
@@ -1231,13 +1230,16 @@ static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
        /* Re-allocate SKB if needed */
        if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
                nskb = skb_realloc_headroom(skb, sizeof(*tsb));
-               dev_kfree_skb(skb);
                if (!nskb) {
+                       dev_kfree_skb_any(skb);
+                       priv->mib.tx_realloc_tsb_failed++;
                        dev->stats.tx_errors++;
                        dev->stats.tx_dropped++;
                        return NULL;
                }
+               dev_consume_skb_any(skb);
                skb = nskb;
+               priv->mib.tx_realloc_tsb++;
        }
 
        tsb = skb_push(skb, sizeof(*tsb));
@@ -1973,16 +1975,14 @@ static int bcm_sysport_open(struct net_device *dev)
        else
                gib_set_pad_extension(priv);
 
+       /* Apply features again in case we changed them while interface was
+        * down
+        */
+       bcm_sysport_set_features(dev, dev->features);
+
        /* Set MAC address */
        umac_set_hw_addr(priv, dev->dev_addr);
 
-       /* Read CRC forward */
-       if (!priv->is_lite)
-               priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
-       else
-               priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
-                                 GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
-
        phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
                                0, priv->phy_interface);
        if (!phydev) {
@@ -2511,9 +2511,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
        dev->netdev_ops = &bcm_sysport_netdev_ops;
        netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
 
-       /* HW supported features, none enabled by default */
-       dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
-                               NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+       dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
+                        NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+       dev->hw_features |= dev->features;
+       dev->vlan_features |= dev->features;
 
        /* Request the WOL interrupt and advertise suspend if available */
        priv->wol_irq_disabled = 1;
@@ -2641,9 +2642,6 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
        /* UniMAC receive needs to be turned on */
        umac_enable_set(priv, CMD_RX_EN, 1);
 
-       /* Enable the interrupt wake-up source */
-       intrl2_0_mask_clear(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
-
        netif_dbg(priv, wol, ndev, "entered WOL mode\n");
 
        return 0;
@@ -2716,7 +2714,6 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
        struct net_device *dev = dev_get_drvdata(d);
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        unsigned int i;
-       u32 reg;
        int ret;
 
        if (!netif_running(dev))
@@ -2760,12 +2757,8 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
                goto out_free_rx_ring;
        }
 
-       /* Enable rxhck */
-       if (priv->rx_chk_en) {
-               reg = rxchk_readl(priv, RXCHK_CONTROL);
-               reg |= RXCHK_EN;
-               rxchk_writel(priv, reg, RXCHK_CONTROL);
-       }
+       /* Restore enabled features */
+       bcm_sysport_set_features(dev, dev->features);
 
        rbuf_init(priv);
 
index 046c6c1d97fd705608ab14cbac129c529b5548a7..a7a230884a87116c251404e95c01ae27be47c402 100644 (file)
@@ -607,6 +607,8 @@ struct bcm_sysport_mib {
        u32 alloc_rx_buff_failed;
        u32 rx_dma_failed;
        u32 tx_dma_failed;
+       u32 tx_realloc_tsb;
+       u32 tx_realloc_tsb_failed;
 };
 
 /* HW maintains a large list of counters */
index 61957b0bbd8c9f46773ff26ac9d14759b96c3960..e2d92548226ad01eda69694876a85478e539c930 100644 (file)
@@ -1884,8 +1884,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
                if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
                        tx_pkts++;
                        /* return full budget so NAPI will complete. */
-                       if (unlikely(tx_pkts > bp->tx_wake_thresh))
+                       if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
                                rx_pkts = budget;
+                               raw_cons = NEXT_RAW_CMP(raw_cons);
+                               break;
+                       }
                } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
                        if (likely(budget))
                                rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
@@ -1913,7 +1916,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
                }
                raw_cons = NEXT_RAW_CMP(raw_cons);
 
-               if (rx_pkts == budget)
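+               /* budget may be zero (netpoll); only stop early when real RX work filled it */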
+               if (rx_pkts && rx_pkts == budget)
                        break;
        }
 
@@ -2027,8 +2030,12 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
        while (1) {
                work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
 
-               if (work_done >= budget)
+               if (work_done >= budget) {
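+                       /* a zero budget (netpoll) still requires the doorbell to be re-armed */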
+                       if (!budget)
+                               BNXT_CP_DB_REARM(cpr->cp_doorbell,
+                                                cpr->cp_raw_cons);
                        break;
+               }
 
                if (!bnxt_has_work(bp, cpr)) {
                        if (napi_complete_done(napi, work_done))
@@ -3010,10 +3017,11 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
 {
        struct pci_dev *pdev = bp->pdev;
 
-       dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
-                         bp->hwrm_cmd_resp_dma_addr);
-
-       bp->hwrm_cmd_resp_addr = NULL;
+       if (bp->hwrm_cmd_resp_addr) {
+               dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
+                                 bp->hwrm_cmd_resp_dma_addr);
+               bp->hwrm_cmd_resp_addr = NULL;
+       }
 }
 
 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
@@ -4643,7 +4651,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
                                      FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
                enables |= ring_grps ?
                           FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
-               enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+               enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
 
                req->num_rx_rings = cpu_to_le16(rx_rings);
                req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@@ -8614,7 +8622,7 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
        *max_tx = hw_resc->max_tx_rings;
        *max_rx = hw_resc->max_rx_rings;
        *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
-                       hw_resc->max_irqs);
+                       hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp));
        *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
        max_ring_grps = hw_resc->max_hw_ring_grps;
        if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
@@ -9050,6 +9058,7 @@ init_err_cleanup_tc:
        bnxt_clear_int_mode(bp);
 
 init_err_pci_clean:
+       bnxt_free_hwrm_resources(bp);
        bnxt_cleanup_pci(bp);
 
 init_err_free:
index ddc98c359488c29e03defe70ebac5a03d6a9415d..a85d2be986af48a7143566a5aa9739129275b126 100644 (file)
@@ -98,13 +98,13 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
        for (i = 0; i < max_tc; i++) {
-               u8 qidx;
+               u8 qidx = bp->tc_to_qidx[i];
 
                req.enables |= cpu_to_le32(
-                       QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);
+                       QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID <<
+                       qidx);
 
                memset(&cos2bw, 0, sizeof(cos2bw));
-               qidx = bp->tc_to_qidx[i];
                cos2bw.queue_id = bp->q_info[qidx].queue_id;
                if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
                        cos2bw.tsa =
index 790c684f08abcab21980cd9a8479f27cf516d464..140dbd62106d66dfcb8d01144de8eb22085c05ba 100644 (file)
@@ -21,9 +21,22 @@ static const struct devlink_ops bnxt_dl_ops = {
 #endif /* CONFIG_BNXT_SRIOV */
 };
 
+enum bnxt_dl_param_id {
+       BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+       BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
+};
+
 static const struct bnxt_dl_nvm_param nvm_params[] = {
        {DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV,
         BNXT_NVM_SHARED_CFG, 1},
+       {DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, NVM_OFF_IGNORE_ARI,
+        BNXT_NVM_SHARED_CFG, 1},
+       {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+        NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10},
+       {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+        NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7},
+       {BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
+        BNXT_NVM_SHARED_CFG, 1},
 };
 
 static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
@@ -55,8 +68,22 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
                idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
 
        bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE;
-       if (nvm_param.num_bits == 1)
-               buf = &val->vbool;
+       switch (bytesize) {
+       case 1:
+               if (nvm_param.num_bits == 1)
+                       buf = &val->vbool;
+               else
+                       buf = &val->vu8;
+               break;
+       case 2:
+               buf = &val->vu16;
+               break;
+       case 4:
+               buf = &val->vu32;
+               break;
+       default:
+               return -EFAULT;
+       }
 
        data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize,
                                        &data_dma_addr, GFP_KERNEL);
@@ -78,8 +105,12 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
                memcpy(buf, data_addr, bytesize);
 
        dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
-       if (rc)
+       if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
+               netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
+               return -EACCES;
+       } else if (rc) {
                return -EIO;
+       }
        return 0;
 }
 
@@ -88,9 +119,15 @@ static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
 {
        struct hwrm_nvm_get_variable_input req = {0};
        struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+       int rc;
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
-       return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
+       rc = bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
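+       /* NVM stores the "disable" flag, so invert it for the user-visible bool */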
+       if (!rc)
+               if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
+                       ctx->val.vbool = !ctx->val.vbool;
+
+       return rc;
 }
 
 static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
@@ -100,14 +137,55 @@ static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
        struct bnxt *bp = bnxt_get_bp_from_dl(dl);
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1);
+
+       if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
+               ctx->val.vbool = !ctx->val.vbool;
+
        return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
 }
 
+static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
+                                union devlink_param_value val,
+                                struct netlink_ext_ack *extack)
+{
+       int max_val = -1;
+
+       if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX)
+               max_val = BNXT_MSIX_VEC_MAX;
+
+       if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN)
+               max_val = BNXT_MSIX_VEC_MIN_MAX;
+
+       if (val.vu32 > max_val) {
+               NL_SET_ERR_MSG_MOD(extack, "MSIX value exceeds the allowed range");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static const struct devlink_param bnxt_dl_params[] = {
        DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
                              BIT(DEVLINK_PARAM_CMODE_PERMANENT),
                              bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
                              NULL),
+       DEVLINK_PARAM_GENERIC(IGNORE_ARI,
+                             BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+                             bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+                             NULL),
+       DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX,
+                             BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+                             bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+                             bnxt_dl_msix_validate),
+       DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN,
+                             BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+                             bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+                             bnxt_dl_msix_validate),
+       DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
+                            "gre_ver_check", DEVLINK_PARAM_TYPE_BOOL,
+                            BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+                            bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+                            NULL),
 };
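
Each entry above wires a permanent-cmode parameter to the NVM get/set helpers, with bnxt_dl_msix_validate() rejecting out-of-range MSI-X values before the set callback runs. A hedged usage sketch with iproute2's devlink (the PCI address is illustrative):

	devlink dev param show pci/0000:af:00.0 name gre_ver_check
	devlink dev param set pci/0000:af:00.0 name msix_vec_per_pf_max \
		value 1024 cmode permanent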
 
 int bnxt_dl_register(struct bnxt *bp)
index 2f68dc048390b84300cefd73a974fe3826bb3fdb..5b6b2c7d97cfadee3644d44ff6cce32caffeaa3e 100644 (file)
@@ -33,8 +33,15 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
        }
 }
 
+#define NVM_OFF_MSIX_VEC_PER_PF_MAX    108
+#define NVM_OFF_MSIX_VEC_PER_PF_MIN    114
+#define NVM_OFF_IGNORE_ARI             164
+#define NVM_OFF_DIS_GRE_VER_CHECK      171
 #define NVM_OFF_ENABLE_SRIOV           401
 
+#define BNXT_MSIX_VEC_MAX      1280
+#define BNXT_MSIX_VEC_MIN_MAX  128
+
 enum bnxt_nvm_dir_type {
        BNXT_NVM_SHARED_CFG = 40,
        BNXT_NVM_PORT_CFG,
index b574fe8e974ea1f36f16b2c048242c3772d7d739..9a25c05aa571e25d65f2c1dc153466a85c416cec 100644 (file)
@@ -521,7 +521,8 @@ int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode)
        return 0;
 }
 
-int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode)
+int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
+                            struct netlink_ext_ack *extack)
 {
        struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
        int rc = 0;
index 38b9a75ad7240bbfebecb436ed834ff5d2bad4dd..d7287651422f34c2c94eb37195e8a9326197f6b8 100644 (file)
@@ -30,7 +30,8 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
 
 bool bnxt_dev_is_vf_rep(struct net_device *dev);
 int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode);
-int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode);
+int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
+                            struct netlink_ext_ack *extack);
 
 #else
 
index eb96b0613cf6d3ecc750d556e005baba7c2c88a6..825a28e5b544dd7033e3ea1f4d614b8263df5903 100644 (file)
@@ -1732,7 +1732,7 @@ int liquidio_set_fec(struct lio *lio, int on_off)
        if (oct->props[lio->ifidx].fec !=
            oct->props[lio->ifidx].fec_boot) {
                dev_dbg(&oct->pci_dev->dev,
-                       "Reloade driver to chang fec to %s\n",
+                       "Reload driver to change fec to %s\n",
                        oct->props[lio->ifidx].fec ? "on" : "off");
        }
 
@@ -1796,7 +1796,7 @@ int liquidio_get_fec(struct lio *lio)
        if (oct->props[lio->ifidx].fec !=
            oct->props[lio->ifidx].fec_boot) {
                dev_dbg(&oct->pci_dev->dev,
-                       "Reloade driver to chang fec to %s\n",
+                       "Reload driver to change fec to %s\n",
                        oct->props[lio->ifidx].fec ? "on" : "off");
        }
 
index 9d70e5c6157ff480d2263dd94368fe92a46620bc..3d24133e5e495cd254d5c913835b8e00738e937e 100644 (file)
@@ -3144,7 +3144,8 @@ liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 }
 
 static int
-liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode)
+liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode,
+                         struct netlink_ext_ack *extack)
 {
        struct lio_devlink_priv *priv;
        struct octeon_device *oct;
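
The added struct netlink_ext_ack * argument lets .eswitch_mode_set callbacks return a human-readable reason along with the errno. A hedged sketch of the pattern (the callback name and message text are illustrative):

	static int example_eswitch_mode_set(struct devlink *devlink, u16 mode,
					    struct netlink_ext_ack *extack)
	{
		if (mode != DEVLINK_ESWITCH_MODE_LEGACY &&
		    mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) {
			/* String travels back to userspace with the error */
			NL_SET_ERR_MSG_MOD(extack, "Unsupported eswitch mode");
			return -EOPNOTSUPP;
		}
		return 0;
	}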
index 8b0a253a18d88af786d0bfbbfb2c8c813f262850..1e82b9efe447191799f8e5c881d32da7e7cf96ec 100644 (file)
@@ -2158,6 +2158,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                        return -EPERM;
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
+               if (t.cmd != CHELSIO_SET_QSET_PARAMS)
+                       return -EINVAL;
                if (t.qset_idx >= SGE_QSETS)
                        return -EINVAL;
                if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
@@ -2257,6 +2259,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
 
+               if (t.cmd != CHELSIO_GET_QSET_PARAMS)
+                       return -EINVAL;
+
                /* Display qsets for all ports when offload enabled */
                if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
                        q1 = 0;
@@ -2302,6 +2307,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                        return -EBUSY;
                if (copy_from_user(&edata, useraddr, sizeof(edata)))
                        return -EFAULT;
+               if (edata.cmd != CHELSIO_SET_QSET_NUM)
+                       return -EINVAL;
                if (edata.val < 1 ||
                        (edata.val > 1 && !(adapter->flags & USING_MSIX)))
                        return -EINVAL;
@@ -2342,6 +2349,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                        return -EPERM;
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
+               if (t.cmd != CHELSIO_LOAD_FW)
+                       return -EINVAL;
                /* Check t.len sanity ? */
                fw_data = memdup_user(useraddr + sizeof(t), t.len);
                if (IS_ERR(fw_data))
@@ -2365,6 +2374,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                        return -EBUSY;
                if (copy_from_user(&m, useraddr, sizeof(m)))
                        return -EFAULT;
+               if (m.cmd != CHELSIO_SETMTUTAB)
+                       return -EINVAL;
                if (m.nmtus != NMTUS)
                        return -EINVAL;
                if (m.mtus[0] < 81)     /* accommodate SACK */
@@ -2406,6 +2417,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                        return -EBUSY;
                if (copy_from_user(&m, useraddr, sizeof(m)))
                        return -EFAULT;
+               if (m.cmd != CHELSIO_SET_PM)
+                       return -EINVAL;
                if (!is_power_of_2(m.rx_pg_sz) ||
                        !is_power_of_2(m.tx_pg_sz))
                        return -EINVAL; /* not power of 2 */
@@ -2439,6 +2452,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                        return -EIO;    /* need the memory controllers */
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
+               if (t.cmd != CHELSIO_GET_MEM)
+                       return -EINVAL;
                if ((t.addr & 7) || (t.len & 7))
                        return -EINVAL;
                if (t.mem_id == MEM_CM)
@@ -2491,6 +2506,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                        return -EAGAIN;
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
+               if (t.cmd != CHELSIO_SET_TRACE_FILTER)
+                       return -EINVAL;
 
                tp = (const struct trace_params *)&t.sip;
                if (t.config_tx)
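
Each hunk above closes the same hole: cxgb_extension_ioctl() reads the sub-command byte once to pick a handler, then copies the full argument from userspace a second time, so a racing writer could swap the cmd field between the two fetches. Re-validating the copy that is actually used defeats the double fetch; a hedged sketch of the guard, using the CHELSIO_GET_MEM case shown above:

	struct ch_mem_range t;

	if (copy_from_user(&t, useraddr, sizeof(t)))
		return -EFAULT;
	/* Userspace may have changed cmd since the dispatch read it */
	if (t.cmd != CHELSIO_GET_MEM)
		return -EINVAL;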
index 6ba3104ff7ebbd9eee39477d2a00e5c649fa178f..9bd5f755a0e07d28bbd228eae4d820542d66cc2a 100644 (file)
@@ -300,8 +300,8 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap,
                enum cxgb4_dcb_state_input input =
                        ((pcmd->u.dcb.control.all_syncd_pkd &
                          FW_PORT_CMD_ALL_SYNCD_F)
-                        ? CXGB4_DCB_STATE_FW_ALLSYNCED
-                        : CXGB4_DCB_STATE_FW_INCOMPLETE);
+                        ? CXGB4_DCB_INPUT_FW_ALLSYNCED
+                        : CXGB4_DCB_INPUT_FW_INCOMPLETE);
 
                if (dcb->dcb_version != FW_PORT_DCB_VER_UNKNOWN) {
                        dcb_running_version = FW_PORT_CMD_DCB_VERSION_G(
index 02040b99c78a0561da461c1497a26bfb92ce2f40..484ee829009036883efde95754d419930f7e096c 100644 (file)
@@ -67,7 +67,7 @@
        do { \
                if ((__dcb)->dcb_version == FW_PORT_DCB_VER_IEEE) \
                        cxgb4_dcb_state_fsm((__dev), \
-                                           CXGB4_DCB_STATE_FW_ALLSYNCED); \
+                                           CXGB4_DCB_INPUT_FW_ALLSYNCED); \
        } while (0)
 
 /* States we can be in for a port's Data Center Bridging.
index 7fc656680299703439d2e3bb590eaed762cd2afd..52edb688942b3bb1473905954b3e1b37644fd2fd 100644 (file)
@@ -38,7 +38,6 @@
 #include "cxgb4.h"
 #include "sched.h"
 
-/* Spinlock must be held by caller */
 static int t4_sched_class_fw_cmd(struct port_info *pi,
                                 struct ch_sched_params *p,
                                 enum sched_fw_ops op)
@@ -67,7 +66,6 @@ static int t4_sched_class_fw_cmd(struct port_info *pi,
        return err;
 }
 
-/* Spinlock must be held by caller */
 static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
                                   enum sched_bind_type type, bool bind)
 {
@@ -163,7 +161,6 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
        if (e && index >= 0) {
                int i = 0;
 
-               spin_lock(&e->lock);
                list_for_each_entry(qe, &e->queue_list, list) {
                        if (i == index)
                                break;
@@ -171,10 +168,8 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
                }
                err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
                                              false);
-               if (err) {
-                       spin_unlock(&e->lock);
-                       goto out;
-               }
+               if (err)
+                       return err;
 
                list_del(&qe->list);
                kvfree(qe);
@@ -182,9 +177,7 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
                        e->state = SCHED_STATE_UNUSED;
                        memset(&e->info, 0, sizeof(e->info));
                }
-               spin_unlock(&e->lock);
        }
-out:
        return err;
 }
 
@@ -210,10 +203,8 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
 
        /* Unbind queue from any existing class */
        err = t4_sched_queue_unbind(pi, p);
-       if (err) {
-               kvfree(qe);
-               goto out;
-       }
+       if (err)
+               goto out_err;
 
        /* Bind queue to specified class */
        memset(qe, 0, sizeof(*qe));
@@ -221,18 +212,16 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
        memcpy(&qe->param, p, sizeof(qe->param));
 
        e = &s->tab[qe->param.class];
-       spin_lock(&e->lock);
        err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
-       if (err) {
-               kvfree(qe);
-               spin_unlock(&e->lock);
-               goto out;
-       }
+       if (err)
+               goto out_err;
 
        list_add_tail(&qe->list, &e->queue_list);
        atomic_inc(&e->refcnt);
-       spin_unlock(&e->lock);
-out:
+       return err;
+
+out_err:
+       kvfree(qe);
        return err;
 }
 
@@ -296,8 +285,6 @@ int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
                           enum sched_bind_type type)
 {
        struct port_info *pi = netdev2pinfo(dev);
-       struct sched_table *s;
-       int err = 0;
        u8 class_id;
 
        if (!can_sched(dev))
@@ -323,12 +310,8 @@ int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
        if (class_id == SCHED_CLS_NONE)
                return -ENOTSUPP;
 
-       s = pi->sched_tbl;
-       write_lock(&s->rw_lock);
-       err = t4_sched_class_bind_unbind_op(pi, arg, type, true);
-       write_unlock(&s->rw_lock);
+       return t4_sched_class_bind_unbind_op(pi, arg, type, true);
 
-       return err;
 }
 
 /**
@@ -343,8 +326,6 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
                             enum sched_bind_type type)
 {
        struct port_info *pi = netdev2pinfo(dev);
-       struct sched_table *s;
-       int err = 0;
        u8 class_id;
 
        if (!can_sched(dev))
@@ -367,12 +348,7 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
        if (!valid_class_id(dev, class_id))
                return -EINVAL;
 
-       s = pi->sched_tbl;
-       write_lock(&s->rw_lock);
-       err = t4_sched_class_bind_unbind_op(pi, arg, type, false);
-       write_unlock(&s->rw_lock);
-
-       return err;
+       return t4_sched_class_bind_unbind_op(pi, arg, type, false);
 }
 
 /* If @p is NULL, fetch any available unused class */
@@ -425,7 +401,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
 static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
                                                struct ch_sched_params *p)
 {
-       struct sched_table *s = pi->sched_tbl;
        struct sched_class *e;
        u8 class_id;
        int err;
@@ -441,7 +416,6 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
        if (class_id != SCHED_CLS_NONE)
                return NULL;
 
-       write_lock(&s->rw_lock);
        /* See if there's an existing class with the same
         * requested sched params
         */
@@ -452,27 +426,19 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
                /* Fetch any available unused class */
                e = t4_sched_class_lookup(pi, NULL);
                if (!e)
-                       goto out;
+                       return NULL;
 
                memcpy(&np, p, sizeof(np));
                np.u.params.class = e->idx;
-
-               spin_lock(&e->lock);
                /* New class */
                err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD);
-               if (err) {
-                       spin_unlock(&e->lock);
-                       e = NULL;
-                       goto out;
-               }
+               if (err)
+                       return NULL;
                memcpy(&e->info, &np, sizeof(e->info));
                atomic_set(&e->refcnt, 0);
                e->state = SCHED_STATE_ACTIVE;
-               spin_unlock(&e->lock);
        }
 
-out:
-       write_unlock(&s->rw_lock);
        return e;
 }
 
@@ -517,14 +483,12 @@ struct sched_table *t4_init_sched(unsigned int sched_size)
                return NULL;
 
        s->sched_size = sched_size;
-       rwlock_init(&s->rw_lock);
 
        for (i = 0; i < s->sched_size; i++) {
                memset(&s->tab[i], 0, sizeof(struct sched_class));
                s->tab[i].idx = i;
                s->tab[i].state = SCHED_STATE_UNUSED;
                INIT_LIST_HEAD(&s->tab[i].queue_list);
-               spin_lock_init(&s->tab[i].lock);
                atomic_set(&s->tab[i].refcnt, 0);
        }
        return s;
@@ -545,11 +509,9 @@ void t4_cleanup_sched(struct adapter *adap)
                for (i = 0; i < s->sched_size; i++) {
                        struct sched_class *e;
 
-                       write_lock(&s->rw_lock);
                        e = &s->tab[i];
                        if (e->state == SCHED_STATE_ACTIVE)
                                t4_sched_class_free(pi, e);
-                       write_unlock(&s->rw_lock);
                }
                kvfree(s);
        }
index 3a49e00a38a1dcdd183c45bae8b898d63aa1d4de..168fb4ce375928481679c73bee35cf47f14d4541 100644 (file)
@@ -69,13 +69,11 @@ struct sched_class {
        u8 idx;
        struct ch_sched_params info;
        struct list_head queue_list;
-       spinlock_t lock; /* Per class lock */
        atomic_t refcnt;
 };
 
 struct sched_table {      /* per port scheduling table */
        u8 sched_size;
-       rwlock_t rw_lock; /* Table lock */
        struct sched_class tab[0];
 };
 
index f85eab57e9e1dccba2b052f1e65b6e6de9b414f3..cb523949c81221e91daa73dd64a6eebc6549ca6a 100644 (file)
@@ -4204,6 +4204,7 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
  */
 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
 {
+       unsigned int fw_caps = adap->params.fw_caps_support;
        struct fw_port_cmd c;
 
        memset(&c, 0, sizeof(c));
@@ -4211,9 +4212,14 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
                                     FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
                                     FW_PORT_CMD_PORTID_V(port));
        c.action_to_len16 =
-               cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
+               cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
+                                                ? FW_PORT_ACTION_L1_CFG
+                                                : FW_PORT_ACTION_L1_CFG32) |
                            FW_LEN16(c));
-       c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP32_ANEG);
+       if (fw_caps == FW_CAPS16)
+               c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
+       else
+               c.u.l1cfg32.rcap32 = cpu_to_be32(FW_PORT_CAP32_ANEG);
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
index b8f75a22fb6c97183d89c97f7ce2a9f0cd5ead5d..f152da1ce0464c5c065010813213e5f60eb111af 100644 (file)
@@ -753,7 +753,6 @@ struct cpl_abort_req_rss {
 };
 
 struct cpl_abort_req_rss6 {
-       WR_HDR;
        union opcode_tid ot;
        __be32 srqidx_status;
 };
index 74d122616e76a2d876793a610dd6672e3e8b9910..534787291b44f17a6d47c480e35ee7da5e2e6319 100644 (file)
@@ -4002,8 +4002,6 @@ static int be_enable_vxlan_offloads(struct be_adapter *adapter)
        netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                                   NETIF_F_TSO | NETIF_F_TSO6 |
                                   NETIF_F_GSO_UDP_TUNNEL;
-       netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
-       netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
 
        dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
                 be16_to_cpu(port));
@@ -4025,8 +4023,6 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
        adapter->vxlan_port = 0;
 
        netdev->hw_enc_features = 0;
-       netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
-       netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
 }
 
 static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
@@ -5320,6 +5316,7 @@ static void be_netdev_init(struct net_device *netdev)
        struct be_adapter *adapter = netdev_priv(netdev);
 
        netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+               NETIF_F_GSO_UDP_TUNNEL |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
                NETIF_F_HW_VLAN_CTAG_TX;
        if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
index 7a30276e1ba6f6f61646ab84ecd65c824811572d..d3a62bc1f1c680c626dd786addee425c65de9ae5 100644 (file)
@@ -96,13 +96,6 @@ config GIANFAR
          on the 8540.
 
 source "drivers/net/ethernet/freescale/dpaa/Kconfig"
-
-config FSL_DPAA2_ETH
-       tristate "Freescale DPAA2 Ethernet"
-       depends on FSL_MC_BUS && FSL_MC_DPIO
-       depends on NETDEVICES && ETHERNET
-       ---help---
-         Ethernet driver for Freescale DPAA2 SoCs, using the
-         Freescale MC bus driver
+source "drivers/net/ethernet/freescale/dpaa2/Kconfig"
 
 endif # NET_VENDOR_FREESCALE
index 84843de25c7b19163b7a55578e7f8ebb655999c7..6e0f47f2c8a3754e7776dd3acef403f4848e1f1a 100644 (file)
@@ -2731,8 +2731,6 @@ out_error:
        return err;
 }
 
-static const struct of_device_id dpaa_match[];
-
 static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
 {
        u16 headroom;
index 5d0fdf667b8245e17510129d4fef2db0852e5452..13d6e2272ece63e52c0c41240228823b47f8d9a3 100644 (file)
@@ -182,7 +182,6 @@ static int dpaa_set_pauseparam(struct net_device *net_dev,
        struct phy_device *phydev;
        bool rx_pause, tx_pause;
        struct dpaa_priv *priv;
-       u32 newadv, oldadv;
        int err;
 
        priv = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
new file mode 100644 (file)
index 0000000..a7f365d
--- /dev/null
@@ -0,0 +1,17 @@
+config FSL_DPAA2_ETH
+       tristate "Freescale DPAA2 Ethernet"
+       depends on FSL_MC_BUS && FSL_MC_DPIO
+       depends on ARCH_LAYERSCAPE || COMPILE_TEST
+       help
+         This is the DPAA2 Ethernet driver supporting Freescale SoCs
+         with DPAA2 (DataPath Acceleration Architecture v2).
+         The driver manages network objects discovered on the Freescale
+         MC bus.
+
+config FSL_DPAA2_PTP_CLOCK
+       tristate "Freescale DPAA2 PTP Clock"
+       depends on FSL_DPAA2_ETH && POSIX_TIMERS
+       select PTP_1588_CLOCK
+       help
+         This driver adds support for using the DPAA2 1588 timer module
+         as a PTP clock.
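
With the Kconfig moved into the dpaa2/ subdirectory, the Ethernet and PTP drivers can be selected together; a hedged .config fragment (assuming an ARCH_LAYERSCAPE or COMPILE_TEST build with the fsl-mc bus enabled):

	CONFIG_FSL_MC_BUS=y
	CONFIG_FSL_MC_DPIO=y
	CONFIG_FSL_DPAA2_ETH=m
	CONFIG_FSL_DPAA2_PTP_CLOCK=m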
index 9315ecdba6125e8d47842ae6c054d31b10b27097..2f424e0a8225b81dcafee26e27144d6a26b542c5 100644 (file)
@@ -3,9 +3,11 @@
 # Makefile for the Freescale DPAA2 Ethernet controller
 #
 
-obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
+obj-$(CONFIG_FSL_DPAA2_ETH)            += fsl-dpaa2-eth.o
+obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK)      += fsl-dpaa2-ptp.o
 
-fsl-dpaa2-eth-objs    := dpaa2-eth.o dpaa2-ethtool.o dpni.o
+fsl-dpaa2-eth-objs     := dpaa2-eth.o dpaa2-ethtool.o dpni.o
+fsl-dpaa2-ptp-objs     := dpaa2-ptp.o dprtc.o
 
 # Needed by the tracing framework
 CFLAGS_dpaa2-eth.o := -I$(src)
index c282d5ca06d6bd1ed246dd7a6ec02f5f07a02a96..156080d42a6cc39c77979dd095fff935d2f48b5a 100644 (file)
@@ -289,10 +289,11 @@ err_frame_format:
  *
  * Observance of NAPI budget is not our concern, leaving that to the caller.
  */
-static int consume_frames(struct dpaa2_eth_channel *ch)
+static int consume_frames(struct dpaa2_eth_channel *ch,
+                         enum dpaa2_eth_fq_type *type)
 {
        struct dpaa2_eth_priv *priv = ch->priv;
-       struct dpaa2_eth_fq *fq;
+       struct dpaa2_eth_fq *fq = NULL;
        struct dpaa2_dq *dq;
        const struct dpaa2_fd *fd;
        int cleaned = 0;
@@ -311,12 +312,23 @@ static int consume_frames(struct dpaa2_eth_channel *ch)
 
                fd = dpaa2_dq_fd(dq);
                fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
-               fq->stats.frames++;
 
                fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
                cleaned++;
        } while (!is_last);
 
+       if (!cleaned)
+               return 0;
+
+       fq->stats.frames += cleaned;
+       ch->stats.frames += cleaned;
+
+       /* A dequeue operation only pulls frames from a single queue
+        * into the store. Return the frame queue type as an out param.
+        */
+       if (type)
+               *type = fq->type;
+
        return cleaned;
 }
 
@@ -921,14 +933,16 @@ static int pull_channel(struct dpaa2_eth_channel *ch)
 static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
 {
        struct dpaa2_eth_channel *ch;
-       int cleaned = 0, store_cleaned;
        struct dpaa2_eth_priv *priv;
+       int rx_cleaned = 0, txconf_cleaned = 0;
+       enum dpaa2_eth_fq_type type;
+       int store_cleaned;
        int err;
 
        ch = container_of(napi, struct dpaa2_eth_channel, napi);
        priv = ch->priv;
 
-       while (cleaned < budget) {
+       do {
                err = pull_channel(ch);
                if (unlikely(err))
                        break;
@@ -936,30 +950,32 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
                /* Refill pool if appropriate */
                refill_pool(priv, ch, priv->bpid);
 
-               store_cleaned = consume_frames(ch);
-               cleaned += store_cleaned;
+               store_cleaned = consume_frames(ch, &type);
+               if (type == DPAA2_RX_FQ)
+                       rx_cleaned += store_cleaned;
+               else
+                       txconf_cleaned += store_cleaned;
 
-               /* If we have enough budget left for a full store,
-                * try a new pull dequeue, otherwise we're done here
+               /* If we either consumed the whole NAPI budget with Rx frames
+                * or we reached the Tx confirmations threshold, we're done.
                 */
-               if (store_cleaned == 0 ||
-                   cleaned > budget - DPAA2_ETH_STORE_SIZE)
-                       break;
-       }
-
-       if (cleaned < budget && napi_complete_done(napi, cleaned)) {
-               /* Re-enable data available notifications */
-               do {
-                       err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
-                       cpu_relax();
-               } while (err == -EBUSY);
-               WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
-                         ch->nctx.desired_cpu);
-       }
+               if (rx_cleaned >= budget ||
+                   txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI)
+                       return budget;
+       } while (store_cleaned);
 
-       ch->stats.frames += cleaned;
+       /* We didn't consume the entire budget, so finish napi and
+        * re-enable data availability notifications
+        */
+       napi_complete_done(napi, rx_cleaned);
+       do {
+               err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
+               cpu_relax();
+       } while (err == -EBUSY);
+       WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
+                 ch->nctx.desired_cpu);
 
-       return cleaned;
+       return max(rx_cleaned, 1);
 }
 
 static void enable_ch_napi(struct dpaa2_eth_priv *priv)
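
The reworked poll routine follows the standard NAPI contract: keep returning the full budget while Rx work (or too much Tx-confirmation work) remains, otherwise complete NAPI and re-arm event notifications. A hedged skeleton of that contract, stripped of the DPAA2 specifics (consume_rx() and rearm_events() are placeholders):

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int done = consume_rx(napi, budget);	/* placeholder */

		if (done >= budget)
			return budget;	/* more work: stay in polled mode */

		if (napi_complete_done(napi, done))
			rearm_events(napi);	/* placeholder: re-enable IRQ/CDAN */

		return done;
	}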
@@ -1076,7 +1092,7 @@ static u32 drain_channel(struct dpaa2_eth_priv *priv,
 
        do {
                pull_channel(ch);
-               drained = consume_frames(ch);
+               drained = consume_frames(ch, NULL);
                total += drained;
        } while (drained);
 
@@ -1897,6 +1913,11 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
        if (err)
                goto close;
 
+       priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
+                                      dpaa2_eth_fs_count(priv), GFP_KERNEL);
+       if (!priv->cls_rules) {
+               err = -ENOMEM;
+               goto close;
+       }
+
        return 0;
 
 close:
@@ -2004,13 +2025,25 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv,
 }
 
 /* Supported header fields for Rx hash distribution key */
-static const struct dpaa2_eth_hash_fields hash_fields[] = {
+static const struct dpaa2_eth_dist_fields dist_fields[] = {
        {
                /* L2 header */
                .rxnfc_field = RXH_L2DA,
                .cls_prot = NET_PROT_ETH,
                .cls_field = NH_FLD_ETH_DA,
                .size = 6,
+       }, {
+               .cls_prot = NET_PROT_ETH,
+               .cls_field = NH_FLD_ETH_SA,
+               .size = 6,
+       }, {
+               /* This is the last ethertype field parsed:
+                * depending on frame format, it can be the MAC ethertype
+                * or the VLAN etype.
+                */
+               .cls_prot = NET_PROT_ETH,
+               .cls_field = NH_FLD_ETH_TYPE,
+               .size = 2,
        }, {
                /* VLAN header */
                .rxnfc_field = RXH_VLAN,
@@ -2049,33 +2082,122 @@ static const struct dpaa2_eth_hash_fields hash_fields[] = {
        },
 };
 
-/* Set RX hash options
+/* Configure the Rx hash key using the legacy API */
+static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+{
+       struct device *dev = priv->net_dev->dev.parent;
+       struct dpni_rx_tc_dist_cfg dist_cfg;
+       int err;
+
+       memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+       dist_cfg.key_cfg_iova = key;
+       dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+       dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+
+       err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
+       if (err)
+               dev_err(dev, "dpni_set_rx_tc_dist failed\n");
+
+       return err;
+}
+
+/* Configure the Rx hash key using the new API */
+static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+{
+       struct device *dev = priv->net_dev->dev.parent;
+       struct dpni_rx_dist_cfg dist_cfg;
+       int err;
+
+       memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+       dist_cfg.key_cfg_iova = key;
+       dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+       dist_cfg.enable = 1;
+
+       err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
+       if (err)
+               dev_err(dev, "dpni_set_rx_hash_dist failed\n");
+
+       return err;
+}
+
+/* Configure the Rx flow classification key */
+static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+{
+       struct device *dev = priv->net_dev->dev.parent;
+       struct dpni_rx_dist_cfg dist_cfg;
+       int err;
+
+       memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+       dist_cfg.key_cfg_iova = key;
+       dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+       dist_cfg.enable = 1;
+
+       err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
+       if (err)
+               dev_err(dev, "dpni_set_rx_fs_dist failed\n");
+
+       return err;
+}
+
+/* Size of the Rx flow classification key */
+int dpaa2_eth_cls_key_size(void)
+{
+       int i, size = 0;
+
+       for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
+               size += dist_fields[i].size;
+
+       return size;
+}
+
+/* Offset of header field in Rx classification key */
+int dpaa2_eth_cls_fld_off(int prot, int field)
+{
+       int i, off = 0;
+
+       for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+               if (dist_fields[i].cls_prot == prot &&
+                   dist_fields[i].cls_field == field)
+                       return off;
+               off += dist_fields[i].size;
+       }
+
+       WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
+       return 0;
+}
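+
+Since dpaa2_eth_cls_fld_off() is a running sum over dist_fields[], offsets follow directly from the table order above: ETH_DA sits at offset 0 (size 6), ETH_SA at 6 (size 6), ETH_TYPE at 12 (size 2), and, assuming the VLAN TCI extract that follows is 2 bytes, it lands at offset 14.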
+
+/* Set Rx distribution (hash or flow classification) key
  * flags is a combination of RXH_ bits
  */
-int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
+int dpaa2_eth_set_dist_key(struct net_device *net_dev,
+                          enum dpaa2_eth_rx_dist type, u64 flags)
 {
        struct device *dev = net_dev->dev.parent;
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
        struct dpkg_profile_cfg cls_cfg;
-       struct dpni_rx_tc_dist_cfg dist_cfg;
        u32 rx_hash_fields = 0;
+       dma_addr_t key_iova;
        u8 *dma_mem;
        int i;
        int err = 0;
 
-       if (!dpaa2_eth_hash_enabled(priv)) {
-               dev_dbg(dev, "Hashing support is not enabled\n");
-               return -EOPNOTSUPP;
-       }
-
        memset(&cls_cfg, 0, sizeof(cls_cfg));
 
-       for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
+       for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
                struct dpkg_extract *key =
                        &cls_cfg.extracts[cls_cfg.num_extracts];
 
-               if (!(flags & hash_fields[i].rxnfc_field))
-                       continue;
+               /* For Rx hashing key we set only the selected fields.
+                * For Rx flow classification key we set all supported fields
+                */
+               if (type == DPAA2_ETH_RX_DIST_HASH) {
+                       if (!(flags & dist_fields[i].rxnfc_field))
+                               continue;
+                       rx_hash_fields |= dist_fields[i].rxnfc_field;
+               }
 
                if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
                        dev_err(dev, "error adding key extraction rule, too many rules?\n");
@@ -2083,12 +2205,10 @@ int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
                }
 
                key->type = DPKG_EXTRACT_FROM_HDR;
-               key->extract.from_hdr.prot = hash_fields[i].cls_prot;
+               key->extract.from_hdr.prot = dist_fields[i].cls_prot;
                key->extract.from_hdr.type = DPKG_FULL_FIELD;
-               key->extract.from_hdr.field = hash_fields[i].cls_field;
+               key->extract.from_hdr.field = dist_fields[i].cls_field;
                cls_cfg.num_extracts++;
-
-               rx_hash_fields |= hash_fields[i].rxnfc_field;
        }
 
        dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
@@ -2098,38 +2218,73 @@ int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
        err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
        if (err) {
                dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
-               goto err_prep_key;
+               goto free_key;
        }
 
-       memset(&dist_cfg, 0, sizeof(dist_cfg));
-
        /* Prepare for setting the rx dist */
-       dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
-                                              DPAA2_CLASSIFIER_DMA_SIZE,
-                                              DMA_TO_DEVICE);
-       if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
+       key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
+                                 DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, key_iova)) {
                dev_err(dev, "DMA mapping failed\n");
                err = -ENOMEM;
-               goto err_dma_map;
+               goto free_key;
        }
 
-       dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
-       dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+       if (type == DPAA2_ETH_RX_DIST_HASH) {
+               if (dpaa2_eth_has_legacy_dist(priv))
+                       err = config_legacy_hash_key(priv, key_iova);
+               else
+                       err = config_hash_key(priv, key_iova);
+       } else {
+               err = config_cls_key(priv, key_iova);
+       }
 
-       err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
-       dma_unmap_single(dev, dist_cfg.key_cfg_iova,
-                        DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
-       if (err)
-               dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);
-       else
+       dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
+                        DMA_TO_DEVICE);
+       if (!err && type == DPAA2_ETH_RX_DIST_HASH)
                priv->rx_hash_fields = rx_hash_fields;
 
-err_dma_map:
-err_prep_key:
+free_key:
        kfree(dma_mem);
        return err;
 }
 
+int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+       if (!dpaa2_eth_hash_enabled(priv))
+               return -EOPNOTSUPP;
+
+       return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, flags);
+}
+
+static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
+{
+       struct device *dev = priv->net_dev->dev.parent;
+
+       /* Check if we actually support Rx flow classification */
+       if (dpaa2_eth_has_legacy_dist(priv)) {
+               dev_dbg(dev, "Rx cls not supported by current MC version\n");
+               return -EOPNOTSUPP;
+       }
+
+       if (priv->dpni_attrs.options & DPNI_OPT_NO_FS ||
+           !(priv->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)) {
+               dev_dbg(dev, "Rx cls disabled in DPNI options\n");
+               return -EOPNOTSUPP;
+       }
+
+       if (!dpaa2_eth_hash_enabled(priv)) {
+               dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
+               return -EOPNOTSUPP;
+       }
+
+       priv->rx_cls_enabled = 1;
+
+       return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0);
+}
+
 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
  * frame queues and channels
  */
@@ -2159,6 +2314,13 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
        if (err && err != -EOPNOTSUPP)
                dev_err(dev, "Failed to configure hashing\n");
 
+       /* Configure the flow classification key; it includes all
+        * supported header fields and cannot be modified at runtime
+        */
+       err = dpaa2_eth_set_cls(priv);
+       if (err && err != -EOPNOTSUPP)
+               dev_err(dev, "Failed to configure Rx classification key\n");
+
        /* Configure handling of error frames */
        err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
        err_cfg.set_frame_annotation = 1;
index 93bc41265e5e9af226bc7301cd1893a5340f4804..452a8e9c4f0e08ebaa743afc6c6979eb38a5de13 100644 (file)
  */
 #define DPAA2_ETH_TAILDROP_THRESH      (64 * 1024)
 
+/* Maximum number of Tx confirmation frames to be processed
+ * in a single NAPI call
+ */
+#define DPAA2_ETH_TXCONF_PER_NAPI      256
+
 /* Buffer quota per queue. Must be large enough such that for minimum sized
  * frames taildrop kicks in before the bpool gets depleted, so we compute
  * how many 64B frames fit inside the taildrop threshold and add a margin
@@ -290,13 +295,18 @@ struct dpaa2_eth_channel {
        struct dpaa2_eth_ch_stats stats;
 };
 
-struct dpaa2_eth_hash_fields {
+struct dpaa2_eth_dist_fields {
        u64 rxnfc_field;
        enum net_prot cls_prot;
        int cls_field;
        int size;
 };
 
+struct dpaa2_eth_cls_rule {
+       struct ethtool_rx_flow_spec fs;
+       u8 in_use;
+};
+
 /* Driver private data */
 struct dpaa2_eth_priv {
        struct net_device *net_dev;
@@ -340,6 +350,8 @@ struct dpaa2_eth_priv {
 
        /* enabled ethtool hashing bits */
        u64 rx_hash_fields;
+       struct dpaa2_eth_cls_rule *cls_rules;
+       u8 rx_cls_enabled;
 };
 
 #define DPAA2_RXH_SUPPORTED    (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
@@ -367,6 +379,24 @@ static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
        return priv->dpni_ver_major - ver_major;
 }
 
+/* Minimum firmware version that supports a more flexible API
+ * for configuring the Rx flow hash key
+ */
+#define DPNI_RX_DIST_KEY_VER_MAJOR     7
+#define DPNI_RX_DIST_KEY_VER_MINOR     5
+
+#define dpaa2_eth_has_legacy_dist(priv)                                        \
+       (dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR,     \
+                               DPNI_RX_DIST_KEY_VER_MINOR) < 0)
+
+#define dpaa2_eth_fs_count(priv)        \
+       ((priv)->dpni_attrs.fs_entries)
+
+enum dpaa2_eth_rx_dist {
+       DPAA2_ETH_RX_DIST_HASH,
+       DPAA2_ETH_RX_DIST_CLS
+};
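
As a worked example of the gate, assuming dpaa2_eth_cmp_dpni_ver() falls back to comparing minors when the majors match: DPNI firmware 7.4 gives 4 - 5 = -1 < 0, so dpaa2_eth_has_legacy_dist() is true and the driver stays on dpni_set_rx_tc_dist(); firmware 7.5 or newer yields >= 0 and selects the new hash/FS distribution commands.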
+
 /* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around
  * the buffer also needs space for its shared info struct, and we need
  * to allocate enough to accommodate hardware alignment restrictions
@@ -410,5 +440,7 @@ static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
 }
 
 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
+int dpaa2_eth_cls_key_size(void);
+int dpaa2_eth_cls_fld_off(int prot, int field);
 
 #endif /* __DPAA2_H */
index ce0d94d8a7d8b1864ab7ba7ad209d3d3d8aa6e4a..26bd5a2bd8ed9b987b35da8eecc7a63b4e9d4d8a 100644 (file)
@@ -224,10 +224,310 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
        *(data + i++) = cdan;
 }
 
+static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
+                        void *key, void *mask)
+{
+       int off;
+
+       if (eth_mask->h_proto) {
+               off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
+               *(__be16 *)(key + off) = eth_value->h_proto;
+               *(__be16 *)(mask + off) = eth_mask->h_proto;
+       }
+
+       if (!is_zero_ether_addr(eth_mask->h_source)) {
+               off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
+               ether_addr_copy(key + off, eth_value->h_source);
+               ether_addr_copy(mask + off, eth_mask->h_source);
+       }
+
+       if (!is_zero_ether_addr(eth_mask->h_dest)) {
+               off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
+               ether_addr_copy(key + off, eth_value->h_dest);
+               ether_addr_copy(mask + off, eth_mask->h_dest);
+       }
+
+       return 0;
+}
+
+static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
+                        struct ethtool_usrip4_spec *uip_mask,
+                        void *key, void *mask)
+{
+       int off;
+       u32 tmp_value, tmp_mask;
+
+       if (uip_mask->tos || uip_mask->ip_ver)
+               return -EOPNOTSUPP;
+
+       if (uip_mask->ip4src) {
+               off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
+               *(__be32 *)(key + off) = uip_value->ip4src;
+               *(__be32 *)(mask + off) = uip_mask->ip4src;
+       }
+
+       if (uip_mask->ip4dst) {
+               off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
+               *(__be32 *)(key + off) = uip_value->ip4dst;
+               *(__be32 *)(mask + off) = uip_mask->ip4dst;
+       }
+
+       if (uip_mask->proto) {
+               off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
+               *(u8 *)(key + off) = uip_value->proto;
+               *(u8 *)(mask + off) = uip_mask->proto;
+       }
+
+       if (uip_mask->l4_4_bytes) {
+               tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
+               tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);
+
+               off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
+               *(__be16 *)(key + off) = htons(tmp_value >> 16);
+               *(__be16 *)(mask + off) = htons(tmp_mask >> 16);
+
+               off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
+               *(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
+               *(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
+       }
+
+       /* Only apply the rule for IPv4 frames */
+       off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
+       *(__be16 *)(key + off) = htons(ETH_P_IP);
+       *(__be16 *)(mask + off) = htons(0xFFFF);
+
+       return 0;
+}
+
+static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
+                       struct ethtool_tcpip4_spec *l4_mask,
+                       void *key, void *mask, u8 l4_proto)
+{
+       int off;
+
+       if (l4_mask->tos)
+               return -EOPNOTSUPP;
+
+       if (l4_mask->ip4src) {
+               off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
+               *(__be32 *)(key + off) = l4_value->ip4src;
+               *(__be32 *)(mask + off) = l4_mask->ip4src;
+       }
+
+       if (l4_mask->ip4dst) {
+               off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
+               *(__be32 *)(key + off) = l4_value->ip4dst;
+               *(__be32 *)(mask + off) = l4_mask->ip4dst;
+       }
+
+       if (l4_mask->psrc) {
+               off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
+               *(__be16 *)(key + off) = l4_value->psrc;
+               *(__be16 *)(mask + off) = l4_mask->psrc;
+       }
+
+       if (l4_mask->pdst) {
+               off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
+               *(__be16 *)(key + off) = l4_value->pdst;
+               *(__be16 *)(mask + off) = l4_mask->pdst;
+       }
+
+       /* Only apply the rule for IPv4 frames with the specified L4 proto */
+       off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
+       *(__be16 *)(key + off) = htons(ETH_P_IP);
+       *(__be16 *)(mask + off) = htons(0xFFFF);
+
+       off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
+       *(u8 *)(key + off) = l4_proto;
+       *(u8 *)(mask + off) = 0xFF;
+
+       return 0;
+}
+
+static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
+                        struct ethtool_flow_ext *ext_mask,
+                        void *key, void *mask)
+{
+       int off;
+
+       if (ext_mask->vlan_etype)
+               return -EOPNOTSUPP;
+
+       if (ext_mask->vlan_tci) {
+               off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
+               *(__be16 *)(key + off) = ext_value->vlan_tci;
+               *(__be16 *)(mask + off) = ext_mask->vlan_tci;
+       }
+
+       return 0;
+}
+
+static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
+                            struct ethtool_flow_ext *ext_mask,
+                            void *key, void *mask)
+{
+       int off;
+
+       if (!is_zero_ether_addr(ext_mask->h_dest)) {
+               off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
+               ether_addr_copy(key + off, ext_value->h_dest);
+               ether_addr_copy(mask + off, ext_mask->h_dest);
+       }
+
+       return 0;
+}
+
+static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask)
+{
+       int err;
+
+       switch (fs->flow_type & 0xFF) {
+       case ETHER_FLOW:
+               err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
+                                   key, mask);
+               break;
+       case IP_USER_FLOW:
+               err = prep_uip_rule(&fs->h_u.usr_ip4_spec,
+                                   &fs->m_u.usr_ip4_spec, key, mask);
+               break;
+       case TCP_V4_FLOW:
+               err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
+                                  key, mask, IPPROTO_TCP);
+               break;
+       case UDP_V4_FLOW:
+               err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
+                                  key, mask, IPPROTO_UDP);
+               break;
+       case SCTP_V4_FLOW:
+               err = prep_l4_rule(&fs->h_u.sctp_ip4_spec,
+                                  &fs->m_u.sctp_ip4_spec, key, mask,
+                                  IPPROTO_SCTP);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       if (err)
+               return err;
+
+       if (fs->flow_type & FLOW_EXT) {
+               err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask);
+               if (err)
+                       return err;
+       }
+
+       if (fs->flow_type & FLOW_MAC_EXT) {
+               err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+static int do_cls_rule(struct net_device *net_dev,
+                      struct ethtool_rx_flow_spec *fs,
+                      bool add)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+       struct device *dev = net_dev->dev.parent;
+       struct dpni_rule_cfg rule_cfg = { 0 };
+       struct dpni_fs_action_cfg fs_act = { 0 };
+       dma_addr_t key_iova;
+       void *key_buf;
+       int err;
+
+       if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
+           fs->ring_cookie >= dpaa2_eth_queue_count(priv))
+               return -EINVAL;
+
+       rule_cfg.key_size = dpaa2_eth_cls_key_size();
+
+       /* allocate twice the key size, for the actual key and for mask */
+       key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
+       if (!key_buf)
+               return -ENOMEM;
+
+       /* Fill the key and mask memory areas */
+       err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size);
+       if (err)
+               goto free_mem;
+
+       key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
+                                 DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, key_iova)) {
+               err = -ENOMEM;
+               goto free_mem;
+       }
+
+       rule_cfg.key_iova = key_iova;
+       rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
+
+       if (add) {
+               if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+                       fs_act.options |= DPNI_FS_OPT_DISCARD;
+               else
+                       fs_act.flow_id = fs->ring_cookie;
+               err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
+                                       fs->location, &rule_cfg, &fs_act);
+       } else {
+               err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
+                                          &rule_cfg);
+       }
+
+       dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);
+
+free_mem:
+       kfree(key_buf);
+
+       return err;
+}
+
+static int update_cls_rule(struct net_device *net_dev,
+                          struct ethtool_rx_flow_spec *new_fs,
+                          int location)
+{
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+       struct dpaa2_eth_cls_rule *rule;
+       int err = -EINVAL;
+
+       if (!priv->rx_cls_enabled)
+               return -EOPNOTSUPP;
+
+       if (location >= dpaa2_eth_fs_count(priv))
+               return -EINVAL;
+
+       rule = &priv->cls_rules[location];
+
+       /* If a rule is present at the specified location, delete it. */
+       if (rule->in_use) {
+               err = do_cls_rule(net_dev, &rule->fs, false);
+               if (err)
+                       return err;
+
+               rule->in_use = 0;
+       }
+
+       /* If no new entry to add, return here */
+       if (!new_fs)
+               return err;
+
+       err = do_cls_rule(net_dev, new_fs, true);
+       if (err)
+               return err;
+
+       rule->in_use = 1;
+       rule->fs = *new_fs;
+
+       return 0;
+}
+
 static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
                               struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
 {
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+       int max_rules = dpaa2_eth_fs_count(priv);
+       int i, j = 0;
 
        switch (rxnfc->cmd) {
        case ETHTOOL_GRXFH:
@@ -240,6 +540,31 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
        case ETHTOOL_GRXRINGS:
                rxnfc->data = dpaa2_eth_queue_count(priv);
                break;
+       case ETHTOOL_GRXCLSRLCNT:
+               rxnfc->rule_cnt = 0;
+               for (i = 0; i < max_rules; i++)
+                       if (priv->cls_rules[i].in_use)
+                               rxnfc->rule_cnt++;
+               rxnfc->data = max_rules;
+               break;
+       case ETHTOOL_GRXCLSRULE:
+               if (rxnfc->fs.location >= max_rules)
+                       return -EINVAL;
+               if (!priv->cls_rules[rxnfc->fs.location].in_use)
+                       return -EINVAL;
+               rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
+               break;
+       case ETHTOOL_GRXCLSRLALL:
+               for (i = 0; i < max_rules; i++) {
+                       if (!priv->cls_rules[i].in_use)
+                               continue;
+                       if (j == rxnfc->rule_cnt)
+                               return -EMSGSIZE;
+                       rule_locs[j++] = i;
+               }
+               rxnfc->rule_cnt = j;
+               rxnfc->data = max_rules;
+               break;
        default:
                return -EOPNOTSUPP;
        }
@@ -258,6 +583,12 @@ static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
                        return -EOPNOTSUPP;
                err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
                break;
+       case ETHTOOL_SRXCLSRLINS:
+               err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
+               break;
+       case ETHTOOL_SRXCLSRLDEL:
+               err = update_cls_rule(net_dev, NULL, rxnfc->fs.location);
+               break;
        default:
                err = -EOPNOTSUPP;
        }
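
These two branches back ethtool's ntuple interface, so the new classification support is driven entirely from userspace; a hedged usage sketch (interface name, addresses, and queue numbers are illustrative):

	# Steer IPv4 TCP traffic with destination port 80 to queue 2, slot 0
	ethtool -N eth0 flow-type tcp4 dst-port 80 action 2 loc 0
	# Drop everything from one source address (action -1 = discard)
	ethtool -N eth0 flow-type ip4 src-ip 192.168.1.1 action -1 loc 1
	# List installed rules
	ethtool -n eth0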
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
new file mode 100644 (file)
index 0000000..84b942b
--- /dev/null
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "dpaa2-ptp.h"
+
+struct ptp_dpaa2_priv {
+       struct fsl_mc_device *ptp_mc_dev;
+       struct ptp_clock *clock;
+       struct ptp_clock_info caps;
+       u32 freq_comp;
+};
+
+/* PTP clock operations */
+static int ptp_dpaa2_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+       struct ptp_dpaa2_priv *ptp_dpaa2 =
+               container_of(ptp, struct ptp_dpaa2_priv, caps);
+       struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev;
+       struct device *dev = &mc_dev->dev;
+       u64 adj;
+       u32 diff, tmr_add;
+       int neg_adj = 0;
+       int err = 0;
+
+       if (ppb < 0) {
+               neg_adj = 1;
+               ppb = -ppb;
+       }
+
+       tmr_add = ptp_dpaa2->freq_comp;
+       adj = tmr_add;
+       adj *= ppb;
+       diff = div_u64(adj, 1000000000ULL);
+
+       tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
+
+       err = dprtc_set_freq_compensation(mc_dev->mc_io, 0,
+                                         mc_dev->mc_handle, tmr_add);
+       if (err)
+               dev_err(dev, "dprtc_set_freq_compensation err %d\n", err);
+       return err;
+}
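+
+The adjustment math scales the nominal compensation word linearly: diff = freq_comp * |ppb| / 10^9, added or subtracted depending on sign. As a worked example with an illustrative freq_comp of 0x80000000 (2147483648) and ppb = 1000: diff = 2147483648 * 1000 / 10^9 = 2147, so the value written back is 2147485795 for a positive adjustment.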
+
+static int ptp_dpaa2_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+       struct ptp_dpaa2_priv *ptp_dpaa2 =
+               container_of(ptp, struct ptp_dpaa2_priv, caps);
+       struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev;
+       struct device *dev = &mc_dev->dev;
+       s64 now;
+       int err = 0;
+
+       err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &now);
+       if (err) {
+               dev_err(dev, "dprtc_get_time err %d\n", err);
+               return err;
+       }
+
+       now += delta;
+
+       err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, now);
+       if (err)
+               dev_err(dev, "dprtc_set_time err %d\n", err);
+       return err;
+}
+
+static int ptp_dpaa2_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+       struct ptp_dpaa2_priv *ptp_dpaa2 =
+               container_of(ptp, struct ptp_dpaa2_priv, caps);
+       struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev;
+       struct device *dev = &mc_dev->dev;
+       u64 ns;
+       u32 remainder;
+       int err = 0;
+
+       err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &ns);
+       if (err) {
+               dev_err(dev, "dprtc_get_time err %d\n", err);
+               return err;
+       }
+
+       ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+       ts->tv_nsec = remainder;
+       return err;
+}
+
+static int ptp_dpaa2_settime(struct ptp_clock_info *ptp,
+                            const struct timespec64 *ts)
+{
+       struct ptp_dpaa2_priv *ptp_dpaa2 =
+               container_of(ptp, struct ptp_dpaa2_priv, caps);
+       struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev;
+       struct device *dev = &mc_dev->dev;
+       u64 ns;
+       int err = 0;
+
+       ns = ts->tv_sec * 1000000000ULL;
+       ns += ts->tv_nsec;
+
+       err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, ns);
+       if (err)
+               dev_err(dev, "dprtc_set_time err %d\n", err);
+       return err;
+}
+
+static const struct ptp_clock_info ptp_dpaa2_caps = {
+       .owner          = THIS_MODULE,
+       .name           = "DPAA2 PTP Clock",
+       .max_adj        = 512000,
+       .n_alarm        = 2,
+       .n_ext_ts       = 2,
+       .n_per_out      = 3,
+       .n_pins         = 0,
+       .pps            = 1,
+       .adjfreq        = ptp_dpaa2_adjfreq,
+       .adjtime        = ptp_dpaa2_adjtime,
+       .gettime64      = ptp_dpaa2_gettime,
+       .settime64      = ptp_dpaa2_settime,
+};
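+
+Once ptp_clock_register() succeeds in the probe below, the clock shows up as /dev/ptpN and can be exercised from userspace, e.g. with linuxptp's phc_ctl; a hedged sketch (the device index is illustrative):
+
+	phc_ctl /dev/ptp1 get		# read the current PHC time
+	phc_ctl /dev/ptp1 adj 0.5	# step the clock by 0.5 s
+	phc_ctl /dev/ptp1 freq 5000	# request a +5000 ppb rate adjustment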
+
+static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
+{
+       struct device *dev = &mc_dev->dev;
+       struct ptp_dpaa2_priv *ptp_dpaa2;
+       u32 tmr_add = 0;
+       int err;
+
+       ptp_dpaa2 = devm_kzalloc(dev, sizeof(*ptp_dpaa2), GFP_KERNEL);
+       if (!ptp_dpaa2)
+               return -ENOMEM;
+
+       err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
+       if (err) {
+               dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+               goto err_exit;
+       }
+
+       err = dprtc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
+                        &mc_dev->mc_handle);
+       if (err) {
+               dev_err(dev, "dprtc_open err %d\n", err);
+               goto err_free_mcp;
+       }
+
+       ptp_dpaa2->ptp_mc_dev = mc_dev;
+
+       err = dprtc_get_freq_compensation(mc_dev->mc_io, 0,
+                                         mc_dev->mc_handle, &tmr_add);
+       if (err) {
+               dev_err(dev, "dprtc_get_freq_compensation err %d\n", err);
+               goto err_close;
+       }
+
+       ptp_dpaa2->freq_comp = tmr_add;
+       ptp_dpaa2->caps = ptp_dpaa2_caps;
+
+       ptp_dpaa2->clock = ptp_clock_register(&ptp_dpaa2->caps, dev);
+       if (IS_ERR(ptp_dpaa2->clock)) {
+               err = PTR_ERR(ptp_dpaa2->clock);
+               goto err_close;
+       }
+
+       dpaa2_phc_index = ptp_clock_index(ptp_dpaa2->clock);
+
+       dev_set_drvdata(dev, ptp_dpaa2);
+
+       return 0;
+
+err_close:
+       dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+err_free_mcp:
+       fsl_mc_portal_free(mc_dev->mc_io);
+err_exit:
+       return err;
+}
+
+static int dpaa2_ptp_remove(struct fsl_mc_device *mc_dev)
+{
+       struct ptp_dpaa2_priv *ptp_dpaa2;
+       struct device *dev = &mc_dev->dev;
+
+       ptp_dpaa2 = dev_get_drvdata(dev);
+       ptp_clock_unregister(ptp_dpaa2->clock);
+
+       dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+       fsl_mc_portal_free(mc_dev->mc_io);
+
+       return 0;
+}
+
+static const struct fsl_mc_device_id dpaa2_ptp_match_id_table[] = {
+       {
+               .vendor = FSL_MC_VENDOR_FREESCALE,
+               .obj_type = "dprtc",
+       },
+       {}
+};
+MODULE_DEVICE_TABLE(fslmc, dpaa2_ptp_match_id_table);
+
+static struct fsl_mc_driver dpaa2_ptp_drv = {
+       .driver = {
+               .name = KBUILD_MODNAME,
+               .owner = THIS_MODULE,
+       },
+       .probe = dpaa2_ptp_probe,
+       .remove = dpaa2_ptp_remove,
+       .match_id_table = dpaa2_ptp_match_id_table,
+};
+
+module_fsl_mc_driver(dpaa2_ptp_drv);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DPAA2 PTP Clock Driver");
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
new file mode 100644
index 0000000..ff2e177
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2018 NXP
+ */
+
+#ifndef __RTC_H
+#define __RTC_H
+
+#include "dprtc.h"
+#include "dprtc-cmd.h"
+
+extern int dpaa2_phc_index;
+
+#endif
index 83698abce8b443f185bb78ad2e76d371354c7b76..7b44d7d9b19aab9cb6e5806904f3b09377d36ddc 100644
@@ -82,6 +82,9 @@
 #define DPNI_CMDID_GET_OFFLOAD                         DPNI_CMD(0x26B)
 #define DPNI_CMDID_SET_OFFLOAD                         DPNI_CMD(0x26C)
 
+#define DPNI_CMDID_SET_RX_FS_DIST                      DPNI_CMD(0x273)
+#define DPNI_CMDID_SET_RX_HASH_DIST                    DPNI_CMD(0x274)
+
 /* Macros for accessing command fields smaller than 1byte */
 #define DPNI_MASK(field)       \
        GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
@@ -515,4 +518,52 @@ struct dpni_rsp_get_api_version {
        __le16 minor;
 };
 
+#define DPNI_RX_FS_DIST_ENABLE_SHIFT   0
+#define DPNI_RX_FS_DIST_ENABLE_SIZE    1
+struct dpni_cmd_set_rx_fs_dist {
+       __le16 dist_size;
+       u8 enable;
+       u8 tc;
+       __le16 miss_flow_id;
+       __le16 pad;
+       __le64 key_cfg_iova;
+};
+
+#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
+#define DPNI_RX_HASH_DIST_ENABLE_SIZE  1
+struct dpni_cmd_set_rx_hash_dist {
+       __le16 dist_size;
+       u8 enable;
+       u8 tc;
+       __le32 pad;
+       __le64 key_cfg_iova;
+};
+
+struct dpni_cmd_add_fs_entry {
+       /* cmd word 0 */
+       __le16 options;
+       u8 tc_id;
+       u8 key_size;
+       __le16 index;
+       __le16 flow_id;
+       /* cmd word 1 */
+       __le64 key_iova;
+       /* cmd word 2 */
+       __le64 mask_iova;
+       /* cmd word 3 */
+       __le64 flc;
+};
+
+struct dpni_cmd_remove_fs_entry {
+       /* cmd word 0 */
+       __le16 pad0;
+       u8 tc_id;
+       u8 key_size;
+       __le32 pad1;
+       /* cmd word 1 */
+       __le64 key_iova;
+       /* cmd word 2 */
+       __le64 mask_iova;
+};
+
 #endif /* _FSL_DPNI_CMD_H */
index d6ac26797ceca2bfb86c6f41353c2c99e1d132fc..220dfc806a246190aa8ea00d245b5f6ebbd85749 100644
@@ -1598,3 +1598,155 @@ int dpni_get_api_version(struct fsl_mc_io *mc_io,
 
        return 0;
 }
+
+/**
+ * dpni_set_rx_fs_dist() - Set Rx flow steering distribution
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ * @cfg: Distribution configuration
+ *
+ * If FS was already enabled by a previous call, the classification
+ * key is changed but all existing table rules are kept. If the
+ * existing rules do not match the new key, the results are
+ * unpredictable; it is the user's responsibility to keep the key
+ * consistent with the rules.
+ * If cfg.enable is set to 1, the command creates a flow steering table
+ * and packets are classified according to this table. Packets that
+ * miss all table rules are classified according to the settings made
+ * in dpni_set_rx_hash_dist().
+ * If cfg.enable is set to 0, the command clears the flow steering
+ * table and packets are classified according to the settings made in
+ * dpni_set_rx_hash_dist().
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
+                       u32 cmd_flags,
+                       u16 token,
+                       const struct dpni_rx_dist_cfg *cfg)
+{
+       struct dpni_cmd_set_rx_fs_dist *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params;
+       cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+       dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
+       cmd_params->tc = cfg->tc;
+       cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id);
+       cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+       /* send command to mc */
+       return mc_send_command(mc_io, &cmd);
+}
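
A caller-side sketch of the new command (illustrative only; 'key_iova'
points at 256 bytes of DMA-able memory prepared with
dpni_prepare_key_cfg(), as the header documents):

/* enable flow steering on TC 0, sending table misses to queue 0;
 * pass DPNI_FS_MISS_DROP as fs_miss_flow_id to drop misses instead
 */
static int example_enable_fs(struct fsl_mc_io *mc_io, u16 token,
			     u64 key_iova, u16 num_queues)
{
	struct dpni_rx_dist_cfg cfg = {
		.dist_size = num_queues,
		.key_cfg_iova = key_iova,
		.enable = 1,
		.tc = 0,
		.fs_miss_flow_id = 0,
	};

	return dpni_set_rx_fs_dist(mc_io, 0, token, &cfg);
}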
+
+/**
+ * dpni_set_rx_hash_dist() - Set Rx hash distribution
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ * @cfg: Distribution configuration
+ *
+ * If cfg.enable is set to 1, packets are classified using a hash
+ * function based on the key referenced by cfg.key_cfg_iova.
+ * If cfg.enable is set to 0, packets are sent to the default queue.
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
+                         u32 cmd_flags,
+                         u16 token,
+                         const struct dpni_rx_dist_cfg *cfg)
+{
+       struct dpni_cmd_set_rx_hash_dist *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params;
+       cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+       dpni_set_field(cmd_params->enable, RX_HASH_DIST_ENABLE, cfg->enable);
+       cmd_params->tc = cfg->tc;
+       cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+       /* send command to mc */
+       return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
+ *                     (to select a flow ID)
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ * @tc_id:     Traffic class selection (0-7)
+ * @index:     Location in the FS table at which to insert the entry.
+ *             Only relevant if MASKING is enabled for FS
+ *             classification on this DPNI; ignored for exact match.
+ * @cfg:       Flow steering rule to add
+ * @action:    Action to be taken as result of a classification hit
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
+                     u32 cmd_flags,
+                     u16 token,
+                     u8 tc_id,
+                     u16 index,
+                     const struct dpni_rule_cfg *cfg,
+                     const struct dpni_fs_action_cfg *action)
+{
+       struct dpni_cmd_add_fs_entry *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
+       cmd_params->tc_id = tc_id;
+       cmd_params->key_size = cfg->key_size;
+       cmd_params->index = cpu_to_le16(index);
+       cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+       cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+       cmd_params->options = cpu_to_le16(action->options);
+       cmd_params->flow_id = cpu_to_le16(action->flow_id);
+       cmd_params->flc = cpu_to_le64(action->flc);
+
+       /* send command to mc */
+       return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
+ *                         traffic class
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ * @tc_id:     Traffic class selection (0-7)
+ * @cfg:       Flow steering rule to remove
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
+                        u32 cmd_flags,
+                        u16 token,
+                        u8 tc_id,
+                        const struct dpni_rule_cfg *cfg)
+{
+       struct dpni_cmd_remove_fs_entry *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
+       cmd_params->tc_id = tc_id;
+       cmd_params->key_size = cfg->key_size;
+       cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+       cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+       /* send command to mc */
+       return mc_send_command(mc_io, &cmd);
+}
index b378a00c7c5342349b165365c4f3d96bf879fea7..a521242e23537621cb00708809996ac0e964a1a8 100644
@@ -628,6 +628,45 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io                   *mc_io,
                        u8                                      tc_id,
                        const struct dpni_rx_tc_dist_cfg        *cfg);
 
+/**
+ * When used as fs_miss_flow_id in struct dpni_rx_dist_cfg (see
+ * dpni_set_rx_fs_dist()), signals the DPNI to drop all unclassified frames
+ */
+#define DPNI_FS_MISS_DROP              ((uint16_t)-1)
+
+/**
+ * struct dpni_rx_dist_cfg - Rx distribution configuration
+ * @dist_size: distribution size
+ * @key_cfg_iova: I/O virtual address of 256 bytes of DMA-able memory filled
+ *             with the extractions to be used for the distribution key,
+ *             prepared by calling dpni_prepare_key_cfg(); relevant only when
+ *             enable != 0, otherwise it can be '0'
+ * @enable: enable/disable the distribution
+ * @tc: TC ID for which the distribution is set
+ * @fs_miss_flow_id: queue ID a packet is put on when it misses all rules in
+ *             the flow steering table and hash is disabled; use
+ *             DPNI_FS_MISS_DROP to drop such frames. This field is used only
+ *             when flow steering distribution is enabled and hash
+ *             distribution is disabled
+ */
+struct dpni_rx_dist_cfg {
+       u16 dist_size;
+       u64 key_cfg_iova;
+       u8 enable;
+       u8 tc;
+       u16 fs_miss_flow_id;
+};
+
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
+                       u32 cmd_flags,
+                       u16 token,
+                       const struct dpni_rx_dist_cfg *cfg);
+
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
+                         u32 cmd_flags,
+                         u16 token,
+                         const struct dpni_rx_dist_cfg *cfg);
+
 /**
  * enum dpni_dest - DPNI destination types
  * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
@@ -816,6 +855,64 @@ struct dpni_rule_cfg {
        u8      key_size;
 };
 
+/**
+ * Discard matching traffic. If set, this takes precedence over any other
+ * configuration and matching traffic is always discarded.
+ */
+#define DPNI_FS_OPT_DISCARD            0x1
+
+/**
+ * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
+ * override the FLC value set per queue.
+ * For more details check the Frame Descriptor section in the hardware
+ * documentation.
+ */
+#define DPNI_FS_OPT_SET_FLC            0x2
+
+/**
+ * Indicates whether the 6 least significant bits of FLC are used for stash
+ * control. If set, the 6 least significant bits in value are interpreted as
+ * follows:
+ *     - bits 0-1: indicates the number of 64 byte units of context that are
+ *     stashed. FLC value is interpreted as a memory address in this case,
+ *     excluding the 6 LS bits.
+ *     - bits 2-3: indicates the number of 64 byte units of frame annotation
+ *     to be stashed. Annotation is placed at FD[ADDR].
+ *     - bits 4-5: indicates the number of 64 byte units of frame data to be
+ *     stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
+ * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
+ */
+#define DPNI_FS_OPT_SET_STASH_CONTROL  0x4
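
For illustration, the three stash fields described above can be packed
into the low 6 bits of an FLC value as follows (this helper is an
assumption for the example, not part of the patch; it only makes sense
together with DPNI_FS_OPT_SET_FLC | DPNI_FS_OPT_SET_STASH_CONTROL):

/* ctx_addr must be 64-byte aligned so its 6 LS bits are free to
 * carry the stash control; each field counts 64-byte units (0-3)
 */
static u64 example_flc_with_stash(u64 ctx_addr, u8 ctx_64b,
				  u8 annot_64b, u8 data_64b)
{
	return (ctx_addr & ~0x3fULL) |
	       ((ctx_64b & 0x3) << 0) |
	       ((annot_64b & 0x3) << 2) |
	       ((data_64b & 0x3) << 4);
}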
+
+/**
+ * struct dpni_fs_action_cfg - Action configuration for table look-up
+ * @flc:       FLC value for traffic matching this rule. Please check the
+ *             Frame Descriptor section in the hardware documentation for
+ *             more information.
+ * @flow_id:   Identifies the Rx queue used for matching traffic. Supported
+ *             values are in the range 0 to num_queues - 1.
+ * @options:   Any combination of DPNI_FS_OPT_ values.
+ */
+struct dpni_fs_action_cfg {
+       u64 flc;
+       u16 flow_id;
+       u16 options;
+};
+
+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
+                     u32 cmd_flags,
+                     u16 token,
+                     u8 tc_id,
+                     u16 index,
+                     const struct dpni_rule_cfg *cfg,
+                     const struct dpni_fs_action_cfg *action);
+
+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
+                        u32 cmd_flags,
+                        u16 token,
+                        u8 tc_id,
+                        const struct dpni_rule_cfg *cfg);
+
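
Tying the pieces together, a hedged sketch of rule installation (names
are illustrative; the DMA-able key/mask behind 'rule' are assumed to be
prepared by the caller): steer matches on TC 0 to Rx queue 2.

static int example_add_steering_rule(struct fsl_mc_io *mc_io, u16 token,
				     const struct dpni_rule_cfg *rule)
{
	struct dpni_fs_action_cfg action = {
		.flow_id = 2,
		.options = 0,	/* or DPNI_FS_OPT_DISCARD to drop */
	};

	return dpni_add_fs_entry(mc_io, 0, token, 0, 0, rule, &action);
}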
 int dpni_get_api_version(struct fsl_mc_io *mc_io,
                         u32 cmd_flags,
                         u16 *major_ver,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
new file mode 100644
index 0000000..9af4ac7
--- /dev/null
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#ifndef _FSL_DPRTC_CMD_H
+#define _FSL_DPRTC_CMD_H
+
+/* Command versioning */
+#define DPRTC_CMD_BASE_VERSION         1
+#define DPRTC_CMD_ID_OFFSET            4
+
+#define DPRTC_CMD(id)  (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPRTC_CMDID_CLOSE                      DPRTC_CMD(0x800)
+#define DPRTC_CMDID_OPEN                       DPRTC_CMD(0x810)
+
+#define DPRTC_CMDID_SET_FREQ_COMPENSATION      DPRTC_CMD(0x1d1)
+#define DPRTC_CMDID_GET_FREQ_COMPENSATION      DPRTC_CMD(0x1d2)
+#define DPRTC_CMDID_GET_TIME                   DPRTC_CMD(0x1d3)
+#define DPRTC_CMDID_SET_TIME                   DPRTC_CMD(0x1d4)
+
+#pragma pack(push, 1)
+struct dprtc_cmd_open {
+       __le32 dprtc_id;
+};
+
+struct dprtc_get_freq_compensation {
+       __le32 freq_compensation;
+};
+
+struct dprtc_time {
+       __le64 time;
+};
+
+#pragma pack(pop)
+
+#endif /* _FSL_DPRTC_CMD_H */
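
As a worked example of the encoding, DPRTC_CMDID_GET_TIME expands to
(0x1d3 << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION
= 0x1d30 | 0x1 = 0x1d31: the bits above the offset select the command
and the low nibble carries the command version the MC firmware expects.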
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.c b/drivers/net/ethernet/freescale/dpaa2/dprtc.c
new file mode 100644
index 0000000..c13e09b
--- /dev/null
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#include <linux/fsl/mc.h>
+
+#include "dprtc.h"
+#include "dprtc-cmd.h"
+
+/**
+ * dprtc_open() - Open a control session for the specified object.
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dprtc_id:  DPRTC unique ID
+ * @token:     Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dprtc_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dprtc_open(struct fsl_mc_io *mc_io,
+              u32 cmd_flags,
+              int dprtc_id,
+              u16 *token)
+{
+       struct dprtc_cmd_open *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+       int err;
+
+       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN,
+                                         cmd_flags,
+                                         0);
+       cmd_params = (struct dprtc_cmd_open *)cmd.params;
+       cmd_params->dprtc_id = cpu_to_le32(dprtc_id);
+
+       err = mc_send_command(mc_io, &cmd);
+       if (err)
+               return err;
+
+       *token = mc_cmd_hdr_read_token(&cmd);
+
+       return 0;
+}
+
+/**
+ * dprtc_close() - Close the control session of the object
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPRTC object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dprtc_close(struct fsl_mc_io *mc_io,
+               u32 cmd_flags,
+               u16 token)
+{
+       struct fsl_mc_command cmd = { 0 };
+
+       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
+                                         token);
+
+       return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_set_freq_compensation() - Sets a new frequency compensation value.
+ *
+ * @mc_io:             Pointer to MC portal's I/O object
+ * @cmd_flags:         Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:             Token of DPRTC object
+ * @freq_compensation: The new frequency compensation value to set.
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
+                               u32 cmd_flags,
+                               u16 token,
+                               u32 freq_compensation)
+{
+       struct dprtc_get_freq_compensation *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+
+       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dprtc_get_freq_compensation *)cmd.params;
+       cmd_params->freq_compensation = cpu_to_le32(freq_compensation);
+
+       return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_get_freq_compensation() - Retrieves the frequency compensation value
+ *
+ * @mc_io:             Pointer to MC portal's I/O object
+ * @cmd_flags:         Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:             Token of DPRTC object
+ * @freq_compensation: Frequency compensation value
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
+                               u32 cmd_flags,
+                               u16 token,
+                               u32 *freq_compensation)
+{
+       struct dprtc_get_freq_compensation *rsp_params;
+       struct fsl_mc_command cmd = { 0 };
+       int err;
+
+       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION,
+                                         cmd_flags,
+                                         token);
+
+       err = mc_send_command(mc_io, &cmd);
+       if (err)
+               return err;
+
+       rsp_params = (struct dprtc_get_freq_compensation *)cmd.params;
+       *freq_compensation = le32_to_cpu(rsp_params->freq_compensation);
+
+       return 0;
+}
+
+/**
+ * dprtc_get_time() - Returns the current RTC time.
+ *
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPRTC object
+ * @time:      Current RTC time.
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dprtc_get_time(struct fsl_mc_io *mc_io,
+                  u32 cmd_flags,
+                  u16 token,
+                  uint64_t *time)
+{
+       struct dprtc_time *rsp_params;
+       struct fsl_mc_command cmd = { 0 };
+       int err;
+
+       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME,
+                                         cmd_flags,
+                                         token);
+
+       err = mc_send_command(mc_io, &cmd);
+       if (err)
+               return err;
+
+       rsp_params = (struct dprtc_time *)cmd.params;
+       *time = le64_to_cpu(rsp_params->time);
+
+       return 0;
+}
+
+/**
+ * dprtc_set_time() - Updates current RTC time.
+ *
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPRTC object
+ * @time:      New RTC time.
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dprtc_set_time(struct fsl_mc_io *mc_io,
+                  u32 cmd_flags,
+                  u16 token,
+                  uint64_t time)
+{
+       struct dprtc_time *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+
+       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dprtc_time *)cmd.params;
+       cmd_params->time = cpu_to_le64(time);
+
+       return mc_send_command(mc_io, &cmd);
+}
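
Putting the calls above together, a minimal session sketch (illustrative
names; 'mc_io' and 'dprtc_id' come from the fsl-mc bus, as in
dpaa2_ptp_probe()):

static int example_read_rtc(struct fsl_mc_io *mc_io, int dprtc_id, u64 *ns)
{
	u16 token;
	int err;

	err = dprtc_open(mc_io, 0, dprtc_id, &token);
	if (err)
		return err;

	err = dprtc_get_time(mc_io, 0, token, ns);

	dprtc_close(mc_io, 0, token);
	return err;
}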
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.h b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
new file mode 100644 (file)
index 0000000..fe19618
--- /dev/null
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#ifndef __FSL_DPRTC_H
+#define __FSL_DPRTC_H
+
+/* Data Path Real Time Counter API
+ * Contains initialization APIs and runtime control APIs for RTC
+ */
+
+struct fsl_mc_io;
+
+int dprtc_open(struct fsl_mc_io *mc_io,
+              u32 cmd_flags,
+              int dprtc_id,
+              u16 *token);
+
+int dprtc_close(struct fsl_mc_io *mc_io,
+               u32 cmd_flags,
+               u16 token);
+
+int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
+                               u32 cmd_flags,
+                               u16 token,
+                               u32 freq_compensation);
+
+int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
+                               u32 cmd_flags,
+                               u16 token,
+                               u32 *freq_compensation);
+
+int dprtc_get_time(struct fsl_mc_io *mc_io,
+                  u32 cmd_flags,
+                  u16 token,
+                  uint64_t *time);
+
+int dprtc_set_time(struct fsl_mc_io *mc_io,
+                  u32 cmd_flags,
+                  u16 token,
+                  uint64_t time);
+
+#endif /* __FSL_DPRTC_H */
index ce74b7a46d0729626727dae52365fe7b89555e7a..a17cc973d9a30284506eb72066462d8ac1e5d264 100644
@@ -1158,7 +1158,7 @@ static void fec_enet_timeout_work(struct work_struct *work)
                napi_disable(&fep->napi);
                netif_tx_lock_bh(ndev);
                fec_restart(ndev);
-               netif_wake_queue(ndev);
+               netif_tx_wake_all_queues(ndev);
                netif_tx_unlock_bh(ndev);
                napi_enable(&fep->napi);
        }
@@ -1273,7 +1273,7 @@ skb_done:
 
                /* Since we have freed up a buffer, the ring is no longer full
                 */
-               if (netif_queue_stopped(ndev)) {
+               if (netif_tx_queue_stopped(nq)) {
                        entries_free = fec_enet_get_free_txdesc_num(txq);
                        if (entries_free >= txq->tx_wake_threshold)
                                netif_tx_wake_queue(nq);
@@ -1746,7 +1746,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
                        napi_disable(&fep->napi);
                        netif_tx_lock_bh(ndev);
                        fec_restart(ndev);
-                       netif_wake_queue(ndev);
+                       netif_tx_wake_all_queues(ndev);
                        netif_tx_unlock_bh(ndev);
                        napi_enable(&fep->napi);
                }
@@ -2240,7 +2240,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
                napi_disable(&fep->napi);
                netif_tx_lock_bh(ndev);
                fec_restart(ndev);
-               netif_wake_queue(ndev);
+               netif_tx_wake_all_queues(ndev);
                netif_tx_unlock_bh(ndev);
                napi_enable(&fep->napi);
        }
index a847b9c3b31a9a4e5c9c6f9734973c89d596e4c6..d79e4e009d637885271a1b7cdb29e9d24c7bbe02 100644
@@ -393,11 +393,7 @@ void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
         */
 
        /* get local capabilities */
-       lcl_adv = 0;
-       if (phy_dev->advertising & ADVERTISED_Pause)
-               lcl_adv |= ADVERTISE_PAUSE_CAP;
-       if (phy_dev->advertising & ADVERTISED_Asym_Pause)
-               lcl_adv |= ADVERTISE_PAUSE_ASYM;
+       lcl_adv = ethtool_adv_to_lcl_adv_t(phy_dev->advertising);
 
        /* get link partner capabilities */
        rmt_adv = 0;
index 0bd21a49301672a37288e53ad776f1cd3a1ae9a2..3c8da1a18ba08cc7d266eff374363eaeb3faf47e 100644
@@ -3656,12 +3656,7 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
                if (phydev->asym_pause)
                        rmt_adv |= LPA_PAUSE_ASYM;
 
-               lcl_adv = 0;
-               if (phydev->advertising & ADVERTISED_Pause)
-                       lcl_adv |= ADVERTISE_PAUSE_CAP;
-               if (phydev->advertising & ADVERTISED_Asym_Pause)
-                       lcl_adv |= ADVERTISE_PAUSE_ASYM;
-
+               lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
                flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
                if (flowctrl & FLOW_CTRL_TX)
                        val |= MACCFG1_TX_FLOW;
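
Both conversions above (here and in the fman hunk) collapse into the
generic helper from <linux/mii.h>; its behaviour is roughly the removed
open-coded logic:

/* sketch of ethtool_adv_to_lcl_adv_t(); see include/linux/mii.h for
 * the authoritative version
 */
static inline u16 example_adv_to_lcl_adv_t(u32 advertising)
{
	u16 lcl_adv = 0;

	if (advertising & ADVERTISED_Pause)
		lcl_adv |= ADVERTISE_PAUSE_CAP;
	if (advertising & ADVERTISED_Asym_Pause)
		lcl_adv |= ADVERTISE_PAUSE_ASYM;

	return lcl_adv;
}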
index a051e582d541ad2e2191567b9b3b3d7d69a90fc0..79d03f8ee7b180d2cab9a2a647254461c0a0cb08 100644
@@ -84,7 +84,7 @@ static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
        if (cb->type == DESC_TYPE_SKB)
                dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
                                 ring_to_dma_dir(ring));
-       else
+       else if (cb->length)
                dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
                               ring_to_dma_dir(ring));
 }
index f56855e63c961333f20f842a3558a920d201ccc9..28e907831b0eddbf760e0edb579ae7ae708520e0 100644
@@ -40,9 +40,9 @@
 #define SKB_TMP_LEN(SKB) \
        (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
 
-static void fill_v2_desc(struct hnae_ring *ring, void *priv,
-                        int size, dma_addr_t dma, int frag_end,
-                        int buf_num, enum hns_desc_type type, int mtu)
+static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
+                           int send_sz, dma_addr_t dma, int frag_end,
+                           int buf_num, enum hns_desc_type type, int mtu)
 {
        struct hnae_desc *desc = &ring->desc[ring->next_to_use];
        struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -64,7 +64,7 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
        desc_cb->type = type;
 
        desc->addr = cpu_to_le64(dma);
-       desc->tx.send_size = cpu_to_le16((u16)size);
+       desc->tx.send_size = cpu_to_le16((u16)send_sz);
 
        /* config bd buffer end */
        hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
@@ -133,6 +133,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
        ring_ptr_move_fw(ring, next_to_use);
 }
 
+static void fill_v2_desc(struct hnae_ring *ring, void *priv,
+                        int size, dma_addr_t dma, int frag_end,
+                        int buf_num, enum hns_desc_type type, int mtu)
+{
+       fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
+                       buf_num, type, mtu);
+}
+
 static const struct acpi_device_id hns_enet_acpi_match[] = {
        { "HISI00C1", 0 },
        { "HISI00C2", 0 },
@@ -289,15 +297,15 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
 
        /* when the frag size is bigger than hardware, split this frag */
        for (k = 0; k < frag_buf_num; k++)
-               fill_v2_desc(ring, priv,
-                            (k == frag_buf_num - 1) ?
+               fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
+                               (k == frag_buf_num - 1) ?
                                        sizeoflast : BD_MAX_SEND_SIZE,
-                            dma + BD_MAX_SEND_SIZE * k,
-                            frag_end && (k == frag_buf_num - 1) ? 1 : 0,
-                            buf_num,
-                            (type == DESC_TYPE_SKB && !k) ?
+                               dma + BD_MAX_SEND_SIZE * k,
+                               frag_end && (k == frag_buf_num - 1) ? 1 : 0,
+                               buf_num,
+                               (type == DESC_TYPE_SKB && !k) ?
                                        DESC_TYPE_SKB : DESC_TYPE_PAGE,
-                            mtu);
+                               mtu);
 }
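
Worked example of the split above, assuming BD_MAX_SEND_SIZE is 8192
(an assumption for illustration): a 10000-byte frag mapped once for DMA
yields frag_buf_num = 2 and sizeoflast = 1808. BD 0 records a desc_cb
length of 10000 with send_size 8192; BD 1 records length 0 with
send_size 1808. Only BD 0 owns the mapping, so hnae_unmap_buffer()
(which now skips zero-length cbs, per the hnae.c hunk earlier) unmaps
the frag exactly once with the correct length.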
 
 netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
@@ -1495,21 +1503,6 @@ static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
        return phy_mii_ioctl(phy_dev, ifr, cmd);
 }
 
-/* use only for netconsole to poll with the device without interrupt */
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hns_nic_poll_controller(struct net_device *ndev)
-{
-       struct hns_nic_priv *priv = netdev_priv(ndev);
-       unsigned long flags;
-       int i;
-
-       local_irq_save(flags);
-       for (i = 0; i < priv->ae_handle->q_num * 2; i++)
-               napi_schedule(&priv->ring_data[i].napi);
-       local_irq_restore(flags);
-}
-#endif
-
 static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
                                    struct net_device *ndev)
 {
@@ -1962,9 +1955,6 @@ static const struct net_device_ops hns_nic_netdev_ops = {
        .ndo_set_features = hns_nic_set_features,
        .ndo_fix_features = hns_nic_fix_features,
        .ndo_get_stats64 = hns_nic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = hns_nic_poll_controller,
-#endif
        .ndo_set_rx_mode = hns_nic_set_rx_mode,
        .ndo_select_queue = hns_nic_select_queue,
 };
index be9dc08ccf6785762a329016ed3f3e20d2f21dad..038326cfda93d736a52dbde2a46d4a2ef94c5cc4 100644
@@ -46,9 +46,6 @@ enum hclge_mbx_mac_vlan_subcode {
        HCLGE_MBX_MAC_VLAN_MC_MODIFY,           /* modify MC mac addr */
        HCLGE_MBX_MAC_VLAN_MC_ADD,              /* add new MC mac addr */
        HCLGE_MBX_MAC_VLAN_MC_REMOVE,           /* remove MC mac addr */
-       HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE,  /* config func MTA enable */
-       HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ,       /* read func MTA type */
-       HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE,   /* update MTA status */
 };
 
 /* below are per-VF vlan cfg subcodes */
index 564afd4c1323b2355dae6323643fda2c6e5e9991..1b49c5d3340b691c92ef81f4a7115a5278b3199a 100644
@@ -51,6 +51,7 @@
 #define HNAE3_KNIC_CLIENT_INITED_B             0x3
 #define HNAE3_UNIC_CLIENT_INITED_B             0x4
 #define HNAE3_ROCE_CLIENT_INITED_B             0x5
+#define HNAE3_DEV_SUPPORT_FD_B                 0x6
 
 #define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
                BIT(HNAE3_DEV_SUPPORT_ROCE_B))
@@ -61,6 +62,9 @@
 #define hnae3_dev_dcb_supported(hdev) \
        hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
 
+#define hnae3_dev_fd_supported(hdev) \
+       hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B)
+
 #define ring_ptr_move_fw(ring, p) \
        ((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
 #define ring_ptr_move_bw(ring, p) \
@@ -84,10 +88,11 @@ struct hnae3_queue {
 
 /*hnae3 loop mode*/
 enum hnae3_loop {
-       HNAE3_MAC_INTER_LOOP_MAC,
-       HNAE3_MAC_INTER_LOOP_SERDES,
-       HNAE3_MAC_INTER_LOOP_PHY,
-       HNAE3_MAC_LOOP_NONE,
+       HNAE3_LOOP_APP,
+       HNAE3_LOOP_SERIAL_SERDES,
+       HNAE3_LOOP_PARALLEL_SERDES,
+       HNAE3_LOOP_PHY,
+       HNAE3_LOOP_NONE,
 };
 
 enum hnae3_client_type {
@@ -174,6 +179,7 @@ struct hnae3_ae_dev {
        struct list_head node;
        u32 flag;
        enum hnae3_dev_type dev_type;
+       enum hnae3_reset_type reset_type;
        void *priv;
 };
 
@@ -349,8 +355,6 @@ struct hnae3_ae_ops {
                           const unsigned char *addr);
        int (*rm_mc_addr)(struct hnae3_handle *handle,
                          const unsigned char *addr);
-       int (*update_mta_status)(struct hnae3_handle *handle);
-
        void (*set_tso_stats)(struct hnae3_handle *handle, int enable);
        void (*update_stats)(struct hnae3_handle *handle,
                             struct net_device_stats *net_stats);
@@ -402,7 +406,7 @@ struct hnae3_ae_ops {
        void (*get_channels)(struct hnae3_handle *handle,
                             struct ethtool_channels *ch);
        void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
-                                     u16 *free_tqps, u16 *max_rss_size);
+                                     u16 *alloc_tqps, u16 *max_rss_size);
        int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num);
        void (*get_flowctrl_adv)(struct hnae3_handle *handle,
                                 u32 *flowctrl_adv);
@@ -411,6 +415,20 @@ struct hnae3_ae_ops {
        void (*get_link_mode)(struct hnae3_handle *handle,
                              unsigned long *supported,
                              unsigned long *advertising);
+       int (*add_fd_entry)(struct hnae3_handle *handle,
+                           struct ethtool_rxnfc *cmd);
+       int (*del_fd_entry)(struct hnae3_handle *handle,
+                           struct ethtool_rxnfc *cmd);
+       void (*del_all_fd_entries)(struct hnae3_handle *handle,
+                                  bool clear_list);
+       int (*get_fd_rule_cnt)(struct hnae3_handle *handle,
+                              struct ethtool_rxnfc *cmd);
+       int (*get_fd_rule_info)(struct hnae3_handle *handle,
+                               struct ethtool_rxnfc *cmd);
+       int (*get_fd_all_rules)(struct hnae3_handle *handle,
+                               struct ethtool_rxnfc *cmd, u32 *rule_locs);
+       int (*restore_fd_rules)(struct hnae3_handle *handle);
+       void (*enable_fd)(struct hnae3_handle *handle, bool enable);
 };
 
 struct hnae3_dcb_ops {
@@ -478,10 +496,11 @@ struct hnae3_unic_private_info {
        struct hnae3_queue **tqp;  /* array base of all TQPs of this instance */
 };
 
-#define HNAE3_SUPPORT_MAC_LOOPBACK    BIT(0)
+#define HNAE3_SUPPORT_APP_LOOPBACK    BIT(0)
 #define HNAE3_SUPPORT_PHY_LOOPBACK    BIT(1)
-#define HNAE3_SUPPORT_SERDES_LOOPBACK BIT(2)
+#define HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK   BIT(2)
 #define HNAE3_SUPPORT_VF             BIT(3)
+#define HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK BIT(4)
 
 struct hnae3_handle {
        struct hnae3_client *client;
index 5a4773a9b4a57ce97f7f8d10fd6e355c7e7e6d46..e9d4564b8ce1c2a862f47dfdf1fe66bc931c4205 100644
@@ -21,6 +21,7 @@
 
 static void hns3_clear_all_ring(struct hnae3_handle *h);
 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
+static void hns3_remove_hw_addr(struct net_device *netdev);
 
 static const char hns3_driver_name[] = "hns3";
 const char hns3_driver_version[] = VERMAGIC_STRING;
@@ -225,8 +226,6 @@ void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
 static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
                                   struct hns3_nic_priv *priv)
 {
-       struct hnae3_handle *h = priv->ae_handle;
-
        /* initialize the configuration for interrupt coalescing.
         * 1. GL (Interrupt Gap Limiter)
         * 2. RL (Interrupt Rate Limiter)
@@ -239,9 +238,6 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
        tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
        tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
 
-       /* Default: disable RL */
-       h->kinfo.int_rl_setting = 0;
-
        tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
        tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
        tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
@@ -307,12 +303,12 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
 
 static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
 {
-       u16 free_tqps, max_rss_size, max_tqps;
+       u16 alloc_tqps, max_rss_size, rss_size;
 
-       h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
-       max_tqps = h->kinfo.num_tc * max_rss_size;
+       h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
+       rss_size = alloc_tqps / h->kinfo.num_tc;
 
-       return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
+       return min_t(u16, rss_size, max_rss_size);
 }
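
Worked example of the new bound: with alloc_tqps = 16, num_tc = 4 and
max_rss_size = 64, the function returns min(16 / 4, 64) = 4, so the
channel count is limited by the queues actually allocated per TC rather
than by a theoretical num_tc * max_rss_size product.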
 
 static int hns3_nic_net_up(struct net_device *netdev)
@@ -480,9 +476,6 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev)
        if (netdev->flags & IFF_MULTICAST) {
                if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
                        netdev_err(netdev, "sync mc address fail\n");
-
-               if (h->ae_algo->ops->update_mta_status)
-                       h->ae_algo->ops->update_mta_status(h);
        }
 }
 
@@ -1290,6 +1283,13 @@ static int hns3_nic_set_features(struct net_device *netdev,
                        return ret;
        }
 
+       if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
+               if (features & NETIF_F_NTUPLE)
+                       h->ae_algo->ops->enable_fd(h, true);
+               else
+                       h->ae_algo->ops->enable_fd(h, false);
+       }
+
        netdev->features = features;
        return 0;
 }
@@ -1491,13 +1491,11 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
        }
 
        ret = h->ae_algo->ops->set_mtu(h, new_mtu);
-       if (ret) {
+       if (ret)
                netdev_err(netdev, "failed to change MTU in hardware %d\n",
                           ret);
-               return ret;
-       }
-
-       netdev->mtu = new_mtu;
+       else
+               netdev->mtu = new_mtu;
 
        /* if the netdev was running earlier, bring it up again */
        if (if_running && hns3_nic_net_open(netdev))
@@ -1629,6 +1627,13 @@ static void hns3_disable_sriov(struct pci_dev *pdev)
        pci_disable_sriov(pdev);
 }
 
+static void hns3_get_dev_capability(struct pci_dev *pdev,
+                                   struct hnae3_ae_dev *ae_dev)
+{
+       if (pdev->revision >= 0x21)
+               hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
+}
+
 /* hns3_probe - Device initialization routine
  * @pdev: PCI device information struct
  * @ent: entry in hns3_pci_tbl
@@ -1654,6 +1659,8 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        ae_dev->pdev = pdev;
        ae_dev->flag = ent->driver_data;
        ae_dev->dev_type = HNAE3_DEV_KNIC;
+       ae_dev->reset_type = HNAE3_NONE_RESET;
+       hns3_get_dev_capability(pdev, ae_dev);
        pci_set_drvdata(pdev, ae_dev);
 
        hnae3_register_ae_dev(ae_dev);
@@ -1740,7 +1747,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
                NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
-               NETIF_F_GSO_UDP_TUNNEL_CSUM;
+               NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
 
        netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
 
@@ -1752,24 +1759,30 @@ static void hns3_set_default_feature(struct net_device *netdev)
                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
                NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
-               NETIF_F_GSO_UDP_TUNNEL_CSUM;
+               NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
 
        netdev->vlan_features |=
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
                NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
                NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
-               NETIF_F_GSO_UDP_TUNNEL_CSUM;
+               NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
 
        netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
                NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
-               NETIF_F_GSO_UDP_TUNNEL_CSUM;
+               NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
 
-       if (pdev->revision != 0x20)
+       if (pdev->revision >= 0x21) {
                netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+               if (!(h->flags & HNAE3_SUPPORT_VF)) {
+                       netdev->hw_features |= NETIF_F_NTUPLE;
+                       netdev->features |= NETIF_F_NTUPLE;
+               }
+       }
 }
 
 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -2187,18 +2200,18 @@ static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
        napi_gro_receive(&ring->tqp_vector->napi, skb);
 }
 
-static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
-                              struct hns3_desc *desc, u32 l234info)
+static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
+                               struct hns3_desc *desc, u32 l234info,
+                               u16 *vlan_tag)
 {
        struct pci_dev *pdev = ring->tqp->handle->pdev;
-       u16 vlan_tag;
 
        if (pdev->revision == 0x20) {
-               vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
-               if (!(vlan_tag & VLAN_VID_MASK))
-                       vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+               *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+               if (!(*vlan_tag & VLAN_VID_MASK))
+                       *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
 
-               return vlan_tag;
+               return (*vlan_tag != 0);
        }
 
 #define HNS3_STRP_OUTER_VLAN   0x1
@@ -2207,17 +2220,14 @@ static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
        switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
                                HNS3_RXD_STRP_TAGP_S)) {
        case HNS3_STRP_OUTER_VLAN:
-               vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
-               break;
+               *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+               return true;
        case HNS3_STRP_INNER_VLAN:
-               vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
-               break;
+               *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+               return true;
        default:
-               vlan_tag = 0;
-               break;
+               return false;
        }
-
-       return vlan_tag;
 }
 
 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
@@ -2319,8 +2329,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
        if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
                u16 vlan_tag;
 
-               vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info);
-               if (vlan_tag & VLAN_VID_MASK)
+               if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
                        __vlan_hwaccel_put_tag(skb,
                                               htons(ETH_P_8021Q),
                                               vlan_tag);
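
Behavioural example of the new contract: a priority-tagged frame
(PCP 5, VID 0) carries the tag value 0xa000. The old
'vlan_tag & VLAN_VID_MASK' test in hns3_handle_rx_bd() dropped that tag;
with the boolean return, "tag present" is reported separately from the
tag value, so __vlan_hwaccel_put_tag() still runs and the priority bits
reach the stack.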
@@ -3140,13 +3149,23 @@ static void hns3_init_mac_addr(struct net_device *netdev, bool init)
 
 }
 
-static void hns3_uninit_mac_addr(struct net_device *netdev)
+static int hns3_restore_fd_rules(struct net_device *netdev)
 {
-       struct hns3_nic_priv *priv = netdev_priv(netdev);
-       struct hnae3_handle *h = priv->ae_handle;
+       struct hnae3_handle *h = hns3_get_handle(netdev);
+       int ret = 0;
 
-       if (h->ae_algo->ops->rm_uc_addr)
-               h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr);
+       if (h->ae_algo->ops->restore_fd_rules)
+               ret = h->ae_algo->ops->restore_fd_rules(h);
+
+       return ret;
+}
+
+static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
+{
+       struct hnae3_handle *h = hns3_get_handle(netdev);
+
+       if (h->ae_algo->ops->del_all_fd_entries)
+               h->ae_algo->ops->del_all_fd_entries(h, clear_list);
 }
 
 static void hns3_nic_set_priv_ops(struct net_device *netdev)
@@ -3166,12 +3185,14 @@ static void hns3_nic_set_priv_ops(struct net_device *netdev)
 static int hns3_client_init(struct hnae3_handle *handle)
 {
        struct pci_dev *pdev = handle->pdev;
+       u16 alloc_tqps, max_rss_size;
        struct hns3_nic_priv *priv;
        struct net_device *netdev;
        int ret;
 
-       netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
-                                  hns3_get_max_available_channels(handle));
+       handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
+                                                   &max_rss_size);
+       netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
        if (!netdev)
                return -ENOMEM;
 
@@ -3260,9 +3281,13 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        int ret;
 
+       hns3_remove_hw_addr(netdev);
+
        if (netdev->reg_state != NETREG_UNINITIALIZED)
                unregister_netdev(netdev);
 
+       hns3_del_all_fd_rules(netdev, true);
+
        hns3_force_clear_all_rx_ring(handle);
 
        ret = hns3_nic_uninit_vector_data(priv);
@@ -3281,8 +3306,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
 
        priv->ring_data = NULL;
 
-       hns3_uninit_mac_addr(netdev);
-
        free_netdev(netdev);
 }
 
@@ -3354,6 +3377,25 @@ static void hns3_recover_hw_addr(struct net_device *ndev)
                hns3_nic_mc_sync(ndev, ha->addr);
 }
 
+static void hns3_remove_hw_addr(struct net_device *netdev)
+{
+       struct netdev_hw_addr_list *list;
+       struct netdev_hw_addr *ha, *tmp;
+
+       hns3_nic_uc_unsync(netdev, netdev->dev_addr);
+
+       /* go through and unsync uc_addr entries from the device */
+       list = &netdev->uc;
+       list_for_each_entry_safe(ha, tmp, &list->list, list)
+               hns3_nic_uc_unsync(netdev, ha->addr);
+
+       /* go through and unsync mc_addr entries from the device */
+       list = &netdev->mc;
+       list_for_each_entry_safe(ha, tmp, &list->list, list)
+               if (ha->refcount > 1)
+                       hns3_nic_mc_unsync(netdev, ha->addr);
+}
+
 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
 {
        while (ring->next_to_clean != ring->next_to_use) {
@@ -3490,6 +3532,31 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
        return 0;
 }
 
+static void hns3_store_coal(struct hns3_nic_priv *priv)
+{
+       /* ethtool only supports setting and querying one coalesce
+        * configuration for now, so save vector 0's coalesce
+        * configuration here in order to restore it later.
+        */
+       memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
+              sizeof(struct hns3_enet_coalesce));
+       memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
+              sizeof(struct hns3_enet_coalesce));
+}
+
+static void hns3_restore_coal(struct hns3_nic_priv *priv)
+{
+       u16 vector_num = priv->vector_num;
+       int i;
+
+       for (i = 0; i < vector_num; i++) {
+               memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
+                      sizeof(struct hns3_enet_coalesce));
+               memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
+                      sizeof(struct hns3_enet_coalesce));
+       }
+}
+
 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
 {
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
@@ -3533,9 +3600,13 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
        if (!(handle->flags & HNAE3_SUPPORT_VF))
                hns3_restore_vlan(netdev);
 
+       hns3_restore_fd_rules(netdev);
+
        /* Carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(netdev);
 
+       hns3_restore_coal(priv);
+
        ret = hns3_nic_init_vector_data(priv);
        if (ret)
                return ret;
@@ -3551,6 +3622,7 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
 
 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
 {
+       struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
        struct net_device *netdev = handle->kinfo.netdev;
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        int ret;
@@ -3563,11 +3635,20 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
                return ret;
        }
 
+       hns3_store_coal(priv);
+
        ret = hns3_uninit_all_ring(priv);
        if (ret)
                netdev_err(netdev, "uninit ring error\n");
 
-       hns3_uninit_mac_addr(netdev);
+       /* It is cumbersome for hardware to pick-and-choose entries for
+        * deletion from its table space, so on a function reset software
+        * intervention is required to delete the entries.
+        */
+       if (hns3_dev_ongoing_func_reset(ae_dev)) {
+               hns3_remove_hw_addr(netdev);
+               hns3_del_all_fd_rules(netdev, false);
+       }
 
        return ret;
 }
@@ -3597,24 +3678,7 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
        return ret;
 }
 
-static void hns3_restore_coal(struct hns3_nic_priv *priv,
-                             struct hns3_enet_coalesce *tx,
-                             struct hns3_enet_coalesce *rx)
-{
-       u16 vector_num = priv->vector_num;
-       int i;
-
-       for (i = 0; i < vector_num; i++) {
-               memcpy(&priv->tqp_vector[i].tx_group.coal, tx,
-                      sizeof(struct hns3_enet_coalesce));
-               memcpy(&priv->tqp_vector[i].rx_group.coal, rx,
-                      sizeof(struct hns3_enet_coalesce));
-       }
-}
-
-static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num,
-                              struct hns3_enet_coalesce *tx,
-                              struct hns3_enet_coalesce *rx)
+static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
 {
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        struct hnae3_handle *h = hns3_get_handle(netdev);
@@ -3632,7 +3696,7 @@ static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num,
        if (ret)
                goto err_alloc_vector;
 
-       hns3_restore_coal(priv, tx, rx);
+       hns3_restore_coal(priv);
 
        ret = hns3_nic_init_vector_data(priv);
        if (ret)
@@ -3664,7 +3728,6 @@ int hns3_set_channels(struct net_device *netdev,
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        struct hnae3_handle *h = hns3_get_handle(netdev);
        struct hnae3_knic_private_info *kinfo = &h->kinfo;
-       struct hns3_enet_coalesce tx_coal, rx_coal;
        bool if_running = netif_running(netdev);
        u32 new_tqp_num = ch->combined_count;
        u16 org_tqp_num;
@@ -3696,15 +3759,7 @@ int hns3_set_channels(struct net_device *netdev,
                goto open_netdev;
        }
 
-       /* Changing the tqp num may also change the vector num,
-        * ethtool only support setting and querying one coal
-        * configuation for now, so save the vector 0' coal
-        * configuation here in order to restore it.
-        */
-       memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal,
-              sizeof(struct hns3_enet_coalesce));
-       memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal,
-              sizeof(struct hns3_enet_coalesce));
+       hns3_store_coal(priv);
 
        hns3_nic_dealloc_vector_data(priv);
 
@@ -3712,10 +3767,9 @@ int hns3_set_channels(struct net_device *netdev,
        hns3_put_ring_config(priv);
 
        org_tqp_num = h->kinfo.num_tqps;
-       ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal);
+       ret = hns3_modify_tqp_num(netdev, new_tqp_num);
        if (ret) {
-               ret = hns3_modify_tqp_num(netdev, org_tqp_num,
-                                         &tx_coal, &rx_coal);
+               ret = hns3_modify_tqp_num(netdev, org_tqp_num);
                if (ret) {
                        /* If revert to old tqp failed, fatal error occurred */
                        dev_err(&netdev->dev,
index 4a56c3d2204f735e4295a64c7e1369eb1a4c1e9b..ac881e8fc05d7fad0173e3f7e97f47b5b0aa1445 100644
@@ -543,6 +543,8 @@ struct hns3_nic_priv {
        /* Vxlan/Geneve information */
        struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX];
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+       struct hns3_enet_coalesce tx_coal;
+       struct hns3_enet_coalesce rx_coal;
 };
 
 union l3_hdr_info {
@@ -583,6 +585,11 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
        writel(value, reg_addr + reg);
 }
 
+static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev)
+{
+       return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET));
+}
+
 #define hns3_write_dev(a, reg, value) \
        hns3_write_reg((a)->io_base, (reg), (value))
 
index 8803a8721c8e635ec45be1cce9b782675f9eb424..7d79a074a2142e55c85c24adb7ae33a46173deb7 100644
@@ -53,7 +53,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
 
 #define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
 
-#define HNS3_SELF_TEST_TYPE_NUM                2
+#define HNS3_SELF_TEST_TYPE_NUM         3
 #define HNS3_NIC_LB_TEST_PKT_NUM       1
 #define HNS3_NIC_LB_TEST_RING_ID       0
 #define HNS3_NIC_LB_TEST_PACKET_SIZE   128
@@ -78,8 +78,9 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
                return -EOPNOTSUPP;
 
        switch (loop) {
-       case HNAE3_MAC_INTER_LOOP_SERDES:
-       case HNAE3_MAC_INTER_LOOP_MAC:
+       case HNAE3_LOOP_SERIAL_SERDES:
+       case HNAE3_LOOP_PARALLEL_SERDES:
+       case HNAE3_LOOP_APP:
                ret = h->ae_algo->ops->set_loopback(h, loop, en);
                break;
        default:
@@ -286,13 +287,18 @@ static void hns3_self_test(struct net_device *ndev,
        if (eth_test->flags != ETH_TEST_FL_OFFLINE)
                return;
 
-       st_param[HNAE3_MAC_INTER_LOOP_MAC][0] = HNAE3_MAC_INTER_LOOP_MAC;
-       st_param[HNAE3_MAC_INTER_LOOP_MAC][1] =
-                       h->flags & HNAE3_SUPPORT_MAC_LOOPBACK;
+       st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
+       st_param[HNAE3_LOOP_APP][1] =
+                       h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
 
-       st_param[HNAE3_MAC_INTER_LOOP_SERDES][0] = HNAE3_MAC_INTER_LOOP_SERDES;
-       st_param[HNAE3_MAC_INTER_LOOP_SERDES][1] =
-                       h->flags & HNAE3_SUPPORT_SERDES_LOOPBACK;
+       st_param[HNAE3_LOOP_SERIAL_SERDES][0] = HNAE3_LOOP_SERIAL_SERDES;
+       st_param[HNAE3_LOOP_SERIAL_SERDES][1] =
+                       h->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+
+       st_param[HNAE3_LOOP_PARALLEL_SERDES][0] =
+                       HNAE3_LOOP_PARALLEL_SERDES;
+       st_param[HNAE3_LOOP_PARALLEL_SERDES][1] =
+                       h->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
 
        if (if_running)
                ndev->netdev_ops->ndo_stop(ndev);
@@ -693,20 +699,33 @@ static int hns3_get_rxnfc(struct net_device *netdev,
 {
        struct hnae3_handle *h = hns3_get_handle(netdev);
 
-       if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss_tuple)
+       if (!h->ae_algo || !h->ae_algo->ops)
                return -EOPNOTSUPP;
 
        switch (cmd->cmd) {
        case ETHTOOL_GRXRINGS:
-               cmd->data = h->kinfo.rss_size;
-               break;
+               cmd->data = h->kinfo.num_tqps;
+               return 0;
        case ETHTOOL_GRXFH:
-               return h->ae_algo->ops->get_rss_tuple(h, cmd);
+               if (h->ae_algo->ops->get_rss_tuple)
+                       return h->ae_algo->ops->get_rss_tuple(h, cmd);
+               return -EOPNOTSUPP;
+       case ETHTOOL_GRXCLSRLCNT:
+               if (h->ae_algo->ops->get_fd_rule_cnt)
+                       return h->ae_algo->ops->get_fd_rule_cnt(h, cmd);
+               return -EOPNOTSUPP;
+       case ETHTOOL_GRXCLSRULE:
+               if (h->ae_algo->ops->get_fd_rule_info)
+                       return h->ae_algo->ops->get_fd_rule_info(h, cmd);
+               return -EOPNOTSUPP;
+       case ETHTOOL_GRXCLSRLALL:
+               if (h->ae_algo->ops->get_fd_all_rules)
+                       return h->ae_algo->ops->get_fd_all_rules(h, cmd,
+                                                                rule_locs);
+               return -EOPNOTSUPP;
        default:
                return -EOPNOTSUPP;
        }
-
-       return 0;
 }
 
 static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
@@ -789,12 +808,22 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
 {
        struct hnae3_handle *h = hns3_get_handle(netdev);
 
-       if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss_tuple)
+       if (!h->ae_algo || !h->ae_algo->ops)
                return -EOPNOTSUPP;
 
        switch (cmd->cmd) {
        case ETHTOOL_SRXFH:
-               return h->ae_algo->ops->set_rss_tuple(h, cmd);
+               if (h->ae_algo->ops->set_rss_tuple)
+                       return h->ae_algo->ops->set_rss_tuple(h, cmd);
+               return -EOPNOTSUPP;
+       case ETHTOOL_SRXCLSRLINS:
+               if (h->ae_algo->ops->add_fd_entry)
+                       return h->ae_algo->ops->add_fd_entry(h, cmd);
+               return -EOPNOTSUPP;
+       case ETHTOOL_SRXCLSRLDEL:
+               if (h->ae_algo->ops->del_fd_entry)
+                       return h->ae_algo->ops->del_fd_entry(h, cmd);
+               return -EOPNOTSUPP;
        default:
                return -EOPNOTSUPP;
        }
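
For reference, a minimal userspace sketch (not part of this patch) that exercises the new ETHTOOL_SRXCLSRLINS path above. The interface name, addresses, queue index, and rule location are placeholders; the structures are the standard <linux/ethtool.h> UAPI:

/* Insert a TCP/IPv4 flow director rule: steer dst-port 80 traffic from
 * 192.0.2.1 to RX queue 3. Placeholder values throughout.
 */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.ip4src = inet_addr("192.0.2.1");
	nfc.fs.m_u.tcp_ip4_spec.ip4src = htonl(0xffffffff); /* match all bits */
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);
	nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff);
	nfc.fs.ring_cookie = 3;	/* destination RX queue */
	nfc.fs.location = 0;	/* rule index in stage 1 */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXCLSRLINS");
	return 0;
}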
index 821d4c2f84bd3e7415a3a83c9de566fcea9ac491..1ccde67db77087c4bc2fd1332253aeb73e911197 100644 (file)
@@ -175,21 +175,22 @@ enum hclge_opcode_type {
        HCLGE_OPC_MAC_VLAN_REMOVE           = 0x1001,
        HCLGE_OPC_MAC_VLAN_TYPE_ID          = 0x1002,
        HCLGE_OPC_MAC_VLAN_INSERT           = 0x1003,
+       HCLGE_OPC_MAC_VLAN_ALLOCATE         = 0x1004,
        HCLGE_OPC_MAC_ETHTYPE_ADD           = 0x1010,
        HCLGE_OPC_MAC_ETHTYPE_REMOVE    = 0x1011,
-       HCLGE_OPC_MAC_VLAN_MASK_SET     = 0x1012,
-
-       /* Multicast linear table commands */
-       HCLGE_OPC_MTA_MAC_MODE_CFG          = 0x1020,
-       HCLGE_OPC_MTA_MAC_FUNC_CFG          = 0x1021,
-       HCLGE_OPC_MTA_TBL_ITEM_CFG          = 0x1022,
-       HCLGE_OPC_MTA_TBL_ITEM_QUERY    = 0x1023,
 
        /* VLAN commands */
        HCLGE_OPC_VLAN_FILTER_CTRL          = 0x1100,
        HCLGE_OPC_VLAN_FILTER_PF_CFG    = 0x1101,
        HCLGE_OPC_VLAN_FILTER_VF_CFG    = 0x1102,
 
+       /* Flow Director commands */
+       HCLGE_OPC_FD_MODE_CTRL          = 0x1200,
+       HCLGE_OPC_FD_GET_ALLOCATION     = 0x1201,
+       HCLGE_OPC_FD_KEY_CONFIG         = 0x1202,
+       HCLGE_OPC_FD_TCAM_OP            = 0x1203,
+       HCLGE_OPC_FD_AD_OP              = 0x1204,
+
        /* MDIO command */
        HCLGE_OPC_MDIO_CONFIG           = 0x1900,
 
@@ -395,6 +396,8 @@ struct hclge_pf_res_cmd {
 #define HCLGE_CFG_RSS_SIZE_M   GENMASK(31, 24)
 #define HCLGE_CFG_SPEED_ABILITY_S      0
 #define HCLGE_CFG_SPEED_ABILITY_M      GENMASK(7, 0)
+#define HCLGE_CFG_UMV_TBL_SPACE_S      16
+#define HCLGE_CFG_UMV_TBL_SPACE_M      GENMASK(31, 16)
 
 struct hclge_cfg_param_cmd {
        __le32 offset;
@@ -584,13 +587,12 @@ struct hclge_mac_vlan_tbl_entry_cmd {
        u8      rsv2[6];
 };
 
-#define HCLGE_VLAN_MASK_EN_B           0
-struct hclge_mac_vlan_mask_entry_cmd {
-       u8 rsv0[2];
-       u8 vlan_mask;
-       u8 rsv1;
-       u8 mac_mask[6];
-       u8 rsv2[14];
+#define HCLGE_UMV_SPC_ALC_B    0
+struct hclge_umv_spc_alc_cmd {
+       u8 allocate;
+       u8 rsv1[3];
+       __le32 space_size;
+       u8 rsv2[16];
 };
 
 #define HCLGE_MAC_MGR_MASK_VLAN_B              BIT(0)
@@ -615,30 +617,6 @@ struct hclge_mac_mgr_tbl_entry_cmd {
        u8      rsv3[2];
 };
 
-#define HCLGE_CFG_MTA_MAC_SEL_S                0
-#define HCLGE_CFG_MTA_MAC_SEL_M                GENMASK(1, 0)
-#define HCLGE_CFG_MTA_MAC_EN_B         7
-struct hclge_mta_filter_mode_cmd {
-       u8      dmac_sel_en; /* Use lowest 2 bit as sel_mode, bit 7 as enable */
-       u8      rsv[23];
-};
-
-#define HCLGE_CFG_FUNC_MTA_ACCEPT_B    0
-struct hclge_cfg_func_mta_filter_cmd {
-       u8      accept; /* Only used lowest 1 bit */
-       u8      function_id;
-       u8      rsv[22];
-};
-
-#define HCLGE_CFG_MTA_ITEM_ACCEPT_B    0
-#define HCLGE_CFG_MTA_ITEM_IDX_S       0
-#define HCLGE_CFG_MTA_ITEM_IDX_M       GENMASK(11, 0)
-struct hclge_cfg_func_mta_item_cmd {
-       __le16  item_idx; /* Only used lowest 12 bit */
-       u8      accept;   /* Only used lowest 1 bit */
-       u8      rsv[21];
-};
-
 struct hclge_mac_vlan_add_cmd {
        __le16  flags;
        __le16  mac_addr_hi16;
@@ -778,6 +756,7 @@ struct hclge_reset_cmd {
 };
 
 #define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B   BIT(0)
+#define HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B BIT(2)
 #define HCLGE_CMD_SERDES_DONE_B                        BIT(0)
 #define HCLGE_CMD_SERDES_SUCCESS_B             BIT(1)
 struct hclge_serdes_lb_cmd {
@@ -818,6 +797,76 @@ struct hclge_set_led_state_cmd {
        u8 rsv2[20];
 };
 
+struct hclge_get_fd_mode_cmd {
+       u8 mode;
+       u8 enable;
+       u8 rsv[22];
+};
+
+struct hclge_get_fd_allocation_cmd {
+       __le32 stage1_entry_num;
+       __le32 stage2_entry_num;
+       __le16 stage1_counter_num;
+       __le16 stage2_counter_num;
+       u8 rsv[12];
+};
+
+struct hclge_set_fd_key_config_cmd {
+       u8 stage;
+       u8 key_select;
+       u8 inner_sipv6_word_en;
+       u8 inner_dipv6_word_en;
+       u8 outer_sipv6_word_en;
+       u8 outer_dipv6_word_en;
+       u8 rsv1[2];
+       __le32 tuple_mask;
+       __le32 meta_data_mask;
+       u8 rsv2[8];
+};
+
+#define HCLGE_FD_EPORT_SW_EN_B         0
+struct hclge_fd_tcam_config_1_cmd {
+       u8 stage;
+       u8 xy_sel;
+       u8 port_info;
+       u8 rsv1[1];
+       __le32 index;
+       u8 entry_vld;
+       u8 rsv2[7];
+       u8 tcam_data[8];
+};
+
+struct hclge_fd_tcam_config_2_cmd {
+       u8 tcam_data[24];
+};
+
+struct hclge_fd_tcam_config_3_cmd {
+       u8 tcam_data[20];
+       u8 rsv[4];
+};
+
+#define HCLGE_FD_AD_DROP_B             0
+#define HCLGE_FD_AD_DIRECT_QID_B       1
+#define HCLGE_FD_AD_QID_S              2
+#define HCLGE_FD_AD_QID_M              GENMASK(12, 2)
+#define HCLGE_FD_AD_USE_COUNTER_B      12
+#define HCLGE_FD_AD_COUNTER_NUM_S      13
+#define HCLGE_FD_AD_COUNTER_NUM_M      GENMASK(20, 13)
+#define HCLGE_FD_AD_NXT_STEP_B         20
+#define HCLGE_FD_AD_NXT_KEY_S          21
+#define HCLGE_FD_AD_NXT_KEY_M          GENMASK(26, 21)
+#define HCLGE_FD_AD_WR_RULE_ID_B       0
+#define HCLGE_FD_AD_RULE_ID_S          1
+#define HCLGE_FD_AD_RULE_ID_M          GENMASK(13, 1)
+
+struct hclge_fd_ad_config_cmd {
+       u8 stage;
+       u8 rsv1[3];
+       __le32 index;
+       __le64 ad_data;
+       u8 rsv2[8];
+};
+
 int hclge_cmd_init(struct hclge_dev *hdev);
 static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
 {
index 92f19384e25857ccef5d52f35329951250449057..e72f724123d7674858266c47c01403ab156ebaee 100644 (file)
@@ -184,7 +184,9 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
        if (ret)
                return ret;
 
-       hclge_tm_schd_info_update(hdev, num_tc);
+       ret = hclge_tm_schd_info_update(hdev, num_tc);
+       if (ret)
+               return ret;
 
        ret = hclge_ieee_ets_to_tm_info(hdev, ets);
        if (ret)
@@ -310,7 +312,9 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
                return -EINVAL;
        }
 
-       hclge_tm_schd_info_update(hdev, tc);
+       ret = hclge_tm_schd_info_update(hdev, tc);
+       if (ret)
+               return ret;
 
        ret = hclge_tm_prio_tc_info_update(hdev, prio_tc);
        if (ret)
index 2de5a0061dc9e488bdb26815d1d8094e4389d24c..ca1a93664d0e34382600079212ea1c101fd540cd 100644 (file)
 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
 
-static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
-                                    enum hclge_mta_dmac_sel_type mta_mac_sel,
-                                    bool enable);
 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
 static int hclge_init_vlan_config(struct hclge_dev *hdev);
 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
+static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
+                              u16 *allocated_size, bool is_alloc);
 
 static struct hnae3_ae_algo ae_algo;
 
@@ -49,8 +48,9 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
 
 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
-       "Mac    Loopback test",
-       "Serdes Loopback test",
+       "App    Loopback test",
+       "Serdes serial Loopback test",
+       "Serdes parallel Loopback test",
        "Phy    Loopback test"
 };
 
@@ -475,7 +475,10 @@ static void hclge_update_stats(struct hnae3_handle *handle,
 
 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
 {
-#define HCLGE_LOOPBACK_TEST_FLAGS 0x7
+#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
+               HNAE3_SUPPORT_PHY_LOOPBACK |\
+               HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
+               HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
 
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
@@ -489,15 +492,17 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
        if (stringset == ETH_SS_TEST) {
                /* clear loopback bit flags at first */
                handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
-               if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
+               if (hdev->pdev->revision >= 0x21 ||
+                   hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
                        count += 1;
-                       handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
+                       handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
                }
 
-               count++;
-               handle->flags |= HNAE3_SUPPORT_SERDES_LOOPBACK;
+               count += 2;
+               handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+               handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
        } else if (stringset == ETH_SS_STATS) {
                count = ARRAY_SIZE(g_mac_stats_string) +
                        hclge_tqps_get_sset_count(handle, stringset);
@@ -521,21 +526,27 @@ static void hclge_get_strings(struct hnae3_handle *handle,
                                           p);
                p = hclge_tqps_get_strings(handle, p);
        } else if (stringset == ETH_SS_TEST) {
-               if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
+               if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
                        memcpy(p,
-                              hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
+                              hns3_nic_test_strs[HNAE3_LOOP_APP],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
-               if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
+               if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
                        memcpy(p,
-                              hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
+                              hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
+                              ETH_GSTRING_LEN);
+                       p += ETH_GSTRING_LEN;
+               }
+               if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
+                       memcpy(p,
+                              hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
                        memcpy(p,
-                              hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
+                              hns3_nic_test_strs[HNAE3_LOOP_PHY],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
@@ -766,6 +777,11 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
        cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                             HCLGE_CFG_SPEED_ABILITY_M,
                                             HCLGE_CFG_SPEED_ABILITY_S);
+       cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
+                                        HCLGE_CFG_UMV_TBL_SPACE_M,
+                                        HCLGE_CFG_UMV_TBL_SPACE_S);
+       if (!cfg->umv_space)
+               cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
 }
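
The UMV table space is read out of param[1] with a mask/shift pair. A standalone sketch of that extraction, assuming hnae3_get_field() is the usual (value & mask) >> shift helper; the configuration word below is a made-up example:

#include <stdint.h>
#include <stdio.h>

#define CFG_UMV_TBL_SPACE_S	16
#define CFG_UMV_TBL_SPACE_M	0xFFFF0000u	/* GENMASK(31, 16) */

static uint32_t get_field(uint32_t origin, uint32_t mask, unsigned int shift)
{
	return (origin & mask) >> shift;
}

int main(void)
{
	uint32_t param1 = 0x01000000;	/* example configuration word */
	uint32_t umv_space = get_field(param1, CFG_UMV_TBL_SPACE_M,
				       CFG_UMV_TBL_SPACE_S);

	/* 0 would mean "not provisioned", in which case the driver falls
	 * back to HCLGE_DEFAULT_UMV_SPACE_PER_PF.
	 */
	printf("umv_space = %u\n", umv_space);	/* prints 256 */
	return 0;
}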
 
 /* hclge_get_cfg: query the static parameter from flash
@@ -844,6 +860,7 @@ static int hclge_configure(struct hclge_dev *hdev)
        hdev->tm_info.num_pg = 1;
        hdev->tc_max = cfg.tc_num;
        hdev->tm_info.hw_pfc_map = 0;
+       hdev->wanted_umv_size = cfg.umv_space;
 
        ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
        if (ret) {
@@ -1344,11 +1361,13 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
                                struct hclge_pkt_buf_alloc *buf_alloc)
 {
-       u32 rx_all = hdev->pkt_buf_size;
+#define HCLGE_BUF_SIZE_UNIT    128
+       u32 rx_all = hdev->pkt_buf_size, aligned_mps;
        int no_pfc_priv_num, pfc_priv_num;
        struct hclge_priv_buf *priv;
        int i;
 
+       aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
        rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
 
        /* When DCB is not supported, rx private
@@ -1367,13 +1386,13 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
                if (hdev->hw_tc_map & BIT(i)) {
                        priv->enable = 1;
                        if (hdev->tm_info.hw_pfc_map & BIT(i)) {
-                               priv->wl.low = hdev->mps;
-                               priv->wl.high = priv->wl.low + hdev->mps;
+                               priv->wl.low = aligned_mps;
+                               priv->wl.high = priv->wl.low + aligned_mps;
                                priv->buf_size = priv->wl.high +
                                                HCLGE_DEFAULT_DV;
                        } else {
                                priv->wl.low = 0;
-                               priv->wl.high = 2 * hdev->mps;
+                               priv->wl.high = 2 * aligned_mps;
                                priv->buf_size = priv->wl.high;
                        }
                } else {
@@ -1405,11 +1424,11 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
 
                if (hdev->tm_info.hw_pfc_map & BIT(i)) {
                        priv->wl.low = 128;
-                       priv->wl.high = priv->wl.low + hdev->mps;
+                       priv->wl.high = priv->wl.low + aligned_mps;
                        priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
                } else {
                        priv->wl.low = 0;
-                       priv->wl.high = hdev->mps;
+                       priv->wl.high = aligned_mps;
                        priv->buf_size = priv->wl.high;
                }
        }
@@ -1925,40 +1944,13 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
        return hdev->hw.mac.autoneg;
 }
 
-static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
-                                          bool mask_vlan,
-                                          u8 *mac_mask)
-{
-       struct hclge_mac_vlan_mask_entry_cmd *req;
-       struct hclge_desc desc;
-       int status;
-
-       req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
-       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
-
-       hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
-                     mask_vlan ? 1 : 0);
-       ether_addr_copy(req->mac_mask, mac_mask);
-
-       status = hclge_cmd_send(&hdev->hw, &desc, 1);
-       if (status)
-               dev_err(&hdev->pdev->dev,
-                       "Config mac_vlan_mask failed for cmd_send, ret =%d\n",
-                       status);
-
-       return status;
-}
-
 static int hclge_mac_init(struct hclge_dev *hdev)
 {
        struct hnae3_handle *handle = &hdev->vport[0].nic;
        struct net_device *netdev = handle->kinfo.netdev;
        struct hclge_mac *mac = &hdev->hw.mac;
-       u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-       struct hclge_vport *vport;
        int mtu;
        int ret;
-       int i;
 
        hdev->hw.mac.duplex = HCLGE_MAC_FULL;
        ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
@@ -1971,39 +1963,6 @@ static int hclge_mac_init(struct hclge_dev *hdev)
 
        mac->link = 0;
 
-       /* Initialize the MTA table work mode */
-       hdev->enable_mta        = true;
-       hdev->mta_mac_sel_type  = HCLGE_MAC_ADDR_47_36;
-
-       ret = hclge_set_mta_filter_mode(hdev,
-                                       hdev->mta_mac_sel_type,
-                                       hdev->enable_mta);
-       if (ret) {
-               dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
-                       ret);
-               return ret;
-       }
-
-       for (i = 0; i < hdev->num_alloc_vport; i++) {
-               vport = &hdev->vport[i];
-               vport->accept_mta_mc = false;
-
-               memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow));
-               ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false);
-               if (ret) {
-                       dev_err(&hdev->pdev->dev,
-                               "set mta filter mode fail ret=%d\n", ret);
-                       return ret;
-               }
-       }
-
-       ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
-       if (ret) {
-               dev_err(&hdev->pdev->dev,
-                       "set default mac_vlan_mask fail ret=%d\n", ret);
-               return ret;
-       }
-
        if (netdev)
                mtu = netdev->mtu;
        else
@@ -2499,8 +2458,13 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
 
 static void hclge_reset(struct hclge_dev *hdev)
 {
+       struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
        struct hnae3_handle *handle;
 
+       /* Initialize the ae_dev reset status as well, in case the enet
+        * layer wants to know if the device is undergoing reset
+        */
+       ae_dev->reset_type = hdev->reset_type;
        /* perform reset of the stack & ae device for a client */
        handle = &hdev->vport[0].nic;
        rtnl_lock();
@@ -2521,6 +2485,7 @@ static void hclge_reset(struct hclge_dev *hdev)
        hclge_notify_client(hdev, HNAE3_UP_CLIENT);
        handle->last_reset_time = jiffies;
        rtnl_unlock();
+       ae_dev->reset_type = HNAE3_NONE_RESET;
 }
 
 static void hclge_reset_event(struct hnae3_handle *handle)
@@ -3314,261 +3279,1552 @@ static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
        hclge_cmd_set_promisc_mode(hdev, &param);
 }
 
-static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
+static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
 {
+       struct hclge_get_fd_mode_cmd *req;
        struct hclge_desc desc;
-       struct hclge_config_mac_mode_cmd *req =
-               (struct hclge_config_mac_mode_cmd *)desc.data;
-       u32 loop_en = 0;
        int ret;
 
-       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
-       hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
-       hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
-       hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
-       hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
-       hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
-       hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
-       hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
-       hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
-       hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
-       hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
-       hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
-       hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
-       hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
-       hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
-       req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
-
-       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-       if (ret)
-               dev_err(&hdev->pdev->dev,
-                       "mac enable fail, ret =%d.\n", ret);
-}
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
 
-static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
-{
-       struct hclge_config_mac_mode_cmd *req;
-       struct hclge_desc desc;
-       u32 loop_en;
-       int ret;
+       req = (struct hclge_get_fd_mode_cmd *)desc.data;
 
-       req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
-       /* 1 Read out the MAC mode config at first */
-       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
-               dev_err(&hdev->pdev->dev,
-                       "mac loopback get fail, ret =%d.\n", ret);
+               dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
                return ret;
        }
 
-       /* 2 Then setup the loopback flag */
-       loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
-       hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
-       hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
-       hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
-
-       req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
+       *fd_mode = req->mode;
 
-       /* 3 Config mac work mode with loopback flag
-        * and its original configure parameters
-        */
-       hclge_cmd_reuse_desc(&desc, false);
-       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-       if (ret)
-               dev_err(&hdev->pdev->dev,
-                       "mac loopback set fail, ret =%d.\n", ret);
        return ret;
 }
 
-static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en)
+static int hclge_get_fd_allocation(struct hclge_dev *hdev,
+                                  u32 *stage1_entry_num,
+                                  u32 *stage2_entry_num,
+                                  u16 *stage1_counter_num,
+                                  u16 *stage2_counter_num)
 {
-#define HCLGE_SERDES_RETRY_MS  10
-#define HCLGE_SERDES_RETRY_NUM 100
-       struct hclge_serdes_lb_cmd *req;
+       struct hclge_get_fd_allocation_cmd *req;
        struct hclge_desc desc;
-       int ret, i = 0;
+       int ret;
 
-       req = (struct hclge_serdes_lb_cmd *)desc.data;
-       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
 
-       if (en) {
-               req->enable = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
-               req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
-       } else {
-               req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
-       }
+       req = (struct hclge_get_fd_allocation_cmd *)desc.data;
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
-               dev_err(&hdev->pdev->dev,
-                       "serdes loopback set fail, ret = %d\n", ret);
+               dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
+                       ret);
                return ret;
        }
 
-       do {
-               msleep(HCLGE_SERDES_RETRY_MS);
-               hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
-                                          true);
-               ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-               if (ret) {
-                       dev_err(&hdev->pdev->dev,
-                               "serdes loopback get, ret = %d\n", ret);
-                       return ret;
-               }
-       } while (++i < HCLGE_SERDES_RETRY_NUM &&
-                !(req->result & HCLGE_CMD_SERDES_DONE_B));
-
-       if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
-               dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
-               return -EBUSY;
-       } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
-               dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
-               return -EIO;
-       }
+       *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
+       *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
+       *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
+       *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
 
-       hclge_cfg_mac_mode(hdev, en);
-       return 0;
+       return ret;
 }
 
-static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
-                           int stream_id, bool enable)
+static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
 {
+       struct hclge_set_fd_key_config_cmd *req;
+       struct hclge_fd_key_cfg *stage;
        struct hclge_desc desc;
-       struct hclge_cfg_com_tqp_queue_cmd *req =
-               (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
        int ret;
 
-       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
-       req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
-       req->stream_id = cpu_to_le16(stream_id);
-       req->enable |= enable << HCLGE_TQP_ENABLE_B;
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
+
+       req = (struct hclge_set_fd_key_config_cmd *)desc.data;
+       stage = &hdev->fd_cfg.key_cfg[stage_num];
+       req->stage = stage_num;
+       req->key_select = stage->key_sel;
+       req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
+       req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
+       req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
+       req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
+       req->tuple_mask = cpu_to_le32(~stage->tuple_active);
+       req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
-               dev_err(&hdev->pdev->dev,
-                       "Tqp enable fail, status =%d.\n", ret);
+               dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
+
        return ret;
 }
 
-static int hclge_set_loopback(struct hnae3_handle *handle,
-                             enum hnae3_loop loop_mode, bool en)
+static int hclge_init_fd_config(struct hclge_dev *hdev)
 {
-       struct hclge_vport *vport = hclge_get_vport(handle);
-       struct hclge_dev *hdev = vport->back;
-       int i, ret;
+#define LOW_2_WORDS            0x03
+       struct hclge_fd_key_cfg *key_cfg;
+       int ret;
 
-       switch (loop_mode) {
-       case HNAE3_MAC_INTER_LOOP_MAC:
-               ret = hclge_set_mac_loopback(hdev, en);
+       if (!hnae3_dev_fd_supported(hdev))
+               return 0;
+
+       ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
+       if (ret)
+               return ret;
+
+       switch (hdev->fd_cfg.fd_mode) {
+       case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
+               hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
                break;
-       case HNAE3_MAC_INTER_LOOP_SERDES:
-               ret = hclge_set_serdes_loopback(hdev, en);
+       case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
+               hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
                break;
        default:
-               ret = -ENOTSUPP;
                dev_err(&hdev->pdev->dev,
-                       "loop_mode %d is not supported\n", loop_mode);
-               break;
+                       "Unsupported flow director mode %d\n",
+                       hdev->fd_cfg.fd_mode);
+               return -EOPNOTSUPP;
        }
 
-       for (i = 0; i < vport->alloc_tqps; i++) {
-               ret = hclge_tqp_enable(hdev, i, 0, en);
-               if (ret)
-                       return ret;
+       hdev->fd_cfg.fd_en = true;
+       hdev->fd_cfg.proto_support =
+               TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
+               UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
+       key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
+       key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
+       key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
+       key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
+       key_cfg->outer_sipv6_word_en = 0;
+       key_cfg->outer_dipv6_word_en = 0;
+
+       key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
+                               BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
+                               BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
+                               BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+
+       /* If the max 400-bit key is used, ether type tuples are also supported */
+       if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
+               hdev->fd_cfg.proto_support |= ETHER_FLOW;
+               key_cfg->tuple_active |=
+                               BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
        }
 
-       return 0;
-}
+       /* roce_type is used to filter roce frames;
+        * dst_vport is used to specify the vport a rule applies to
+        */
+       key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
 
-static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
-{
-       struct hclge_vport *vport = hclge_get_vport(handle);
-       struct hnae3_queue *queue;
-       struct hclge_tqp *tqp;
-       int i;
+       ret = hclge_get_fd_allocation(hdev,
+                                     &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
+                                     &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
+                                     &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
+                                     &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
+       if (ret)
+               return ret;
 
-       for (i = 0; i < vport->alloc_tqps; i++) {
-               queue = handle->kinfo.tqp[i];
-               tqp = container_of(queue, struct hclge_tqp, q);
-               memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
-       }
+       return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
 }
 
-static int hclge_ae_start(struct hnae3_handle *handle)
+static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
+                               int loc, u8 *key, bool is_add)
 {
-       struct hclge_vport *vport = hclge_get_vport(handle);
-       struct hclge_dev *hdev = vport->back;
-       int i;
+       struct hclge_fd_tcam_config_1_cmd *req1;
+       struct hclge_fd_tcam_config_2_cmd *req2;
+       struct hclge_fd_tcam_config_3_cmd *req3;
+       struct hclge_desc desc[3];
+       int ret;
 
-       for (i = 0; i < vport->alloc_tqps; i++)
-               hclge_tqp_enable(hdev, i, 0, true);
+       hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
+       desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+       hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
+       desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+       hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
 
-       /* mac enable */
-       hclge_cfg_mac_mode(hdev, true);
-       clear_bit(HCLGE_STATE_DOWN, &hdev->state);
-       mod_timer(&hdev->service_timer, jiffies + HZ);
-       hdev->hw.mac.link = 0;
+       req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
+       req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
+       req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
 
-       /* reset tqp stats */
-       hclge_reset_tqp_stats(handle);
+       req1->stage = stage;
+       req1->xy_sel = sel_x ? 1 : 0;
+       hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
+       req1->index = cpu_to_le32(loc);
+       req1->entry_vld = sel_x ? is_add : 0;
 
-       hclge_mac_start_phy(hdev);
+       if (key) {
+               memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
+               memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
+                      sizeof(req2->tcam_data));
+               memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
+                      sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
+       }
 
-       return 0;
+       ret = hclge_cmd_send(&hdev->hw, desc, 3);
+       if (ret)
+               dev_err(&hdev->pdev->dev,
+                       "config tcam key fail, ret=%d\n",
+                       ret);
+
+       return ret;
 }
 
-static void hclge_ae_stop(struct hnae3_handle *handle)
+static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
+                             struct hclge_fd_ad_data *action)
 {
-       struct hclge_vport *vport = hclge_get_vport(handle);
-       struct hclge_dev *hdev = vport->back;
-       int i;
+       struct hclge_fd_ad_config_cmd *req;
+       struct hclge_desc desc;
+       u64 ad_data = 0;
+       int ret;
 
-       set_bit(HCLGE_STATE_DOWN, &hdev->state);
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
+
+       req = (struct hclge_fd_ad_config_cmd *)desc.data;
+       req->index = cpu_to_le32(loc);
+       req->stage = stage;
+
+       hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
+                     action->write_rule_id_to_bd);
+       hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
+                       action->rule_id);
+       ad_data <<= 32;
+       hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
+       hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
+                     action->forward_to_direct_queue);
+       hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
+                       action->queue_id);
+       hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
+       hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
+                       HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
+       hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
+       hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
+                       action->next_input_key);
+
+       req->ad_data = cpu_to_le64(ad_data);
+       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (ret)
+               dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
 
-       del_timer_sync(&hdev->service_timer);
-       cancel_work_sync(&hdev->service_task);
-       clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
+       return ret;
+}
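
A standalone sketch of how the 64-bit ad_data word above is packed, assuming hnae3_set_field() follows the usual clear-then-or pattern; the field positions mirror the HCLGE_FD_AD_* defines, and the rule id / queue id values are placeholders:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

static void set_field(uint64_t *word, uint64_t mask, unsigned int shift,
		      uint64_t val)
{
	*word = (*word & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint64_t ad_data = 0;

	/* rule id fields first, then shift them into the high half */
	set_field(&ad_data, 1ULL << 0, 0, 1);		/* WR_RULE_ID */
	set_field(&ad_data, GENMASK_ULL(13, 1), 1, 42);	/* RULE_ID = 42 */
	ad_data <<= 32;
	/* action fields occupy the low half */
	set_field(&ad_data, 1ULL << 1, 1, 1);		/* DIRECT_QID */
	set_field(&ad_data, GENMASK_ULL(12, 2), 2, 3);	/* QID = 3 */

	printf("ad_data = 0x%016llx\n", (unsigned long long)ad_data);
	return 0;
}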
 
-       if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
-               hclge_mac_stop_phy(hdev);
-               return;
-       }
+static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
+                                  struct hclge_fd_rule *rule)
+{
+       u16 tmp_x_s, tmp_y_s;
+       u32 tmp_x_l, tmp_y_l;
+       int i;
 
-       for (i = 0; i < vport->alloc_tqps; i++)
-               hclge_tqp_enable(hdev, i, 0, false);
+       if (rule->unused_tuple & tuple_bit)
+               return true;
 
-       /* Mac disable */
-       hclge_cfg_mac_mode(hdev, false);
+       switch (tuple_bit) {
+       case 0:
+               return false;
+       case BIT(INNER_DST_MAC):
+               for (i = 0; i < 6; i++) {
+                       calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
+                              rule->tuples_mask.dst_mac[i]);
+                       calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
+                              rule->tuples_mask.dst_mac[i]);
+               }
 
-       hclge_mac_stop_phy(hdev);
+               return true;
+       case BIT(INNER_SRC_MAC):
+               for (i = 0; i < 6; i++) {
+                       calc_x(key_x[5 - i], rule->tuples.src_mac[i],
+                              rule->tuples_mask.src_mac[i]);
+                       calc_y(key_y[5 - i], rule->tuples.src_mac[i],
+                              rule->tuples_mask.src_mac[i]);
+               }
 
-       /* reset tqp stats */
-       hclge_reset_tqp_stats(handle);
-       del_timer_sync(&hdev->service_timer);
-       cancel_work_sync(&hdev->service_task);
-       hclge_update_link_status(hdev);
+               return true;
+       case BIT(INNER_VLAN_TAG_FST):
+               calc_x(tmp_x_s, rule->tuples.vlan_tag1,
+                      rule->tuples_mask.vlan_tag1);
+               calc_y(tmp_y_s, rule->tuples.vlan_tag1,
+                      rule->tuples_mask.vlan_tag1);
+               *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+               *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+               return true;
+       case BIT(INNER_ETH_TYPE):
+               calc_x(tmp_x_s, rule->tuples.ether_proto,
+                      rule->tuples_mask.ether_proto);
+               calc_y(tmp_y_s, rule->tuples.ether_proto,
+                      rule->tuples_mask.ether_proto);
+               *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+               *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+               return true;
+       case BIT(INNER_IP_TOS):
+               calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
+               calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
+
+               return true;
+       case BIT(INNER_IP_PROTO):
+               calc_x(*key_x, rule->tuples.ip_proto,
+                      rule->tuples_mask.ip_proto);
+               calc_y(*key_y, rule->tuples.ip_proto,
+                      rule->tuples_mask.ip_proto);
+
+               return true;
+       case BIT(INNER_SRC_IP):
+               calc_x(tmp_x_l, rule->tuples.src_ip[3],
+                      rule->tuples_mask.src_ip[3]);
+               calc_y(tmp_y_l, rule->tuples.src_ip[3],
+                      rule->tuples_mask.src_ip[3]);
+               *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
+               *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
+
+               return true;
+       case BIT(INNER_DST_IP):
+               calc_x(tmp_x_l, rule->tuples.dst_ip[3],
+                      rule->tuples_mask.dst_ip[3]);
+               calc_y(tmp_y_l, rule->tuples.dst_ip[3],
+                      rule->tuples_mask.dst_ip[3]);
+               *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
+               *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
+
+               return true;
+       case BIT(INNER_SRC_PORT):
+               calc_x(tmp_x_s, rule->tuples.src_port,
+                      rule->tuples_mask.src_port);
+               calc_y(tmp_y_s, rule->tuples.src_port,
+                      rule->tuples_mask.src_port);
+               *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+               *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+               return true;
+       case BIT(INNER_DST_PORT):
+               calc_x(tmp_x_s, rule->tuples.dst_port,
+                      rule->tuples_mask.dst_port);
+               calc_y(tmp_y_s, rule->tuples.dst_port,
+                      rule->tuples_mask.dst_port);
+               *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+               *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+               return true;
+       default:
+               return false;
+       }
 }
 
-static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
-                                        u16 cmdq_resp, u8  resp_code,
-                                        enum hclge_mac_vlan_tbl_opcode op)
+static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
+                                u8 vf_id, u8 network_port_id)
 {
-       struct hclge_dev *hdev = vport->back;
-       int return_status = -EIO;
+       u32 port_number = 0;
 
-       if (cmdq_resp) {
-               dev_err(&hdev->pdev->dev,
-                       "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
-                       cmdq_resp);
-               return -EIO;
+       if (port_type == HOST_PORT) {
+               hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
+                               pf_id);
+               hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
+                               vf_id);
+               hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
+       } else {
+               hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
+                               HCLGE_NETWORK_PORT_ID_S, network_port_id);
+               hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
        }
 
-       if (op == HCLGE_MAC_VLAN_ADD) {
-               if ((!resp_code) || (resp_code == 1)) {
-                       return_status = 0;
-               } else if (resp_code == 2) {
-                       return_status = -ENOSPC;
+       return port_number;
+}
+
+static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
+                                      __le32 *key_x, __le32 *key_y,
+                                      struct hclge_fd_rule *rule)
+{
+       u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
+       u8 cur_pos = 0, tuple_size, shift_bits;
+       int i;
+
+       for (i = 0; i < MAX_META_DATA; i++) {
+               tuple_size = meta_data_key_info[i].key_length;
+               tuple_bit = key_cfg->meta_data_active & BIT(i);
+
+               switch (tuple_bit) {
+               case BIT(ROCE_TYPE):
+                       hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
+                       cur_pos += tuple_size;
+                       break;
+               case BIT(DST_VPORT):
+                       port_number = hclge_get_port_number(HOST_PORT, 0,
+                                                           rule->vf_id, 0);
+                       hnae3_set_field(meta_data,
+                                       GENMASK(cur_pos + tuple_size - 1, cur_pos),
+                                       cur_pos, port_number);
+                       cur_pos += tuple_size;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       calc_x(tmp_x, meta_data, 0xFFFFFFFF);
+       calc_y(tmp_y, meta_data, 0xFFFFFFFF);
+       shift_bits = sizeof(meta_data) * 8 - cur_pos;
+
+       *key_x = cpu_to_le32(tmp_x << shift_bits);
+       *key_y = cpu_to_le32(tmp_y << shift_bits);
+}
+
+/* A complete key consists of a meta data key and a tuple key.
+ * The meta data key is stored in the MSB region, the tuple key in the
+ * LSB region, and unused bits are filled with 0.
+ */
+static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
+                           struct hclge_fd_rule *rule)
+{
+       struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
+       u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
+       u8 *cur_key_x, *cur_key_y;
+       int i, ret, tuple_size;
+       u8 meta_data_region;
+
+       memset(key_x, 0, sizeof(key_x));
+       memset(key_y, 0, sizeof(key_y));
+       cur_key_x = key_x;
+       cur_key_y = key_y;
+
+       for (i = 0; i < MAX_TUPLE; i++) {
+               bool tuple_valid;
+               u32 check_tuple;
+
+               tuple_size = tuple_key_info[i].key_length / 8;
+               check_tuple = key_cfg->tuple_active & BIT(i);
+
+               tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
+                                                    cur_key_y, rule);
+               if (tuple_valid) {
+                       cur_key_x += tuple_size;
+                       cur_key_y += tuple_size;
+               }
+       }
+
+       meta_data_region = hdev->fd_cfg.max_key_length / 8 -
+                       MAX_META_DATA_LENGTH / 8;
+
+       hclge_fd_convert_meta_data(key_cfg,
+                                  (__le32 *)(key_x + meta_data_region),
+                                  (__le32 *)(key_y + meta_data_region),
+                                  rule);
+
+       ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
+                                  true);
+       if (ret) {
+               dev_err(&hdev->pdev->dev,
+                       "fd key_y config fail, loc=%d, ret=%d\n",
+                       rule->location, ret);
+               return ret;
+       }
+
+       ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
+                                  true);
+       if (ret)
+               dev_err(&hdev->pdev->dev,
+                       "fd key_x config fail, loc=%d, ret=%d\n",
+                       rule->location, ret);
+       return ret;
+}
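
A small sketch of the key layout described in the comment above hclge_config_key(): tuple bytes are appended from the LSB end of the buffer while the meta data occupies a fixed region at the MSB end. The bit widths below are placeholders standing in for max_key_length and MAX_META_DATA_LENGTH:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define KEY_LENGTH_BITS		400	/* placeholder for max_key_length */
#define META_DATA_BITS		32	/* placeholder for MAX_META_DATA_LENGTH */
#define KEY_BYTES		(KEY_LENGTH_BITS / 8)

int main(void)
{
	uint8_t key[KEY_BYTES];
	uint8_t *cur = key;		/* tuple cursor at the LSB end */
	size_t meta_off = KEY_BYTES - META_DATA_BITS / 8;

	memset(key, 0, sizeof(key));	/* unused bits stay 0 */

	cur += 6;			/* e.g. a 48-bit dst MAC tuple */
	cur += 2;			/* e.g. a 16-bit ether type tuple */

	printf("tuple bytes used: %zu, meta data region at byte %zu\n",
	       (size_t)(cur - key), meta_off);
	return 0;
}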
+
+static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
+                              struct hclge_fd_rule *rule)
+{
+       struct hclge_fd_ad_data ad_data;
+
+       ad_data.ad_id = rule->location;
+
+       if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
+               ad_data.drop_packet = true;
+               ad_data.forward_to_direct_queue = false;
+               ad_data.queue_id = 0;
+       } else {
+               ad_data.drop_packet = false;
+               ad_data.forward_to_direct_queue = true;
+               ad_data.queue_id = rule->queue_id;
+       }
+
+       ad_data.use_counter = false;
+       ad_data.counter_id = 0;
+
+       ad_data.use_next_stage = false;
+       ad_data.next_input_key = 0;
+
+       ad_data.write_rule_id_to_bd = true;
+       ad_data.rule_id = rule->location;
+
+       return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
+}
+
+static int hclge_fd_check_spec(struct hclge_dev *hdev,
+                              struct ethtool_rx_flow_spec *fs, u32 *unused)
+{
+       struct ethtool_tcpip4_spec *tcp_ip4_spec;
+       struct ethtool_usrip4_spec *usr_ip4_spec;
+       struct ethtool_tcpip6_spec *tcp_ip6_spec;
+       struct ethtool_usrip6_spec *usr_ip6_spec;
+       struct ethhdr *ether_spec;
+
+       if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+               return -EINVAL;
+
+       if (!(fs->flow_type & hdev->fd_cfg.proto_support))
+               return -EOPNOTSUPP;
+
+       if ((fs->flow_type & FLOW_EXT) &&
+           (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
+               dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
+               return -EOPNOTSUPP;
+       }
+
+       switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+       case SCTP_V4_FLOW:
+       case TCP_V4_FLOW:
+       case UDP_V4_FLOW:
+               tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
+               *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
+
+               if (!tcp_ip4_spec->ip4src)
+                       *unused |= BIT(INNER_SRC_IP);
+
+               if (!tcp_ip4_spec->ip4dst)
+                       *unused |= BIT(INNER_DST_IP);
+
+               if (!tcp_ip4_spec->psrc)
+                       *unused |= BIT(INNER_SRC_PORT);
+
+               if (!tcp_ip4_spec->pdst)
+                       *unused |= BIT(INNER_DST_PORT);
+
+               if (!tcp_ip4_spec->tos)
+                       *unused |= BIT(INNER_IP_TOS);
+
+               break;
+       case IP_USER_FLOW:
+               usr_ip4_spec = &fs->h_u.usr_ip4_spec;
+               *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+                       BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+
+               if (!usr_ip4_spec->ip4src)
+                       *unused |= BIT(INNER_SRC_IP);
+
+               if (!usr_ip4_spec->ip4dst)
+                       *unused |= BIT(INNER_DST_IP);
+
+               if (!usr_ip4_spec->tos)
+                       *unused |= BIT(INNER_IP_TOS);
+
+               if (!usr_ip4_spec->proto)
+                       *unused |= BIT(INNER_IP_PROTO);
+
+               if (usr_ip4_spec->l4_4_bytes)
+                       return -EOPNOTSUPP;
+
+               if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
+                       return -EOPNOTSUPP;
+
+               break;
+       case SCTP_V6_FLOW:
+       case TCP_V6_FLOW:
+       case UDP_V6_FLOW:
+               tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
+               *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+                       BIT(INNER_IP_TOS);
+
+               if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
+                   !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
+                       *unused |= BIT(INNER_SRC_IP);
+
+               if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
+                   !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
+                       *unused |= BIT(INNER_DST_IP);
+
+               if (!tcp_ip6_spec->psrc)
+                       *unused |= BIT(INNER_SRC_PORT);
+
+               if (!tcp_ip6_spec->pdst)
+                       *unused |= BIT(INNER_DST_PORT);
+
+               if (tcp_ip6_spec->tclass)
+                       return -EOPNOTSUPP;
+
+               break;
+       case IPV6_USER_FLOW:
+               usr_ip6_spec = &fs->h_u.usr_ip6_spec;
+               *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+                       BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
+                       BIT(INNER_DST_PORT);
+
+               if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
+                   !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
+                       *unused |= BIT(INNER_SRC_IP);
+
+               if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
+                   !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
+                       *unused |= BIT(INNER_DST_IP);
+
+               if (!usr_ip6_spec->l4_proto)
+                       *unused |= BIT(INNER_IP_PROTO);
+
+               if (usr_ip6_spec->tclass)
+                       return -EOPNOTSUPP;
+
+               if (usr_ip6_spec->l4_4_bytes)
+                       return -EOPNOTSUPP;
+
+               break;
+       case ETHER_FLOW:
+               ether_spec = &fs->h_u.ether_spec;
+               *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
+                       BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
+                       BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
+
+               if (is_zero_ether_addr(ether_spec->h_source))
+                       *unused |= BIT(INNER_SRC_MAC);
+
+               if (is_zero_ether_addr(ether_spec->h_dest))
+                       *unused |= BIT(INNER_DST_MAC);
+
+               if (!ether_spec->h_proto)
+                       *unused |= BIT(INNER_ETH_TYPE);
+
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       if ((fs->flow_type & FLOW_EXT)) {
+               if (fs->h_ext.vlan_etype)
+                       return -EOPNOTSUPP;
+               if (!fs->h_ext.vlan_tci)
+                       *unused |= BIT(INNER_VLAN_TAG_FST);
+
+               if (fs->m_ext.vlan_tci) {
+                       if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
+                               return -EINVAL;
+               }
+       } else {
+               *unused |= BIT(INNER_VLAN_TAG_FST);
+       }
+
+       if (fs->flow_type & FLOW_MAC_EXT) {
+               if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
+                       return -EOPNOTSUPP;
+
+               if (is_zero_ether_addr(fs->h_ext.h_dest))
+                       *unused |= BIT(INNER_DST_MAC);
+               else
+                       *unused &= ~(BIT(INNER_DST_MAC));
+       }
+
+       return 0;
+}
+
+static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
+{
+       struct hclge_fd_rule *rule = NULL;
+       struct hlist_node *node2;
+
+       hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
+               if (rule->location >= location)
+                       break;
+       }
+
+       return rule && rule->location == location;
+}
+
+static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
+                                    struct hclge_fd_rule *new_rule,
+                                    u16 location,
+                                    bool is_add)
+{
+       struct hclge_fd_rule *rule = NULL, *parent = NULL;
+       struct hlist_node *node2;
+
+       if (is_add && !new_rule)
+               return -EINVAL;
+
+       hlist_for_each_entry_safe(rule, node2,
+                                 &hdev->fd_rule_list, rule_node) {
+               if (rule->location >= location)
+                       break;
+               parent = rule;
+       }
+
+       if (rule && rule->location == location) {
+               hlist_del(&rule->rule_node);
+               kfree(rule);
+               hdev->hclge_fd_rule_num--;
+
+               if (!is_add)
+                       return 0;
+
+       } else if (!is_add) {
+               dev_err(&hdev->pdev->dev,
+                       "delete fail, rule %d is inexistent\n",
+                       location);
+               return -EINVAL;
+       }
+
+       INIT_HLIST_NODE(&new_rule->rule_node);
+
+       if (parent)
+               hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
+       else
+               hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
+
+       hdev->hclge_fd_rule_num++;
+
+       return 0;
+}
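
hclge_fd_update_rule_list() keeps fd_rule_list sorted by location, so a single walk finds the insertion point, the existing rule to replace, or the rule to delete. The same insert-or-replace logic on a plain singly linked list, as a userspace sketch:

#include <stdio.h>
#include <stdlib.h>

struct rule {
	unsigned int location;
	struct rule *next;
};

/* Insert while keeping the list sorted by location; an existing rule at
 * the same location is replaced, mirroring the driver's behavior.
 */
static void rule_insert(struct rule **head, struct rule *new_rule)
{
	struct rule **pp = head;

	while (*pp && (*pp)->location < new_rule->location)
		pp = &(*pp)->next;

	if (*pp && (*pp)->location == new_rule->location) {
		struct rule *old = *pp;

		new_rule->next = old->next;
		*pp = new_rule;
		free(old);
		return;
	}

	new_rule->next = *pp;
	*pp = new_rule;
}

int main(void)
{
	unsigned int locs[] = { 5, 1, 3, 3 };	/* the second 3 replaces the first */
	struct rule *head = NULL, *r;
	size_t i;

	for (i = 0; i < sizeof(locs) / sizeof(locs[0]); i++) {
		r = calloc(1, sizeof(*r));
		r->location = locs[i];
		rule_insert(&head, r);
	}
	for (r = head; r; r = r->next)
		printf("%u ", r->location);	/* prints: 1 3 5 */
	printf("\n");
	return 0;
}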
+
+static int hclge_fd_get_tuple(struct hclge_dev *hdev,
+                             struct ethtool_rx_flow_spec *fs,
+                             struct hclge_fd_rule *rule)
+{
+       u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+
+       switch (flow_type) {
+       case SCTP_V4_FLOW:
+       case TCP_V4_FLOW:
+       case UDP_V4_FLOW:
+               rule->tuples.src_ip[3] =
+                               be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
+               rule->tuples_mask.src_ip[3] =
+                               be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
+
+               rule->tuples.dst_ip[3] =
+                               be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
+               rule->tuples_mask.dst_ip[3] =
+                               be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
+
+               rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
+               rule->tuples_mask.src_port =
+                               be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
+
+               rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
+               rule->tuples_mask.dst_port =
+                               be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
+
+               rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
+               rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
+
+               rule->tuples.ether_proto = ETH_P_IP;
+               rule->tuples_mask.ether_proto = 0xFFFF;
+
+               break;
+       case IP_USER_FLOW:
+               rule->tuples.src_ip[3] =
+                               be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
+               rule->tuples_mask.src_ip[3] =
+                               be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
+
+               rule->tuples.dst_ip[3] =
+                               be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
+               rule->tuples_mask.dst_ip[3] =
+                               be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
+
+               rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
+               rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
+
+               rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
+               rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
+
+               rule->tuples.ether_proto = ETH_P_IP;
+               rule->tuples_mask.ether_proto = 0xFFFF;
+
+               break;
+       case SCTP_V6_FLOW:
+       case TCP_V6_FLOW:
+       case UDP_V6_FLOW:
+               be32_to_cpu_array(rule->tuples.src_ip,
+                                 fs->h_u.tcp_ip6_spec.ip6src, 4);
+               be32_to_cpu_array(rule->tuples_mask.src_ip,
+                                 fs->m_u.tcp_ip6_spec.ip6src, 4);
+
+               be32_to_cpu_array(rule->tuples.dst_ip,
+                                 fs->h_u.tcp_ip6_spec.ip6dst, 4);
+               be32_to_cpu_array(rule->tuples_mask.dst_ip,
+                                 fs->m_u.tcp_ip6_spec.ip6dst, 4);
+
+               rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
+               rule->tuples_mask.src_port =
+                               be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
+
+               rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
+               rule->tuples_mask.dst_port =
+                               be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
+
+               rule->tuples.ether_proto = ETH_P_IPV6;
+               rule->tuples_mask.ether_proto = 0xFFFF;
+
+               break;
+       case IPV6_USER_FLOW:
+               be32_to_cpu_array(rule->tuples.src_ip,
+                                 fs->h_u.usr_ip6_spec.ip6src, 4);
+               be32_to_cpu_array(rule->tuples_mask.src_ip,
+                                 fs->m_u.usr_ip6_spec.ip6src, 4);
+
+               be32_to_cpu_array(rule->tuples.dst_ip,
+                                 fs->h_u.usr_ip6_spec.ip6dst, 4);
+               be32_to_cpu_array(rule->tuples_mask.dst_ip,
+                                 fs->m_u.usr_ip6_spec.ip6dst, 4);
+
+               rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
+               rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
+
+               rule->tuples.ether_proto = ETH_P_IPV6;
+               rule->tuples_mask.ether_proto = 0xFFFF;
+
+               break;
+       case ETHER_FLOW:
+               ether_addr_copy(rule->tuples.src_mac,
+                               fs->h_u.ether_spec.h_source);
+               ether_addr_copy(rule->tuples_mask.src_mac,
+                               fs->m_u.ether_spec.h_source);
+
+               ether_addr_copy(rule->tuples.dst_mac,
+                               fs->h_u.ether_spec.h_dest);
+               ether_addr_copy(rule->tuples_mask.dst_mac,
+                               fs->m_u.ether_spec.h_dest);
+
+               rule->tuples.ether_proto =
+                               be16_to_cpu(fs->h_u.ether_spec.h_proto);
+               rule->tuples_mask.ether_proto =
+                               be16_to_cpu(fs->m_u.ether_spec.h_proto);
+
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       switch (flow_type) {
+       case SCTP_V4_FLOW:
+       case SCTP_V6_FLOW:
+               rule->tuples.ip_proto = IPPROTO_SCTP;
+               rule->tuples_mask.ip_proto = 0xFF;
+               break;
+       case TCP_V4_FLOW:
+       case TCP_V6_FLOW:
+               rule->tuples.ip_proto = IPPROTO_TCP;
+               rule->tuples_mask.ip_proto = 0xFF;
+               break;
+       case UDP_V4_FLOW:
+       case UDP_V6_FLOW:
+               rule->tuples.ip_proto = IPPROTO_UDP;
+               rule->tuples_mask.ip_proto = 0xFF;
+               break;
+       default:
+               break;
+       }
+
+       if (fs->flow_type & FLOW_EXT) {
+               rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
+               rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
+       }
+
+       if (fs->flow_type & FLOW_MAC_EXT) {
+               ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
+               ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
+       }
+
+       return 0;
+}
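
The IPv6 cases rely on be32_to_cpu_array(), which converts an array of
big-endian 32-bit words (the 128-bit address from the ethtool spec) into
host order in one call. A userspace equivalent, assuming POSIX ntohl():

    #include <arpa/inet.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Convert 'len' big-endian 32-bit words to host byte order,
     * analogous to the kernel's be32_to_cpu_array(). */
    static void be32_to_host_array(uint32_t *dst, const uint32_t *src,
                                   size_t len)
    {
            size_t i;

            for (i = 0; i < len; i++)
                    dst[i] = ntohl(src[i]);
    }
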
+
+static int hclge_add_fd_entry(struct hnae3_handle *handle,
+                             struct ethtool_rxnfc *cmd)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_dev *hdev = vport->back;
+       u16 dst_vport_id = 0, q_index = 0;
+       struct ethtool_rx_flow_spec *fs;
+       struct hclge_fd_rule *rule;
+       u32 unused = 0;
+       u8 action;
+       int ret;
+
+       if (!hnae3_dev_fd_supported(hdev))
+               return -EOPNOTSUPP;
+
+       if (!hdev->fd_cfg.fd_en) {
+               dev_warn(&hdev->pdev->dev,
+                        "Please enable flow director first\n");
+               return -EOPNOTSUPP;
+       }
+
+       fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+       ret = hclge_fd_check_spec(hdev, fs, &unused);
+       if (ret) {
+               dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
+               return ret;
+       }
+
+       if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
+               action = HCLGE_FD_ACTION_DROP_PACKET;
+       } else {
+               u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+               u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+               u16 tqps;
+
+               /* validate vf id before it is used to index hdev->vport[] */
+               if (vf > hdev->num_req_vfs) {
+                       dev_err(&hdev->pdev->dev,
+                               "Error: vf id (%d) > max vf num (%d)\n",
+                               vf, hdev->num_req_vfs);
+                       return -EINVAL;
+               }
+
+               dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
+               tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
+
+               if (ring >= tqps) {
+                       dev_err(&hdev->pdev->dev,
+                               "Error: queue id (%d) > max queue id (%d)\n",
+                               ring, tqps - 1);
+                       return -EINVAL;
+               }
+
+               action = HCLGE_FD_ACTION_ACCEPT_PACKET;
+               q_index = ring;
+       }
+
+       rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+       if (!rule)
+               return -ENOMEM;
+
+       ret = hclge_fd_get_tuple(hdev, fs, rule);
+       if (ret)
+               goto free_rule;
+
+       rule->flow_type = fs->flow_type;
+
+       rule->location = fs->location;
+       rule->unused_tuple = unused;
+       rule->vf_id = dst_vport_id;
+       rule->queue_id = q_index;
+       rule->action = action;
+
+       ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+       if (ret)
+               goto free_rule;
+
+       ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
+       if (ret)
+               goto free_rule;
+
+       ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
+       if (ret)
+               goto free_rule;
+
+       return ret;
+
+free_rule:
+       kfree(rule);
+       return ret;
+}
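
These entry points back the standard ethtool ntuple interface; assuming an
interface named eth0, the add/drop/delete/list paths implemented here and
below are typically exercised as follows:

    # insert a TCP/IPv4 steering rule to queue 3 at location 5
    ethtool -U eth0 flow-type tcp4 src-ip 192.168.1.1 dst-port 80 action 3 loc 5

    # action -1 requests RX_CLS_FLOW_DISC, i.e. drop matching packets
    ethtool -U eth0 flow-type udp4 dst-port 53 action -1 loc 6

    # delete the rule at location 5, then list the remaining rules
    ethtool -U eth0 delete 5
    ethtool -u eth0
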
+
+static int hclge_del_fd_entry(struct hnae3_handle *handle,
+                             struct ethtool_rxnfc *cmd)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_dev *hdev = vport->back;
+       struct ethtool_rx_flow_spec *fs;
+       int ret;
+
+       if (!hnae3_dev_fd_supported(hdev))
+               return -EOPNOTSUPP;
+
+       fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+       if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+               return -EINVAL;
+
+       if (!hclge_fd_rule_exist(hdev, fs->location)) {
+               dev_err(&hdev->pdev->dev,
+                       "Delete fail, rule %d is inexistent\n",
+                       fs->location);
+               return -ENOENT;
+       }
+
+       ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+                                  fs->location, NULL, false);
+       if (ret)
+               return ret;
+
+       return hclge_fd_update_rule_list(hdev, NULL, fs->location,
+                                        false);
+}
+
+static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
+                                    bool clear_list)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_dev *hdev = vport->back;
+       struct hclge_fd_rule *rule;
+       struct hlist_node *node;
+
+       if (!hnae3_dev_fd_supported(hdev))
+               return;
+
+       if (clear_list) {
+               hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
+                                         rule_node) {
+                       hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+                                            rule->location, NULL, false);
+                       hlist_del(&rule->rule_node);
+                       kfree(rule);
+                       hdev->hclge_fd_rule_num--;
+               }
+       } else {
+               hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
+                                         rule_node)
+                       hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+                                            rule->location, NULL, false);
+       }
+}
+
+static int hclge_restore_fd_entries(struct hnae3_handle *handle)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_dev *hdev = vport->back;
+       struct hclge_fd_rule *rule;
+       struct hlist_node *node;
+       int ret;
+
+       if (!hnae3_dev_fd_supported(hdev))
+               return -EOPNOTSUPP;
+
+       hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+               ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+               if (!ret)
+                       ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
+
+               if (ret) {
+                       dev_warn(&hdev->pdev->dev,
+                                "Restore rule %d failed, remove it\n",
+                                rule->location);
+                       hlist_del(&rule->rule_node);
+                       kfree(rule);
+                       hdev->hclge_fd_rule_num--;
+               }
+       }
+       return 0;
+}
+
+static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
+                                struct ethtool_rxnfc *cmd)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_dev *hdev = vport->back;
+
+       if (!hnae3_dev_fd_supported(hdev))
+               return -EOPNOTSUPP;
+
+       cmd->rule_cnt = hdev->hclge_fd_rule_num;
+       cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+
+       return 0;
+}
+
+static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
+                                 struct ethtool_rxnfc *cmd)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_fd_rule *rule = NULL;
+       struct hclge_dev *hdev = vport->back;
+       struct ethtool_rx_flow_spec *fs;
+       struct hlist_node *node2;
+
+       if (!hnae3_dev_fd_supported(hdev))
+               return -EOPNOTSUPP;
+
+       fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+       hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
+               if (rule->location >= fs->location)
+                       break;
+       }
+
+       if (!rule || fs->location != rule->location)
+               return -ENOENT;
+
+       fs->flow_type = rule->flow_type;
+       switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+       case SCTP_V4_FLOW:
+       case TCP_V4_FLOW:
+       case UDP_V4_FLOW:
+               fs->h_u.tcp_ip4_spec.ip4src =
+                               cpu_to_be32(rule->tuples.src_ip[3]);
+               fs->m_u.tcp_ip4_spec.ip4src =
+                               rule->unused_tuple & BIT(INNER_SRC_IP) ?
+                               0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
+
+               fs->h_u.tcp_ip4_spec.ip4dst =
+                               cpu_to_be32(rule->tuples.dst_ip[3]);
+               fs->m_u.tcp_ip4_spec.ip4dst =
+                               rule->unused_tuple & BIT(INNER_DST_IP) ?
+                               0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
+
+               fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
+               fs->m_u.tcp_ip4_spec.psrc =
+                               rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+                               0 : cpu_to_be16(rule->tuples_mask.src_port);
+
+               fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
+               fs->m_u.tcp_ip4_spec.pdst =
+                               rule->unused_tuple & BIT(INNER_DST_PORT) ?
+                               0 : cpu_to_be16(rule->tuples_mask.dst_port);
+
+               fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
+               fs->m_u.tcp_ip4_spec.tos =
+                               rule->unused_tuple & BIT(INNER_IP_TOS) ?
+                               0 : rule->tuples_mask.ip_tos;
+
+               break;
+       case IP_USER_FLOW:
+               fs->h_u.usr_ip4_spec.ip4src =
+                               cpu_to_be32(rule->tuples.src_ip[3]);
+               fs->m_u.usr_ip4_spec.ip4src =
+                               rule->unused_tuple & BIT(INNER_SRC_IP) ?
+                               0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
+
+               fs->h_u.usr_ip4_spec.ip4dst =
+                               cpu_to_be32(rule->tuples.dst_ip[3]);
+               fs->m_u.usr_ip4_spec.ip4dst =
+                               rule->unused_tuple & BIT(INNER_DST_IP) ?
+                               0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
+
+               fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
+               fs->m_u.usr_ip4_spec.tos =
+                               rule->unused_tuple & BIT(INNER_IP_TOS) ?
+                               0 : rule->tuples_mask.ip_tos;
+
+               fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
+               fs->m_u.usr_ip4_spec.proto =
+                               rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+                               0 : rule->tuples_mask.ip_proto;
+
+               fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+
+               break;
+       case SCTP_V6_FLOW:
+       case TCP_V6_FLOW:
+       case UDP_V6_FLOW:
+               cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
+                                 rule->tuples.src_ip, 4);
+               if (rule->unused_tuple & BIT(INNER_SRC_IP))
+                       memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
+               else
+                       cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
+                                         rule->tuples_mask.src_ip, 4);
+
+               cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
+                                 rule->tuples.dst_ip, 4);
+               if (rule->unused_tuple & BIT(INNER_DST_IP))
+                       memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
+               else
+                       cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
+                                         rule->tuples_mask.dst_ip, 4);
+
+               fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
+               fs->m_u.tcp_ip6_spec.psrc =
+                               rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+                               0 : cpu_to_be16(rule->tuples_mask.src_port);
+
+               fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
+               fs->m_u.tcp_ip6_spec.pdst =
+                               rule->unused_tuple & BIT(INNER_DST_PORT) ?
+                               0 : cpu_to_be16(rule->tuples_mask.dst_port);
+
+               break;
+       case IPV6_USER_FLOW:
+               cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
+                                 rule->tuples.src_ip, 4);
+               if (rule->unused_tuple & BIT(INNER_SRC_IP))
+                       memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
+               else
+                       cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
+                                         rule->tuples_mask.src_ip, 4);
+
+               cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
+                                 rule->tuples.dst_ip, 4);
+               if (rule->unused_tuple & BIT(INNER_DST_IP))
+                       memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
+               else
+                       cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
+                                         rule->tuples_mask.dst_ip, 4);
+
+               fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
+               fs->m_u.usr_ip6_spec.l4_proto =
+                               rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+                               0 : rule->tuples_mask.ip_proto;
+
+               break;
+       case ETHER_FLOW:
+               ether_addr_copy(fs->h_u.ether_spec.h_source,
+                               rule->tuples.src_mac);
+               if (rule->unused_tuple & BIT(INNER_SRC_MAC))
+                       eth_zero_addr(fs->m_u.ether_spec.h_source);
+               else
+                       ether_addr_copy(fs->m_u.ether_spec.h_source,
+                                       rule->tuples_mask.src_mac);
+
+               ether_addr_copy(fs->h_u.ether_spec.h_dest,
+                               rule->tuples.dst_mac);
+               if (rule->unused_tuple & BIT(INNER_DST_MAC))
+                       eth_zero_addr(fs->m_u.ether_spec.h_dest);
+               else
+                       ether_addr_copy(fs->m_u.ether_spec.h_dest,
+                                       rule->tuples_mask.dst_mac);
+
+               fs->h_u.ether_spec.h_proto =
+                               cpu_to_be16(rule->tuples.ether_proto);
+               fs->m_u.ether_spec.h_proto =
+                               rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
+                               0 : cpu_to_be16(rule->tuples_mask.ether_proto);
+
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       if (fs->flow_type & FLOW_EXT) {
+               fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
+               fs->m_ext.vlan_tci =
+                               rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
+                               cpu_to_be16(VLAN_VID_MASK) :
+                               cpu_to_be16(rule->tuples_mask.vlan_tag1);
+       }
+
+       if (fs->flow_type & FLOW_MAC_EXT) {
+               ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
+               if (rule->unused_tuple & BIT(INNER_DST_MAC))
+                       eth_zero_addr(fs->m_ext.h_dest);
+               else
+                       ether_addr_copy(fs->m_ext.h_dest,
+                                       rule->tuples_mask.dst_mac);
+       }
+
+       if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
+               fs->ring_cookie = RX_CLS_FLOW_DISC;
+       } else {
+               u64 vf_id;
+
+               fs->ring_cookie = rule->queue_id;
+               vf_id = rule->vf_id;
+               vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+               fs->ring_cookie |= vf_id;
+       }
+
+       return 0;
+}
+
+static int hclge_get_all_rules(struct hnae3_handle *handle,
+                              struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_dev *hdev = vport->back;
+       struct hclge_fd_rule *rule;
+       struct hlist_node *node2;
+       int cnt = 0;
+
+       if (!hnae3_dev_fd_supported(hdev))
+               return -EOPNOTSUPP;
+
+       cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+
+       hlist_for_each_entry_safe(rule, node2,
+                                 &hdev->fd_rule_list, rule_node) {
+               if (cnt == cmd->rule_cnt)
+                       return -EMSGSIZE;
+
+               rule_locs[cnt] = rule->location;
+               cnt++;
+       }
+
+       cmd->rule_cnt = cnt;
+
+       return 0;
+}
+
+static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_dev *hdev = vport->back;
+
+       hdev->fd_cfg.fd_en = enable;
+       if (!enable)
+               hclge_del_all_fd_entries(handle, false);
+       else
+               hclge_restore_fd_entries(handle);
+}
+
+static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
+{
+       struct hclge_desc desc;
+       struct hclge_config_mac_mode_cmd *req =
+               (struct hclge_config_mac_mode_cmd *)desc.data;
+       u32 loop_en = 0;
+       int ret;
+
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
+       hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
+       hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
+       hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
+       hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
+       hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
+       req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
+
+       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (ret)
+               dev_err(&hdev->pdev->dev,
+                       "mac enable fail, ret =%d.\n", ret);
+}
+
+static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
+{
+       struct hclge_config_mac_mode_cmd *req;
+       struct hclge_desc desc;
+       u32 loop_en;
+       int ret;
+
+       req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
+       /* 1 Read out the MAC mode config at first */
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
+       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (ret) {
+               dev_err(&hdev->pdev->dev,
+                       "mac loopback get fail, ret =%d.\n", ret);
+               return ret;
+       }
+
+       /* 2 Then setup the loopback flag */
+       loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
+       hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
+       hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
+       hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
+
+       req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
+
+       /* 3 Config mac work mode with loopback flag
+        * and its original configure parameters
+        */
+       hclge_cmd_reuse_desc(&desc, false);
+       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (ret)
+               dev_err(&hdev->pdev->dev,
+                       "mac loopback set fail, ret =%d.\n", ret);
+       return ret;
+}
+
+static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
+                                    enum hnae3_loop loop_mode)
+{
+#define HCLGE_SERDES_RETRY_MS  10
+#define HCLGE_SERDES_RETRY_NUM 100
+       struct hclge_serdes_lb_cmd *req;
+       struct hclge_desc desc;
+       int ret, i = 0;
+       u8 loop_mode_b;
+
+       req = (struct hclge_serdes_lb_cmd *)desc.data;
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
+
+       switch (loop_mode) {
+       case HNAE3_LOOP_SERIAL_SERDES:
+               loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+               break;
+       case HNAE3_LOOP_PARALLEL_SERDES:
+               loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
+               break;
+       default:
+               dev_err(&hdev->pdev->dev,
+                       "unsupported serdes loopback mode %d\n", loop_mode);
+               return -ENOTSUPP;
+       }
+
+       if (en) {
+               req->enable = loop_mode_b;
+               req->mask = loop_mode_b;
+       } else {
+               req->mask = loop_mode_b;
+       }
+
+       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (ret) {
+               dev_err(&hdev->pdev->dev,
+                       "serdes loopback set fail, ret = %d\n", ret);
+               return ret;
+       }
+
+       do {
+               msleep(HCLGE_SERDES_RETRY_MS);
+               hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
+                                          true);
+               ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+               if (ret) {
+                       dev_err(&hdev->pdev->dev,
+                               "serdes loopback get, ret = %d\n", ret);
+                       return ret;
+               }
+       } while (++i < HCLGE_SERDES_RETRY_NUM &&
+                !(req->result & HCLGE_CMD_SERDES_DONE_B));
+
+       if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
+               dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
+               return -EBUSY;
+       } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
+               dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
+               return -EIO;
+       }
+
+       hclge_cfg_mac_mode(hdev, en);
+       return 0;
+}
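
Since the serdes command completes asynchronously, the function polls the
DONE bit with a bounded retry loop (at most HCLGE_SERDES_RETRY_NUM *
HCLGE_SERDES_RETRY_MS = 1000 ms). The same pattern in a generic, compilable
form, with a hypothetical hw_op_done() standing in for reading the firmware
result field:

    #include <stdbool.h>
    #include <unistd.h>

    #define RETRY_MS  10
    #define RETRY_NUM 100

    extern bool hw_op_done(void);   /* placeholder for the DONE-bit read */

    /* Poll for completion with an upper bound, as in
     * hclge_set_serdes_loopback(); returns -1 on timeout. */
    static int wait_hw_op(void)
    {
            int i = 0;

            do {
                    usleep(RETRY_MS * 1000);
                    if (hw_op_done())
                            return 0;
            } while (++i < RETRY_NUM);

            return -1;
    }
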
+
+static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
+                           int stream_id, bool enable)
+{
+       struct hclge_desc desc;
+       struct hclge_cfg_com_tqp_queue_cmd *req =
+               (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
+       int ret;
+
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
+       req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
+       req->stream_id = cpu_to_le16(stream_id);
+       req->enable |= enable << HCLGE_TQP_ENABLE_B;
+
+       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (ret)
+               dev_err(&hdev->pdev->dev,
+                       "Tqp enable fail, status =%d.\n", ret);
+       return ret;
+}
+
+static int hclge_set_loopback(struct hnae3_handle *handle,
+                             enum hnae3_loop loop_mode, bool en)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_dev *hdev = vport->back;
+       int i, ret;
+
+       switch (loop_mode) {
+       case HNAE3_LOOP_APP:
+               ret = hclge_set_app_loopback(hdev, en);
+               break;
+       case HNAE3_LOOP_SERIAL_SERDES:
+       case HNAE3_LOOP_PARALLEL_SERDES:
+               ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
+               break;
+       default:
+               ret = -ENOTSUPP;
+               dev_err(&hdev->pdev->dev,
+                       "loop_mode %d is not supported\n", loop_mode);
+               break;
+       }
+
+       if (ret)
+               return ret;
+
+       for (i = 0; i < vport->alloc_tqps; i++) {
+               ret = hclge_tqp_enable(hdev, i, 0, en);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
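
Assuming an interface named eth0, these loopback modes are what the
driver's self-test path typically exercises from userspace:

    # run the offline self-test, which uses app/serdes loopback internally
    ethtool -t eth0 offline
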
+
+static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hnae3_queue *queue;
+       struct hclge_tqp *tqp;
+       int i;
+
+       for (i = 0; i < vport->alloc_tqps; i++) {
+               queue = handle->kinfo.tqp[i];
+               tqp = container_of(queue, struct hclge_tqp, q);
+               memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
+       }
+}
+
+static int hclge_ae_start(struct hnae3_handle *handle)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_dev *hdev = vport->back;
+       int i;
+
+       for (i = 0; i < vport->alloc_tqps; i++)
+               hclge_tqp_enable(hdev, i, 0, true);
+
+       /* mac enable */
+       hclge_cfg_mac_mode(hdev, true);
+       clear_bit(HCLGE_STATE_DOWN, &hdev->state);
+       mod_timer(&hdev->service_timer, jiffies + HZ);
+       hdev->hw.mac.link = 0;
+
+       /* reset tqp stats */
+       hclge_reset_tqp_stats(handle);
+
+       hclge_mac_start_phy(hdev);
+
+       return 0;
+}
+
+static void hclge_ae_stop(struct hnae3_handle *handle)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_dev *hdev = vport->back;
+       int i;
+
+       set_bit(HCLGE_STATE_DOWN, &hdev->state);
+
+       del_timer_sync(&hdev->service_timer);
+       cancel_work_sync(&hdev->service_task);
+       clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
+
+       if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+               hclge_mac_stop_phy(hdev);
+               return;
+       }
+
+       for (i = 0; i < vport->alloc_tqps; i++)
+               hclge_tqp_enable(hdev, i, 0, false);
+
+       /* Mac disable */
+       hclge_cfg_mac_mode(hdev, false);
+
+       hclge_mac_stop_phy(hdev);
+
+       /* reset tqp stats */
+       hclge_reset_tqp_stats(handle);
+       hclge_update_link_status(hdev);
+}
+
+static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
+                                        u16 cmdq_resp, u8  resp_code,
+                                        enum hclge_mac_vlan_tbl_opcode op)
+{
+       struct hclge_dev *hdev = vport->back;
+       int return_status = -EIO;
+
+       if (cmdq_resp) {
+               dev_err(&hdev->pdev->dev,
+                       "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
+                       cmdq_resp);
+               return -EIO;
+       }
+
+       if (op == HCLGE_MAC_VLAN_ADD) {
+               if (!resp_code || resp_code == 1) {
+                       return_status = 0;
+               } else if (resp_code == 2) {
+                       return_status = -ENOSPC;
                        dev_err(&hdev->pdev->dev,
                                "add mac addr failed for uc_overflow.\n");
                } else if (resp_code == 3) {
@@ -3667,174 +4923,6 @@ static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
        new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
 }
 
-static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
-                                          const u8 *addr)
-{
-       u16 high_val = addr[1] | (addr[0] << 8);
-       struct hclge_dev *hdev = vport->back;
-       u32 rsh = 4 - hdev->mta_mac_sel_type;
-       u16 ret_val = (high_val >> rsh) & 0xfff;
-
-       return ret_val;
-}
-
-static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
-                                    enum hclge_mta_dmac_sel_type mta_mac_sel,
-                                    bool enable)
-{
-       struct hclge_mta_filter_mode_cmd *req;
-       struct hclge_desc desc;
-       int ret;
-
-       req = (struct hclge_mta_filter_mode_cmd *)desc.data;
-       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
-
-       hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
-                     enable);
-       hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
-                       HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
-
-       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-       if (ret)
-               dev_err(&hdev->pdev->dev,
-                       "Config mat filter mode failed for cmd_send, ret =%d.\n",
-                       ret);
-
-       return ret;
-}
-
-int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
-                             u8 func_id,
-                             bool enable)
-{
-       struct hclge_cfg_func_mta_filter_cmd *req;
-       struct hclge_desc desc;
-       int ret;
-
-       req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
-       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
-
-       hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
-                     enable);
-       req->function_id = func_id;
-
-       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-       if (ret)
-               dev_err(&hdev->pdev->dev,
-                       "Config func_id enable failed for cmd_send, ret =%d.\n",
-                       ret);
-
-       return ret;
-}
-
-static int hclge_set_mta_table_item(struct hclge_vport *vport,
-                                   u16 idx,
-                                   bool enable)
-{
-       struct hclge_dev *hdev = vport->back;
-       struct hclge_cfg_func_mta_item_cmd *req;
-       struct hclge_desc desc;
-       u16 item_idx = 0;
-       int ret;
-
-       req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
-       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
-       hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
-
-       hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
-                       HCLGE_CFG_MTA_ITEM_IDX_S, idx);
-       req->item_idx = cpu_to_le16(item_idx);
-
-       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-       if (ret) {
-               dev_err(&hdev->pdev->dev,
-                       "Config mta table item failed for cmd_send, ret =%d.\n",
-                       ret);
-               return ret;
-       }
-
-       if (enable)
-               set_bit(idx, vport->mta_shadow);
-       else
-               clear_bit(idx, vport->mta_shadow);
-
-       return 0;
-}
-
-static int hclge_update_mta_status(struct hnae3_handle *handle)
-{
-       unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
-       struct hclge_vport *vport = hclge_get_vport(handle);
-       struct net_device *netdev = handle->kinfo.netdev;
-       struct netdev_hw_addr *ha;
-       u16 tbl_idx;
-
-       memset(mta_status, 0, sizeof(mta_status));
-
-       /* update mta_status from mc addr list */
-       netdev_for_each_mc_addr(ha, netdev) {
-               tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr);
-               set_bit(tbl_idx, mta_status);
-       }
-
-       return hclge_update_mta_status_common(vport, mta_status,
-                                       0, HCLGE_MTA_TBL_SIZE, true);
-}
-
-int hclge_update_mta_status_common(struct hclge_vport *vport,
-                                  unsigned long *status,
-                                  u16 idx,
-                                  u16 count,
-                                  bool update_filter)
-{
-       struct hclge_dev *hdev = vport->back;
-       u16 update_max = idx + count;
-       u16 check_max;
-       int ret = 0;
-       bool used;
-       u16 i;
-
-       /* setup mta check range */
-       if (update_filter) {
-               i = 0;
-               check_max = HCLGE_MTA_TBL_SIZE;
-       } else {
-               i = idx;
-               check_max = update_max;
-       }
-
-       used = false;
-       /* check and update all mta item */
-       for (; i < check_max; i++) {
-               /* ignore unused item */
-               if (!test_bit(i, vport->mta_shadow))
-                       continue;
-
-               /* if i in update range then update it */
-               if (i >= idx && i < update_max)
-                       if (!test_bit(i - idx, status))
-                               hclge_set_mta_table_item(vport, i, false);
-
-               if (!used && test_bit(i, vport->mta_shadow))
-                       used = true;
-       }
-
-       /* no longer use mta, disable it */
-       if (vport->accept_mta_mc && update_filter && !used) {
-               ret = hclge_cfg_func_mta_filter(hdev,
-                                               vport->vport_id,
-                                               false);
-               if (ret)
-                       dev_err(&hdev->pdev->dev,
-                               "disable func mta filter fail ret=%d\n",
-                               ret);
-               else
-                       vport->accept_mta_mc = false;
-       }
-
-       return ret;
-}
-
 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
                                     struct hclge_mac_vlan_tbl_entry_cmd *req)
 {
@@ -3958,6 +5046,118 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
        return cfg_status;
 }
 
+static int hclge_init_umv_space(struct hclge_dev *hdev)
+{
+       u16 allocated_size = 0;
+       int ret;
+
+       ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
+                                 true);
+       if (ret)
+               return ret;
+
+       if (allocated_size < hdev->wanted_umv_size)
+               dev_warn(&hdev->pdev->dev,
+                        "Alloc umv space failed, want %d, get %d\n",
+                        hdev->wanted_umv_size, allocated_size);
+
+       mutex_init(&hdev->umv_mutex);
+       hdev->max_umv_size = allocated_size;
+       hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
+       hdev->share_umv_size = hdev->priv_umv_size +
+                       hdev->max_umv_size % (hdev->num_req_vfs + 2);
+
+       return 0;
+}
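
The split arithmetic: with num_req_vfs VFs the table is divided into
num_req_vfs + 2 slices (one per VF, one for the PF, one for the shared
pool), and any remainder is folded into the shared pool. A worked example,
assuming the default per-PF allocation of 384 entries
(HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM) and 6 requested VFs:

    #include <stdio.h>

    int main(void)
    {
            unsigned int max_umv = 384;     /* assumed allocation */
            unsigned int num_req_vfs = 6;   /* assumed VF count */
            unsigned int slices = num_req_vfs + 2;
            unsigned int priv = max_umv / slices;
            unsigned int share = priv + max_umv % slices;

            /* prints "priv=48 share=48": 7 functions * 48 + 48 = 384 */
            printf("priv=%u share=%u\n", priv, share);
            return 0;
    }
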
+
+static int hclge_uninit_umv_space(struct hclge_dev *hdev)
+{
+       int ret;
+
+       if (hdev->max_umv_size > 0) {
+               ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
+                                         false);
+               if (ret)
+                       return ret;
+               hdev->max_umv_size = 0;
+       }
+       mutex_destroy(&hdev->umv_mutex);
+
+       return 0;
+}
+
+static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
+                              u16 *allocated_size, bool is_alloc)
+{
+       struct hclge_umv_spc_alc_cmd *req;
+       struct hclge_desc desc;
+       int ret;
+
+       req = (struct hclge_umv_spc_alc_cmd *)desc.data;
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
+       hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
+       req->space_size = cpu_to_le32(space_size);
+
+       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (ret) {
+               dev_err(&hdev->pdev->dev,
+                       "%s umv space failed for cmd_send, ret =%d\n",
+                       is_alloc ? "allocate" : "free", ret);
+               return ret;
+       }
+
+       if (is_alloc && allocated_size)
+               *allocated_size = le32_to_cpu(desc.data[1]);
+
+       return 0;
+}
+
+static void hclge_reset_umv_space(struct hclge_dev *hdev)
+{
+       struct hclge_vport *vport;
+       int i;
+
+       for (i = 0; i < hdev->num_alloc_vport; i++) {
+               vport = &hdev->vport[i];
+               vport->used_umv_num = 0;
+       }
+
+       mutex_lock(&hdev->umv_mutex);
+       hdev->share_umv_size = hdev->priv_umv_size +
+                       hdev->max_umv_size % (hdev->num_req_vfs + 2);
+       mutex_unlock(&hdev->umv_mutex);
+}
+
+static bool hclge_is_umv_space_full(struct hclge_vport *vport)
+{
+       struct hclge_dev *hdev = vport->back;
+       bool is_full;
+
+       mutex_lock(&hdev->umv_mutex);
+       is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
+                  hdev->share_umv_size == 0);
+       mutex_unlock(&hdev->umv_mutex);
+
+       return is_full;
+}
+
+static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
+{
+       struct hclge_dev *hdev = vport->back;
+
+       mutex_lock(&hdev->umv_mutex);
+       if (is_free) {
+               if (vport->used_umv_num > hdev->priv_umv_size)
+                       hdev->share_umv_size++;
+               vport->used_umv_num--;
+       } else {
+               if (vport->used_umv_num >= hdev->priv_umv_size)
+                       hdev->share_umv_size--;
+               vport->used_umv_num++;
+       }
+       mutex_unlock(&hdev->umv_mutex);
+}
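
Taken together, is_full/update implement a two-level quota: a vport first
consumes its private allotment and only then draws from the shared pool,
with frees applied in reverse. A minimal userspace model of the same
accounting (the umv_mutex locking is omitted for brevity):

    struct umv_state {
            unsigned int used;   /* entries used by this vport */
            unsigned int priv;   /* per-vport private quota */
            unsigned int share;  /* remaining shared-pool entries */
    };

    static int umv_full(const struct umv_state *s)
    {
            return s->used >= s->priv && s->share == 0;
    }

    static void umv_take(struct umv_state *s)
    {
            if (s->used >= s->priv)  /* quota spent: draw from the pool */
                    s->share--;
            s->used++;
    }

    static void umv_free(struct umv_state *s)
    {
            if (s->used > s->priv)   /* this entry had come from the pool */
                    s->share++;
            s->used--;
    }
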
+
 static int hclge_add_uc_addr(struct hnae3_handle *handle,
                             const unsigned char *addr)
 {
@@ -4003,8 +5203,19 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
         * is not allowed in the mac vlan table.
         */
        ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
-       if (ret == -ENOENT)
-               return hclge_add_mac_vlan_tbl(vport, &req, NULL);
+       if (ret == -ENOENT) {
+               if (!hclge_is_umv_space_full(vport)) {
+                       ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
+                       if (!ret)
+                               hclge_update_umv_space(vport, false);
+                       return ret;
+               }
+
+               dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
+                       hdev->priv_umv_size);
+
+               return -ENOSPC;
+       }
 
        /* check if we just hit the duplicate */
        if (!ret)
@@ -4047,6 +5258,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
        hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
        hclge_prepare_mac_addr(&req, addr);
        ret = hclge_remove_mac_vlan_tbl(vport, &req);
+       if (!ret)
+               hclge_update_umv_space(vport, true);
 
        return ret;
 }
@@ -4065,7 +5278,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
        struct hclge_dev *hdev = vport->back;
        struct hclge_mac_vlan_tbl_entry_cmd req;
        struct hclge_desc desc[3];
-       u16 tbl_idx;
        int status;
 
        /* mac addr check */
@@ -4095,25 +5307,8 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
                status = hclge_add_mac_vlan_tbl(vport, &req, desc);
        }
 
-       /* If mc mac vlan table is full, use MTA table */
-       if (status == -ENOSPC) {
-               if (!vport->accept_mta_mc) {
-                       status = hclge_cfg_func_mta_filter(hdev,
-                                                          vport->vport_id,
-                                                          true);
-                       if (status) {
-                               dev_err(&hdev->pdev->dev,
-                                       "set mta filter mode fail ret=%d\n",
-                                       status);
-                               return status;
-                       }
-                       vport->accept_mta_mc = true;
-               }
-
-               /* Set MTA table for this MAC address */
-               tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
-               status = hclge_set_mta_table_item(vport, tbl_idx, true);
-       }
+       if (status == -ENOSPC)
+               dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
 
        return status;
 }
@@ -4328,7 +5523,7 @@ static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
 }
 
 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
-                                     bool filter_en)
+                                     u8 fe_type, bool filter_en)
 {
        struct hclge_vlan_filter_ctrl_cmd *req;
        struct hclge_desc desc;
@@ -4338,7 +5533,7 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
 
        req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
        req->vlan_type = vlan_type;
-       req->vlan_fe = filter_en;
+       req->vlan_fe = filter_en ? fe_type : 0;
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
@@ -4350,13 +5545,30 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
 
 #define HCLGE_FILTER_TYPE_VF           0
 #define HCLGE_FILTER_TYPE_PORT         1
+#define HCLGE_FILTER_FE_EGRESS_V1_B    BIT(0)
+#define HCLGE_FILTER_FE_NIC_INGRESS_B  BIT(0)
+#define HCLGE_FILTER_FE_NIC_EGRESS_B   BIT(1)
+#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
+#define HCLGE_FILTER_FE_ROCE_EGRESS_B  BIT(3)
+#define HCLGE_FILTER_FE_EGRESS         (HCLGE_FILTER_FE_NIC_EGRESS_B \
+                                       | HCLGE_FILTER_FE_ROCE_EGRESS_B)
+#define HCLGE_FILTER_FE_INGRESS                (HCLGE_FILTER_FE_NIC_INGRESS_B \
+                                       | HCLGE_FILTER_FE_ROCE_INGRESS_B)
 
 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
 {
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
 
-       hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
+       if (hdev->pdev->revision >= 0x21) {
+               hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+                                          HCLGE_FILTER_FE_EGRESS, enable);
+               hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+                                          HCLGE_FILTER_FE_INGRESS, enable);
+       } else {
+               hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+                                          HCLGE_FILTER_FE_EGRESS_V1_B, enable);
+       }
 }
 
 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
@@ -4658,13 +5870,23 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
        int ret;
        int i;
 
-       ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
-       if (ret)
-               return ret;
+       if (hdev->pdev->revision >= 0x21) {
+               ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+                                                HCLGE_FILTER_FE_EGRESS, true);
+               if (ret)
+                       return ret;
 
-       ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
-       if (ret)
-               return ret;
+               ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+                                                HCLGE_FILTER_FE_INGRESS, true);
+               if (ret)
+                       return ret;
+       } else {
+               ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+                                                HCLGE_FILTER_FE_EGRESS_V1_B,
+                                                true);
+               if (ret)
+                       return ret;
+       }
 
        hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
        hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
@@ -4976,11 +6198,7 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev)
        if (!phydev->link || !phydev->autoneg)
                return 0;
 
-       if (phydev->advertising & ADVERTISED_Pause)
-               local_advertising = ADVERTISE_PAUSE_CAP;
-
-       if (phydev->advertising & ADVERTISED_Asym_Pause)
-               local_advertising |= ADVERTISE_PAUSE_ASYM;
+       local_advertising = ethtool_adv_to_lcl_adv_t(phydev->advertising);
 
        if (phydev->pause)
                remote_advertising = LPA_PAUSE_CAP;
@@ -5439,6 +6657,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
                }
        }
 
+       ret = hclge_init_umv_space(hdev);
+       if (ret) {
+               dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
+               goto err_msi_irq_uninit;
+       }
+
        ret = hclge_mac_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
@@ -5476,6 +6700,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
                goto err_mdiobus_unreg;
        }
 
+       ret = hclge_init_fd_config(hdev);
+       if (ret) {
+               dev_err(&pdev->dev,
+                       "fd table init fail, ret=%d\n", ret);
+               goto err_mdiobus_unreg;
+       }
+
        hclge_dcb_ops_set(hdev);
 
        timer_setup(&hdev->service_timer, hclge_service_timer, 0);
@@ -5552,6 +6783,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
                return ret;
        }
 
+       hclge_reset_umv_space(hdev);
+
        ret = hclge_mac_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
@@ -5582,6 +6815,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
                return ret;
        }
 
+       ret = hclge_init_fd_config(hdev);
+       if (ret) {
+               dev_err(&pdev->dev,
+                       "fd table init fail, ret=%d\n", ret);
+               return ret;
+       }
+
        dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
                 HCLGE_DRIVER_NAME);
 
@@ -5598,6 +6838,8 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
        if (mac->phydev)
                mdiobus_unregister(mac->mdio_bus);
 
+       hclge_uninit_umv_space(hdev);
+
        /* Disable MISC vector(vector0) */
        hclge_enable_vector(&hdev->misc_vector, false);
        synchronize_irq(hdev->misc_vector.vector_irq);
@@ -5629,18 +6871,12 @@ static void hclge_get_channels(struct hnae3_handle *handle,
 }
 
 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
-                                       u16 *free_tqps, u16 *max_rss_size)
+                                       u16 *alloc_tqps, u16 *max_rss_size)
 {
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
-       u16 temp_tqps = 0;
-       int i;
 
-       for (i = 0; i < hdev->num_tqps; i++) {
-               if (!hdev->htqp[i].alloced)
-                       temp_tqps++;
-       }
-       *free_tqps = temp_tqps;
+       *alloc_tqps = vport->alloc_tqps;
        *max_rss_size = hdev->rss_size_max;
 }
 
@@ -6002,7 +7238,6 @@ static const struct hnae3_ae_ops hclge_ops = {
        .rm_uc_addr = hclge_rm_uc_addr,
        .add_mc_addr = hclge_add_mc_addr,
        .rm_mc_addr = hclge_rm_mc_addr,
-       .update_mta_status = hclge_update_mta_status,
        .set_autoneg = hclge_set_autoneg,
        .get_autoneg = hclge_get_autoneg,
        .get_pauseparam = hclge_get_pauseparam,
@@ -6027,6 +7262,14 @@ static const struct hnae3_ae_ops hclge_ops = {
        .get_regs = hclge_get_regs,
        .set_led_id = hclge_set_led_id,
        .get_link_mode = hclge_get_link_mode,
+       .add_fd_entry = hclge_add_fd_entry,
+       .del_fd_entry = hclge_del_fd_entry,
+       .del_all_fd_entries = hclge_del_all_fd_entries,
+       .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
+       .get_fd_rule_info = hclge_get_fd_rule_info,
+       .get_fd_all_rules = hclge_get_all_rules,
+       .restore_fd_rules = hclge_restore_fd_entries,
+       .enable_fd = hclge_enable_fd,
 };
 
 static struct hnae3_ae_algo ae_algo = {
index 7841b830a71665cf091ec43b14bc378c9c0e3d2c..e3dfd654eca9a1c4b1f0af7a90b29d91c725ca1b 100644 (file)
@@ -14,6 +14,8 @@
 #define HCLGE_MOD_VERSION "1.0"
 #define HCLGE_DRIVER_NAME "hclge"
 
+#define HCLGE_MAX_PF_NUM               8
+
 #define HCLGE_INVALID_VPORT 0xffff
 
 #define HCLGE_PF_CFG_BLOCK_SIZE                32
@@ -53,7 +55,9 @@
 #define HCLGE_RSS_TC_SIZE_6            64
 #define HCLGE_RSS_TC_SIZE_7            128
 
-#define HCLGE_MTA_TBL_SIZE             4096
+#define HCLGE_UMV_TBL_SIZE             3072
+#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \
+       (HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)
 
 #define HCLGE_TQP_RESET_TRY_TIMES      10
 
 #define HCLGE_VF_NUM_PER_CMD           64
 #define HCLGE_VF_NUM_PER_BYTE          8
 
+enum HCLGE_PORT_TYPE {
+       HOST_PORT,
+       NETWORK_PORT
+};
+
+#define HCLGE_PF_ID_S                  0
+#define HCLGE_PF_ID_M                  GENMASK(2, 0)
+#define HCLGE_VF_ID_S                  3
+#define HCLGE_VF_ID_M                  GENMASK(10, 3)
+#define HCLGE_PORT_TYPE_B              11
+#define HCLGE_NETWORK_PORT_ID_S                0
+#define HCLGE_NETWORK_PORT_ID_M                GENMASK(3, 0)
+
 /* Reset related Registers */
 #define HCLGE_MISC_RESET_STS_REG       0x20700
 #define HCLGE_MISC_VECTOR_INT_STS      0x20800
@@ -149,13 +166,6 @@ enum HCLGE_MAC_DUPLEX {
        HCLGE_MAC_FULL
 };
 
-enum hclge_mta_dmac_sel_type {
-       HCLGE_MAC_ADDR_47_36,
-       HCLGE_MAC_ADDR_46_35,
-       HCLGE_MAC_ADDR_45_34,
-       HCLGE_MAC_ADDR_44_33,
-};
-
 struct hclge_mac {
        u8 phy_addr;
        u8 flag;
@@ -238,6 +248,7 @@ struct hclge_cfg {
        u8 default_speed;
        u32 numa_node_map;
        u8 speed_ability;
+       u16 umv_space;
 };
 
 struct hclge_tm_info {
@@ -359,6 +370,221 @@ struct hclge_vlan_type_cfg {
        u16 tx_in_vlan_type;
 };
 
+enum HCLGE_FD_MODE {
+       HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1,
+       HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2,
+       HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1,
+       HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2,
+};
+
+enum HCLGE_FD_KEY_TYPE {
+       HCLGE_FD_KEY_BASE_ON_PTYPE,
+       HCLGE_FD_KEY_BASE_ON_TUPLE,
+};
+
+enum HCLGE_FD_STAGE {
+       HCLGE_FD_STAGE_1,
+       HCLGE_FD_STAGE_2,
+};
+
+/* OUTER_XXX indicates tuples in the tunnel header of a tunnel packet
+ * INNER_XXX indicates tuples in the inner header of a tunnel packet or
+ *           tuples of a non-tunnel packet
+ */
+enum HCLGE_FD_TUPLE {
+       OUTER_DST_MAC,
+       OUTER_SRC_MAC,
+       OUTER_VLAN_TAG_FST,
+       OUTER_VLAN_TAG_SEC,
+       OUTER_ETH_TYPE,
+       OUTER_L2_RSV,
+       OUTER_IP_TOS,
+       OUTER_IP_PROTO,
+       OUTER_SRC_IP,
+       OUTER_DST_IP,
+       OUTER_L3_RSV,
+       OUTER_SRC_PORT,
+       OUTER_DST_PORT,
+       OUTER_L4_RSV,
+       OUTER_TUN_VNI,
+       OUTER_TUN_FLOW_ID,
+       INNER_DST_MAC,
+       INNER_SRC_MAC,
+       INNER_VLAN_TAG_FST,
+       INNER_VLAN_TAG_SEC,
+       INNER_ETH_TYPE,
+       INNER_L2_RSV,
+       INNER_IP_TOS,
+       INNER_IP_PROTO,
+       INNER_SRC_IP,
+       INNER_DST_IP,
+       INNER_L3_RSV,
+       INNER_SRC_PORT,
+       INNER_DST_PORT,
+       INNER_L4_RSV,
+       MAX_TUPLE,
+};
+
+enum HCLGE_FD_META_DATA {
+       PACKET_TYPE_ID,
+       IP_FRAGEMENT,
+       ROCE_TYPE,
+       NEXT_KEY,
+       VLAN_NUMBER,
+       SRC_VPORT,
+       DST_VPORT,
+       TUNNEL_PACKET,
+       MAX_META_DATA,
+};
+
+struct key_info {
+       u8 key_type;
+       u8 key_length;
+};
+
+static const struct key_info meta_data_key_info[] = {
+       { PACKET_TYPE_ID, 6},
+       { IP_FRAGEMENT, 1},
+       { ROCE_TYPE, 1},
+       { NEXT_KEY, 5},
+       { VLAN_NUMBER, 2},
+       { SRC_VPORT, 12},
+       { DST_VPORT, 12},
+       { TUNNEL_PACKET, 1},
+};
+
+static const struct key_info tuple_key_info[] = {
+       { OUTER_DST_MAC, 48},
+       { OUTER_SRC_MAC, 48},
+       { OUTER_VLAN_TAG_FST, 16},
+       { OUTER_VLAN_TAG_SEC, 16},
+       { OUTER_ETH_TYPE, 16},
+       { OUTER_L2_RSV, 16},
+       { OUTER_IP_TOS, 8},
+       { OUTER_IP_PROTO, 8},
+       { OUTER_SRC_IP, 32},
+       { OUTER_DST_IP, 32},
+       { OUTER_L3_RSV, 16},
+       { OUTER_SRC_PORT, 16},
+       { OUTER_DST_PORT, 16},
+       { OUTER_L4_RSV, 32},
+       { OUTER_TUN_VNI, 24},
+       { OUTER_TUN_FLOW_ID, 8},
+       { INNER_DST_MAC, 48},
+       { INNER_SRC_MAC, 48},
+       { INNER_VLAN_TAG_FST, 16},
+       { INNER_VLAN_TAG_SEC, 16},
+       { INNER_ETH_TYPE, 16},
+       { INNER_L2_RSV, 16},
+       { INNER_IP_TOS, 8},
+       { INNER_IP_PROTO, 8},
+       { INNER_SRC_IP, 32},
+       { INNER_DST_IP, 32},
+       { INNER_L3_RSV, 16},
+       { INNER_SRC_PORT, 16},
+       { INNER_DST_PORT, 16},
+       { INNER_L4_RSV, 32},
+};
+
+#define MAX_KEY_LENGTH 400
+#define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
+#define MAX_KEY_BYTES  (MAX_KEY_DWORDS * 4)
+#define MAX_META_DATA_LENGTH   32
+
+enum HCLGE_FD_PACKET_TYPE {
+       NIC_PACKET,
+       ROCE_PACKET,
+};
+
+enum HCLGE_FD_ACTION {
+       HCLGE_FD_ACTION_ACCEPT_PACKET,
+       HCLGE_FD_ACTION_DROP_PACKET,
+};
+
+struct hclge_fd_key_cfg {
+       u8 key_sel;
+       u8 inner_sipv6_word_en;
+       u8 inner_dipv6_word_en;
+       u8 outer_sipv6_word_en;
+       u8 outer_dipv6_word_en;
+       u32 tuple_active;
+       u32 meta_data_active;
+};
+
+struct hclge_fd_cfg {
+       u8 fd_mode;
+       u8 fd_en;
+       u16 max_key_length;
+       u32 proto_support;
+       u32 rule_num[2]; /* rule entry number */
+       u16 cnt_num[2]; /* rule hit counter number */
+       struct hclge_fd_key_cfg key_cfg[2];
+};
+
+struct hclge_fd_rule_tuples {
+       u8 src_mac[6];
+       u8 dst_mac[6];
+       u32 src_ip[4];
+       u32 dst_ip[4];
+       u16 src_port;
+       u16 dst_port;
+       u16 vlan_tag1;
+       u16 ether_proto;
+       u8 ip_tos;
+       u8 ip_proto;
+};
+
+struct hclge_fd_rule {
+       struct hlist_node rule_node;
+       struct hclge_fd_rule_tuples tuples;
+       struct hclge_fd_rule_tuples tuples_mask;
+       u32 unused_tuple;
+       u32 flow_type;
+       u8 action;
+       u16 vf_id;
+       u16 queue_id;
+       u16 location;
+};
+
+struct hclge_fd_ad_data {
+       u16 ad_id;
+       u8 drop_packet;
+       u8 forward_to_direct_queue;
+       u16 queue_id;
+       u8 use_counter;
+       u8 counter_id;
+       u8 use_next_stage;
+       u8 write_rule_id_to_bd;
+       u8 next_input_key;
+       u16 rule_id;
+};
+
+/* For each bit of TCAM entry, it uses a pair of 'x' and
+ * 'y' to indicate which value to match, like below:
+ * ----------------------------------
+ * | bit x | bit y |  search value  |
+ * ----------------------------------
+ * |   0   |   0   |   always hit   |
+ * ----------------------------------
+ * |   1   |   0   |   match '0'    |
+ * ----------------------------------
+ * |   0   |   1   |   match '1'    |
+ * ----------------------------------
+ * |   1   |   1   |   invalid      |
+ * ----------------------------------
+ * Then, for an input key (k) and mask (v), the x and y values can be
+ * calculated with the formulae:
+ *     x = (~k) & v
+ *     y = (k ^ ~v) & k
+ */
+#define calc_x(x, k, v) ((x) = (~(k) & (v)))
+#define calc_y(y, k, v) \
+       do { \
+               const typeof(k) _k_ = (k); \
+               const typeof(v) _v_ = (v); \
+               (y) = (_k_ ^ ~_v_) & (_k_); \
+       } while (0)
+
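The x/y encoding can be checked with a small standalone program. This is a sketch that copies the two macros from the patch and applies them to 8-bit values (typeof is a GNU extension, so build with gcc); the key/mask values are arbitrary demo inputs:

#include <stdio.h>
#include <stdint.h>

/* copied from the patch above */
#define calc_x(x, k, v) ((x) = (~(k) & (v)))
#define calc_y(y, k, v) \
	do { \
		const typeof(k) _k_ = (k); \
		const typeof(v) _v_ = (v); \
		(y) = (_k_ ^ ~_v_) & (_k_); \
	} while (0)

int main(void)
{
	uint8_t k = 0xA5;	/* key: the bit values to match */
	uint8_t v = 0x0F;	/* mask: only the low nibble is significant */
	uint8_t x, y;

	calc_x(x, k, v);
	calc_y(y, k, v);
	/* Low nibble (v = 1): x/y encode match '0' or match '1' per the
	 * table above; high nibble (v = 0): x = y = 0, i.e. "always hit".
	 */
	printf("x=0x%02x y=0x%02x\n", x, y);	/* prints x=0x0a y=0x05 */
	return 0;
}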
 #define HCLGE_VPORT_NUM 256
 struct hclge_dev {
        struct pci_dev *pdev;
@@ -442,12 +668,22 @@ struct hclge_dev {
        u32 pkt_buf_size; /* Total pf buf size for tx/rx */
        u32 mps; /* Max packet size */
 
-       enum hclge_mta_dmac_sel_type mta_mac_sel_type;
-       bool enable_mta; /* Multicast filter enable */
-
        struct hclge_vlan_type_cfg vlan_type_cfg;
 
        unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
+
+       struct hclge_fd_cfg fd_cfg;
+       struct hlist_head fd_rule_list;
+       u16 hclge_fd_rule_num;
+
+       u16 wanted_umv_size;
+       /* max available unicast mac vlan space */
+       u16 max_umv_size;
+       /* private unicast mac vlan space, the same for the PF and its VFs */
+       u16 priv_umv_size;
+       /* unicast mac vlan space shared by PF and its VFs */
+       u16 share_umv_size;
+       struct mutex umv_mutex; /* protect share_umv_size */
 };
 
 /* VPort level vlan tag configuration for TX direction */
@@ -500,13 +736,12 @@ struct hclge_vport {
        struct hclge_tx_vtag_cfg  txvlan_cfg;
        struct hclge_rx_vtag_cfg  rxvlan_cfg;
 
+       u16 used_umv_num;
+
        int vport_id;
        struct hclge_dev *back;  /* Back reference to associated dev */
        struct hnae3_handle nic;
        struct hnae3_handle roce;
-
-       bool accept_mta_mc; /* whether to accept mta filter multicast */
-       unsigned long mta_shadow[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
 };
 
 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
@@ -521,15 +756,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
                            const unsigned char *addr);
 
-int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
-                             u8 func_id,
-                             bool enable);
-int hclge_update_mta_status_common(struct hclge_vport *vport,
-                                  unsigned long *status,
-                                  u16 idx,
-                                  u16 count,
-                                  bool update_filter);
-
 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
                                int vector_id, bool en,
index f34851c91eb39432705a6206959feffa7cc56529..04462a347a94075bd28453b6db9088d77ef586c9 100644 (file)
@@ -233,43 +233,6 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
        return 0;
 }
 
-static int hclge_set_vf_mc_mta_status(struct hclge_vport *vport,
-                                     u8 *msg, u8 idx, bool is_end)
-{
-#define HCLGE_MTA_STATUS_MSG_SIZE 13
-#define HCLGE_MTA_STATUS_MSG_BITS \
-                               (HCLGE_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
-#define HCLGE_MTA_STATUS_MSG_END_BITS \
-                               (HCLGE_MTA_TBL_SIZE % HCLGE_MTA_STATUS_MSG_BITS)
-       unsigned long status[BITS_TO_LONGS(HCLGE_MTA_STATUS_MSG_BITS)];
-       u16 tbl_cnt;
-       u16 tbl_idx;
-       u8 msg_ofs;
-       u8 msg_bit;
-
-       tbl_cnt = is_end ? HCLGE_MTA_STATUS_MSG_END_BITS :
-                       HCLGE_MTA_STATUS_MSG_BITS;
-
-       /* set msg field */
-       msg_ofs = 0;
-       msg_bit = 0;
-       memset(status, 0, sizeof(status));
-       for (tbl_idx = 0; tbl_idx < tbl_cnt; tbl_idx++) {
-               if (msg[msg_ofs] & BIT(msg_bit))
-                       set_bit(tbl_idx, status);
-
-               msg_bit++;
-               if (msg_bit == BITS_PER_BYTE) {
-                       msg_bit = 0;
-                       msg_ofs++;
-               }
-       }
-
-       return hclge_update_mta_status_common(vport,
-                                       status, idx * HCLGE_MTA_STATUS_MSG_BITS,
-                                       tbl_cnt, is_end);
-}
-
 static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
                                    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
                                    bool gen_resp)
@@ -284,27 +247,6 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
                status = hclge_add_mc_addr_common(vport, mac_addr);
        } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
                status = hclge_rm_mc_addr_common(vport, mac_addr);
-       } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE) {
-               u8 func_id = vport->vport_id;
-               bool enable = mbx_req->msg[2];
-
-               status = hclge_cfg_func_mta_filter(hdev, func_id, enable);
-       } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ) {
-               resp_data = hdev->mta_mac_sel_type;
-               resp_len = sizeof(u8);
-               gen_resp = true;
-               status = 0;
-       } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE) {
-               /* mta status update msg format
-                * msg[2.6 : 2.0]  msg index
-                * msg[2.7]        msg is end
-                * msg[15 : 3]     mta status bits[103 : 0]
-                */
-               bool is_end = (mbx_req->msg[2] & 0x80) ? true : false;
-
-               status = hclge_set_vf_mc_mta_status(vport, &mbx_req->msg[3],
-                                                   mbx_req->msg[2] & 0x7F,
-                                                   is_end);
        } else {
                dev_err(&hdev->pdev->dev,
                        "failed to set mcast mac addr, unknown subcode %d\n",
index 00bb39451bc5cf5275b6822b69057437cc6883f8..aa5cb9834d73a807dd18661c10069b7c929cc6d6 100644 (file)
@@ -193,6 +193,7 @@ static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
 
        ether_addr_copy(pause_param->mac_addr, addr);
+       ether_addr_copy(pause_param->mac_addr_extra, addr);
        pause_param->pause_trans_gap = pause_trans_gap;
        pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
 
@@ -1279,10 +1280,15 @@ int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
        return 0;
 }
 
-void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
+int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
 {
        u8 i, bit_map = 0;
 
+       for (i = 0; i < hdev->num_alloc_vport; i++) {
+               if (num_tc > hdev->vport[i].alloc_tqps)
+                       return -EINVAL;
+       }
+
        hdev->tm_info.num_tc = num_tc;
 
        for (i = 0; i < hdev->tm_info.num_tc; i++)
@@ -1296,6 +1302,8 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
        hdev->hw_tc_map = bit_map;
 
        hclge_tm_schd_info_init(hdev);
+
+       return 0;
 }
 
 int hclge_tm_init_hw(struct hclge_dev *hdev)
index dd4c194747c16cbf4a716d811dd4102615231e2f..25eef13a3e14bb78ab62de74420a8dd1beb45189 100644 (file)
@@ -106,6 +106,10 @@ struct hclge_cfg_pause_param_cmd {
        u8 pause_trans_gap;
        u8 rsvd;
        __le16 pause_trans_time;
+       u8 rsvd1[6];
+       /* extra mac address used to double-check the pause frame */
+       u8 mac_addr_extra[ETH_ALEN];
+       u16 rsvd2;
 };
 
 struct hclge_pfc_stats_cmd {
@@ -128,7 +132,7 @@ int hclge_tm_schd_init(struct hclge_dev *hdev);
 int hclge_pause_setup_hw(struct hclge_dev *hdev);
 int hclge_tm_schd_mode_hw(struct hclge_dev *hdev);
 int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
-void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
+int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
 int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
 int hclge_tm_map_cfg(struct hclge_dev *hdev);
 int hclge_tm_init_hw(struct hclge_dev *hdev);
index 978193123c717b871560dad04c0e8a36e876d605..ca4a9f790917747f4dce71e4599ed337cdc7cd69 100644 (file)
@@ -746,126 +746,6 @@ static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
        }
 }
 
-static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev)
-{
-       u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX;
-       int ret;
-
-       ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
-                                  HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ,
-                                  NULL, 0, true, &resp_msg, sizeof(u8));
-
-       if (ret) {
-               dev_err(&hdev->pdev->dev,
-                       "Read mta type fail, ret=%d.\n", ret);
-               return ret;
-       }
-
-       if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) {
-               dev_err(&hdev->pdev->dev,
-                       "Read mta type invalid, resp=%d.\n", resp_msg);
-               return -EINVAL;
-       }
-
-       hdev->mta_mac_sel_type = resp_msg;
-
-       return 0;
-}
-
-static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev,
-                                            const u8 *addr)
-{
-       u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type;
-       u16 high_val = addr[1] | (addr[0] << 8);
-
-       return (high_val >> rsh) & 0xfff;
-}
-
-static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev,
-                                       unsigned long *status)
-{
-#define HCLGEVF_MTA_STATUS_MSG_SIZE 13
-#define HCLGEVF_MTA_STATUS_MSG_BITS \
-                       (HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
-#define HCLGEVF_MTA_STATUS_MSG_END_BITS \
-                       (HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS)
-       u16 tbl_cnt;
-       u16 tbl_idx;
-       u8 msg_cnt;
-       u8 msg_idx;
-       int ret;
-
-       msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE,
-                              HCLGEVF_MTA_STATUS_MSG_BITS);
-       tbl_idx = 0;
-       msg_idx = 0;
-       while (msg_cnt--) {
-               u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1];
-               u8 *p = &msg[1];
-               u8 msg_ofs;
-               u8 msg_bit;
-
-               memset(msg, 0, sizeof(msg));
-
-               /* set index field */
-               msg[0] = 0x7F & msg_idx;
-
-               /* set end flag field */
-               if (msg_cnt == 0) {
-                       msg[0] |= 0x80;
-                       tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS;
-               } else {
-                       tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS;
-               }
-
-               /* set status field */
-               msg_ofs = 0;
-               msg_bit = 0;
-               while (tbl_cnt--) {
-                       if (test_bit(tbl_idx, status))
-                               p[msg_ofs] |= BIT(msg_bit);
-
-                       tbl_idx++;
-
-                       msg_bit++;
-                       if (msg_bit == BITS_PER_BYTE) {
-                               msg_bit = 0;
-                               msg_ofs++;
-                       }
-               }
-
-               ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
-                                          HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE,
-                                          msg, sizeof(msg), false, NULL, 0);
-               if (ret)
-                       break;
-
-               msg_idx++;
-       }
-
-       return ret;
-}
-
-static int hclgevf_update_mta_status(struct hnae3_handle *handle)
-{
-       unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)];
-       struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-       struct net_device *netdev = hdev->nic.kinfo.netdev;
-       struct netdev_hw_addr *ha;
-       u16 tbl_idx;
-
-       /* clear status */
-       memset(mta_status, 0, sizeof(mta_status));
-
-       /* update status from mc addr list */
-       netdev_for_each_mc_addr(ha, netdev) {
-               tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr);
-               set_bit(tbl_idx, mta_status);
-       }
-
-       return hclgevf_do_update_mta_status(hdev, mta_status);
-}
-
 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
 {
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -1871,14 +1751,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
                goto err_config;
        }
 
-       /* Initialize mta type for this VF */
-       ret = hclgevf_cfg_func_mta_type(hdev);
-       if (ret) {
-               dev_err(&hdev->pdev->dev,
-                       "failed(%d) to initialize MTA type\n", ret);
-               goto err_config;
-       }
-
        /* Initialize RSS for this VF */
        ret = hclgevf_rss_init_hw(hdev);
        if (ret) {
@@ -1975,11 +1847,11 @@ static void hclgevf_get_channels(struct hnae3_handle *handle,
 }
 
 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
-                                         u16 *free_tqps, u16 *max_rss_size)
+                                         u16 *alloc_tqps, u16 *max_rss_size)
 {
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 
-       *free_tqps = 0;
+       *alloc_tqps = hdev->num_tqps;
        *max_rss_size = hdev->rss_size_max;
 }
 
@@ -2038,7 +1910,6 @@ static const struct hnae3_ae_ops hclgevf_ops = {
        .rm_uc_addr = hclgevf_rm_uc_addr,
        .add_mc_addr = hclgevf_add_mc_addr,
        .rm_mc_addr = hclgevf_rm_mc_addr,
-       .update_mta_status = hclgevf_update_mta_status,
        .get_stats = hclgevf_get_stats,
        .update_stats = hclgevf_update_stats,
        .get_strings = hclgevf_get_strings,
index 2af01f107c635dcd9c01b6293a617af69553b2ec..cf5fbf793c5ee9578a07fe46955963440b9ff7ae 100644 (file)
@@ -47,9 +47,6 @@
 #define HCLGEVF_RSS_CFG_TBL_NUM \
        (HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
 
-#define HCLGEVF_MTA_TBL_SIZE           4096
-#define HCLGEVF_MTA_TYPE_SEL_MAX       4
-
 /* states of hclgevf device & tasks */
 enum hclgevf_states {
        /* device states */
@@ -157,8 +154,6 @@ struct hclgevf_dev {
        u16 *vector_status;
        int *vector_irq;
 
-       bool accept_mta_mc; /* whether to accept mta filter multicast */
-       u8 mta_mac_sel_type;
        bool mbx_event_pending;
        struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
        struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
index 09e9da10b786549b6232d8069c4e45857b95fd8c..4a8f82938ed5b87c8da6b09e88e08d387c652f0c 100644 (file)
@@ -789,23 +789,6 @@ static void hinic_get_stats64(struct net_device *netdev,
        stats->tx_errors  = nic_tx_stats->tx_dropped;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hinic_netpoll(struct net_device *netdev)
-{
-       struct hinic_dev *nic_dev = netdev_priv(netdev);
-       int i, num_qps;
-
-       num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
-       for (i = 0; i < num_qps; i++) {
-               struct hinic_txq *txq = &nic_dev->txqs[i];
-               struct hinic_rxq *rxq = &nic_dev->rxqs[i];
-
-               napi_schedule(&txq->napi);
-               napi_schedule(&rxq->napi);
-       }
-}
-#endif
-
 static const struct net_device_ops hinic_netdev_ops = {
        .ndo_open = hinic_open,
        .ndo_stop = hinic_close,
@@ -818,9 +801,6 @@ static const struct net_device_ops hinic_netdev_ops = {
        .ndo_start_xmit = hinic_xmit_frame,
        .ndo_tx_timeout = hinic_tx_timeout,
        .ndo_get_stats64 = hinic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = hinic_netpoll,
-#endif
 };
 
 static void netdev_features_init(struct net_device *netdev)
index aa0b89777e7449ce33950c042af474060cdd2df1..3baabdc897262698ab23b4bc1dedec22edc89919 100644 (file)
@@ -920,17 +920,6 @@ static int ehea_poll(struct napi_struct *napi, int budget)
        return rx;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ehea_netpoll(struct net_device *dev)
-{
-       struct ehea_port *port = netdev_priv(dev);
-       int i;
-
-       for (i = 0; i < port->num_def_qps; i++)
-               napi_schedule(&port->port_res[i].napi);
-}
-#endif
-
 static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
 {
        struct ehea_port_res *pr = param;
@@ -2952,9 +2941,6 @@ static const struct net_device_ops ehea_netdev_ops = {
        .ndo_open               = ehea_open,
        .ndo_stop               = ehea_stop,
        .ndo_start_xmit         = ehea_start_xmit,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ehea_netpoll,
-#endif
        .ndo_get_stats64        = ehea_get_stats64,
        .ndo_set_mac_address    = ehea_set_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
index a8369addfe688412212d8eb7716f6cbbcbb41d28..7893beffcc714215a5ed47fc8658b9db66ee9d3a 100644 (file)
@@ -2207,19 +2207,6 @@ restart_poll:
        return frames_processed;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ibmvnic_netpoll_controller(struct net_device *dev)
-{
-       struct ibmvnic_adapter *adapter = netdev_priv(dev);
-       int i;
-
-       replenish_pools(netdev_priv(dev));
-       for (i = 0; i < adapter->req_rx_queues; i++)
-               ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
-                                    adapter->rx_scrq[i]);
-}
-#endif
-
 static int wait_for_reset(struct ibmvnic_adapter *adapter)
 {
        int rc, ret;
@@ -2292,9 +2279,6 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
        .ndo_set_mac_address    = ibmvnic_set_mac,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_tx_timeout         = ibmvnic_tx_timeout,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ibmvnic_netpoll_controller,
-#endif
        .ndo_change_mtu         = ibmvnic_change_mtu,
        .ndo_features_check     = ibmvnic_features_check,
 };
@@ -2364,8 +2348,13 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
-       ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
-       ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
+       if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
+               ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
+               ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
+       } else {
+               ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
+               ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
+       }
        ring->rx_mini_max_pending = 0;
        ring->rx_jumbo_max_pending = 0;
        ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
@@ -2378,21 +2367,23 @@ static int ibmvnic_set_ringparam(struct net_device *netdev,
                                 struct ethtool_ringparam *ring)
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+       int ret;
 
-       if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq  ||
-           ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
-               netdev_err(netdev, "Invalid request.\n");
-               netdev_err(netdev, "Max tx buffers = %llu\n",
-                          adapter->max_rx_add_entries_per_subcrq);
-               netdev_err(netdev, "Max rx buffers = %llu\n",
-                          adapter->max_tx_entries_per_subcrq);
-               return -EINVAL;
-       }
-
+       ret = 0;
        adapter->desired.rx_entries = ring->rx_pending;
        adapter->desired.tx_entries = ring->tx_pending;
 
-       return wait_for_reset(adapter);
+       ret = wait_for_reset(adapter);
+
+       if (!ret &&
+           (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
+            adapter->req_tx_entries_per_subcrq != ring->tx_pending))
+               netdev_info(netdev,
+                           "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
+                           ring->rx_pending, ring->tx_pending,
+                           adapter->req_rx_add_entries_per_subcrq,
+                           adapter->req_tx_entries_per_subcrq);
+       return ret;
 }
 
 static void ibmvnic_get_channels(struct net_device *netdev,
@@ -2400,8 +2391,14 @@ static void ibmvnic_get_channels(struct net_device *netdev,
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
-       channels->max_rx = adapter->max_rx_queues;
-       channels->max_tx = adapter->max_tx_queues;
+       if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
+               channels->max_rx = adapter->max_rx_queues;
+               channels->max_tx = adapter->max_tx_queues;
+       } else {
+               channels->max_rx = IBMVNIC_MAX_QUEUES;
+               channels->max_tx = IBMVNIC_MAX_QUEUES;
+       }
+
        channels->max_other = 0;
        channels->max_combined = 0;
        channels->rx_count = adapter->req_rx_queues;
@@ -2414,11 +2411,23 @@ static int ibmvnic_set_channels(struct net_device *netdev,
                                struct ethtool_channels *channels)
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+       int ret;
 
+       ret = 0;
        adapter->desired.rx_queues = channels->rx_count;
        adapter->desired.tx_queues = channels->tx_count;
 
-       return wait_for_reset(adapter);
+       ret = wait_for_reset(adapter);
+
+       if (!ret &&
+           (adapter->req_rx_queues != channels->rx_count ||
+            adapter->req_tx_queues != channels->tx_count))
+               netdev_info(netdev,
+                           "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
+                           channels->rx_count, channels->tx_count,
+                           adapter->req_rx_queues, adapter->req_tx_queues);
+       return ret;
 }
 
 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2426,32 +2435,43 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
        struct ibmvnic_adapter *adapter = netdev_priv(dev);
        int i;
 
-       if (stringset != ETH_SS_STATS)
-               return;
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
+                               i++, data += ETH_GSTRING_LEN)
+                       memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
 
-       for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
-               memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
+               for (i = 0; i < adapter->req_tx_queues; i++) {
+                       snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
+                       data += ETH_GSTRING_LEN;
 
-       for (i = 0; i < adapter->req_tx_queues; i++) {
-               snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
-               data += ETH_GSTRING_LEN;
+                       snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
+                       data += ETH_GSTRING_LEN;
 
-               snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
-               data += ETH_GSTRING_LEN;
+                       snprintf(data, ETH_GSTRING_LEN,
+                                "tx%d_dropped_packets", i);
+                       data += ETH_GSTRING_LEN;
+               }
 
-               snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
-               data += ETH_GSTRING_LEN;
-       }
+               for (i = 0; i < adapter->req_rx_queues; i++) {
+                       snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
+                       data += ETH_GSTRING_LEN;
 
-       for (i = 0; i < adapter->req_rx_queues; i++) {
-               snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
-               data += ETH_GSTRING_LEN;
+                       snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
+                       data += ETH_GSTRING_LEN;
 
-               snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
-               data += ETH_GSTRING_LEN;
+                       snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
+                       data += ETH_GSTRING_LEN;
+               }
+               break;
 
-               snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
-               data += ETH_GSTRING_LEN;
+       case ETH_SS_PRIV_FLAGS:
+               for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
+                       strcpy(data + i * ETH_GSTRING_LEN,
+                              ibmvnic_priv_flags[i]);
+               break;
+       default:
+               return;
        }
 }
 
@@ -2464,6 +2484,8 @@ static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
                return ARRAY_SIZE(ibmvnic_stats) +
                       adapter->req_tx_queues * NUM_TX_STATS +
                       adapter->req_rx_queues * NUM_RX_STATS;
+       case ETH_SS_PRIV_FLAGS:
+               return ARRAY_SIZE(ibmvnic_priv_flags);
        default:
                return -EOPNOTSUPP;
        }
@@ -2514,6 +2536,25 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
        }
 }
 
+static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
+{
+       struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+
+       return adapter->priv_flags;
+}
+
+static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+       struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+       bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
+
+       if (which_maxes)
+               adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
+       else
+               adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
+
+       return 0;
+}
+
 static const struct ethtool_ops ibmvnic_ethtool_ops = {
        .get_drvinfo            = ibmvnic_get_drvinfo,
        .get_msglevel           = ibmvnic_get_msglevel,
@@ -2527,6 +2568,8 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
        .get_sset_count         = ibmvnic_get_sset_count,
        .get_ethtool_stats      = ibmvnic_get_ethtool_stats,
        .get_link_ksettings     = ibmvnic_get_link_ksettings,
+       .get_priv_flags         = ibmvnic_get_priv_flags,
+       .set_priv_flags         = ibmvnic_set_priv_flags,
 };
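The new private flag plugs into ethtool's generic string-set machinery: the core learns the flag names via get_sset_count(ETH_SS_PRIV_FLAGS) and get_strings(), and get/set_priv_flags() then exchange a bitmask in which bit i corresponds to string i, so "use-server-maxes" maps to IBMVNIC_USE_SERVER_MAXES (bit 0) and selects whether the server-reported maxima or the driver constants bound the ring and channel requests. From userspace this is typically driven with `ethtool --show-priv-flags <dev>` and `ethtool --set-priv-flags <dev> use-server-maxes on`. A minimal standalone sketch of the bit-to-string mapping (hypothetical demo, not driver code):

#include <stdio.h>

#define ETH_GSTRING_LEN 32
#define IBMVNIC_USE_SERVER_MAXES 0x1	/* bit 0 <-> string 0 */

static const char priv_flags[][ETH_GSTRING_LEN] = {
	"use-server-maxes",
};

int main(void)
{
	unsigned int flags = IBMVNIC_USE_SERVER_MAXES;	/* as if set via ethtool */
	unsigned int i;

	/* ethtool passes a bitmask; bit i corresponds to string i */
	for (i = 0; i < sizeof(priv_flags) / sizeof(priv_flags[0]); i++)
		printf("%s: %s\n", priv_flags[i],
		       (flags & (1u << i)) ? "on" : "off");
	return 0;
}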
 
 /* Routines for managing CRQs/sCRQs  */
index f06eec145ca60689bef26f119ed18f372867924a..18103b811d4db398df7ce6a6e27c6bda2077c4c2 100644 (file)
@@ -39,7 +39,8 @@
 #define IBMVNIC_RX_WEIGHT              16
 /* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
 #define IBMVNIC_BUFFS_PER_POOL 100
-#define IBMVNIC_MAX_QUEUES     10
+#define IBMVNIC_MAX_QUEUES     16
+#define IBMVNIC_MAX_QUEUE_SZ   4096
 
 #define IBMVNIC_TSO_BUF_SZ     65536
 #define IBMVNIC_TSO_BUFS       64
 #define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
 #define IBMVNIC_BUFFER_HLEN 500
 
+static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
+#define IBMVNIC_USE_SERVER_MAXES 0x1
+       "use-server-maxes"
+};
+
 struct ibmvnic_login_buffer {
        __be32 len;
        __be32 version;
@@ -969,6 +975,7 @@ struct ibmvnic_adapter {
        struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
        dma_addr_t ip_offload_ctrl_tok;
        u32 msg_enable;
+       u32 priv_flags;
 
        /* Vital Product Data (VPD) */
        struct ibmvnic_vpd *vpd;
index 56b911a5dd8be669ec0fcd231eeb01987cd7afc6..a20d1cf058ad472b751a59787eb79d444c9c8486 100644 (file)
@@ -132,8 +132,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
                dev_info(&pf->pdev->dev, "        vlan_features = 0x%08lx\n",
                         (unsigned long int)nd->vlan_features);
        }
-       dev_info(&pf->pdev->dev, "    active_vlans is %s\n",
-                vsi->active_vlans ? "<valid>" : "<null>");
        dev_info(&pf->pdev->dev,
                 "    flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
                 vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
index f4bb2779f03ad3d25b7aef5ddd60346c6d57fbae..81b0e1f8d14b6d041e4a8668b0f20955b9633e55 100644 (file)
@@ -4256,7 +4256,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
                vf->link_forced = true;
                vf->link_up = true;
                pfe.event_data.link_event.link_status = true;
-               pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
+               pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
                break;
        case IFLA_VF_LINK_STATE_DISABLE:
                vf->link_forced = true;
index a512f7521841129cf24131a20f89f467a6a38c4c..272d76b733aab5bd76f3ac422d932fbaeb5501dd 100644 (file)
@@ -342,7 +342,7 @@ struct iavf_adapter {
        struct iavf_channel_config ch_config;
        u8 num_tc;
        struct list_head cloud_filter_list;
-       /* lock to protest access to the cloud filter list */
+       /* lock to protect access to the cloud filter list */
        spinlock_t cloud_filter_list_lock;
        u16 num_cloud_filters;
 };
index 4058673fd8532481aae239eb4d3f5b7049754439..e5d6f684437e42ef65df30cd1cda6b8d64702c5d 100644 (file)
@@ -13,5 +13,7 @@ ice-y := ice_main.o   \
         ice_nvm.o      \
         ice_switch.o   \
         ice_sched.o    \
+        ice_lib.o      \
         ice_txrx.o     \
         ice_ethtool.o
+ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
index 9cf233d085d80943f487d6ad84982d0ced31d72e..4c4b5717a627de6353f8f4248163f1f7e333714f 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/if_bridge.h>
+#include <linux/avf/virtchnl.h>
 #include <net/ipv6.h>
 #include "ice_devids.h"
 #include "ice_type.h"
 #include "ice_switch.h"
 #include "ice_common.h"
 #include "ice_sched.h"
+#include "ice_virtchnl_pf.h"
+#include "ice_sriov.h"
 
 extern const char ice_drv_ver[];
 #define ICE_BAR0               0
 #define ICE_DFLT_NUM_DESC      128
-#define ICE_MIN_NUM_DESC       8
-#define ICE_MAX_NUM_DESC       8160
 #define ICE_REQ_DESC_MULTIPLE  32
+#define ICE_MIN_NUM_DESC       ICE_REQ_DESC_MULTIPLE
+#define ICE_MAX_NUM_DESC       8160
 #define ICE_DFLT_TRAFFIC_CLASS BIT(0)
 #define ICE_INT_NAME_STR_LEN   (IFNAMSIZ + 16)
 #define ICE_ETHTOOL_FWVER_LEN  32
 #define ICE_AQ_LEN             64
+#define ICE_MBXQ_LEN           64
 #define ICE_MIN_MSIX           2
 #define ICE_NO_VSI             0xffff
 #define ICE_MAX_VSI_ALLOC      130
@@ -63,6 +67,14 @@ extern const char ice_drv_ver[];
 #define ICE_RES_MISC_VEC_ID    (ICE_RES_VALID_BIT - 1)
 #define ICE_INVAL_Q_INDEX      0xffff
 #define ICE_INVAL_VFID         256
+#define ICE_MAX_VF_COUNT       256
+#define ICE_MAX_QS_PER_VF              256
+#define ICE_MIN_QS_PER_VF              1
+#define ICE_DFLT_QS_PER_VF             4
+#define ICE_MAX_BASE_QS_PER_VF         16
+#define ICE_MAX_INTR_PER_VF            65
+#define ICE_MIN_INTR_PER_VF            (ICE_MIN_QS_PER_VF + 1)
+#define ICE_DFLT_INTR_PER_VF           (ICE_DFLT_QS_PER_VF + 1)
 
 #define ICE_VSIQF_HKEY_ARRAY_SIZE      ((VSIQF_HKEY_MAX_INDEX + 1) *   4)
 
@@ -124,7 +136,7 @@ enum ice_state {
        __ICE_DOWN,
        __ICE_NEEDS_RESTART,
        __ICE_PREPARED_FOR_RESET,       /* set by driver when prepared */
-       __ICE_RESET_RECOVERY_PENDING,   /* set by driver when reset starts */
+       __ICE_RESET_OICR_RECV,          /* set by driver after rcv reset OICR */
        __ICE_PFR_REQ,                  /* set by driver and peers */
        __ICE_CORER_REQ,                /* set by driver and peers */
        __ICE_GLOBR_REQ,                /* set by driver and peers */
@@ -133,9 +145,21 @@ enum ice_state {
        __ICE_EMPR_RECV,                /* set by OICR handler */
        __ICE_SUSPENDED,                /* set on module remove path */
        __ICE_RESET_FAILED,             /* set by reset/rebuild */
+       /* When checking for the PF to be in a nominal operating state, only
+        * the bits grouped at the beginning of the list, i.e. those occurring
+        * before __ICE_STATE_NOMINAL_CHECK_BITS, are checked.  If a new bit
+        * must be considered for the nominal operating state, add it before
+        * __ICE_STATE_NOMINAL_CHECK_BITS.  Do not move this entry's position
+        * without appropriate consideration.
+        */
+       __ICE_STATE_NOMINAL_CHECK_BITS,
        __ICE_ADMINQ_EVENT_PENDING,
+       __ICE_MAILBOXQ_EVENT_PENDING,
        __ICE_MDD_EVENT_PENDING,
+       __ICE_VFLR_EVENT_PENDING,
        __ICE_FLTR_OVERFLOW_PROMISC,
+       __ICE_VF_DIS,
        __ICE_CFG_BUSY,
        __ICE_SERVICE_SCHED,
        __ICE_SERVICE_DIS,
@@ -172,7 +196,8 @@ struct ice_vsi {
        u32 rx_buf_failed;
        u32 rx_page_failed;
        int num_q_vectors;
-       int base_vector;
+       int sw_base_vector;             /* Irq base for OS reserved vectors */
+       int hw_base_vector;             /* HW (absolute) index of a vector */
        enum ice_vsi_type type;
        u16 vsi_num;                     /* HW (absolute) index of this VSI */
        u16 idx;                         /* software index in pf->vsi[] */
@@ -180,6 +205,8 @@ struct ice_vsi {
        /* Interrupt thresholds */
        u16 work_lmt;
 
+       s16 vf_id;                      /* VF ID for SR-IOV VSIs */
+
        /* RSS config */
        u16 rss_table_size;     /* HW RSS table size */
        u16 rss_size;           /* Allocated RSS queues */
@@ -229,21 +256,39 @@ struct ice_q_vector {
        u8 num_ring_tx;                 /* total number of tx rings in vector */
        u8 num_ring_rx;                 /* total number of rx rings in vector */
        char name[ICE_INT_NAME_STR_LEN];
+       /* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
+        * value to the device
+        */
+       u8 intrl;
 } ____cacheline_internodealigned_in_smp;
 
 enum ice_pf_flags {
        ICE_FLAG_MSIX_ENA,
        ICE_FLAG_FLTR_SYNC,
        ICE_FLAG_RSS_ENA,
+       ICE_FLAG_SRIOV_ENA,
+       ICE_FLAG_SRIOV_CAPABLE,
        ICE_PF_FLAGS_NBITS              /* must be last */
 };
 
 struct ice_pf {
        struct pci_dev *pdev;
+
+       /* OS reserved IRQ details */
        struct msix_entry *msix_entries;
-       struct ice_res_tracker *irq_tracker;
+       struct ice_res_tracker *sw_irq_tracker;
+
+       /* HW reserved Interrupts for this PF */
+       struct ice_res_tracker *hw_irq_tracker;
+
        struct ice_vsi **vsi;           /* VSIs created by the driver */
        struct ice_sw *first_sw;        /* first switch created by firmware */
+       /* Virtchnl/SR-IOV config info */
+       struct ice_vf *vf;
+       int num_alloc_vfs;              /* actual number of VFs allocated */
+       u16 num_vfs_supported;          /* num VFs supported for this PF */
+       u16 num_vf_qps;                 /* num queue pairs per VF */
+       u16 num_vf_msix;                /* num vectors per VF */
        DECLARE_BITMAP(state, __ICE_STATE_NBITS);
        DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS);
        DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS);
@@ -256,9 +301,11 @@ struct ice_pf {
        struct mutex sw_mutex;          /* lock for protecting VSI alloc flow */
        u32 msg_enable;
        u32 hw_csum_rx_error;
-       u32 oicr_idx;           /* Other interrupt cause vector index */
+       u32 sw_oicr_idx;        /* Other interrupt cause SW vector index */
+       u32 num_avail_sw_msix;  /* remaining MSIX SW vectors left unclaimed */
+       u32 hw_oicr_idx;        /* Other interrupt cause vector HW index */
+       u32 num_avail_hw_msix;  /* remaining HW MSIX vectors left unclaimed */
        u32 num_lan_msix;       /* Total MSIX vectors for base driver */
-       u32 num_avail_msix;     /* remaining MSIX vectors left unclaimed */
        u16 num_lan_tx;         /* num lan tx queues setup */
        u16 num_lan_rx;         /* num lan rx queues setup */
        u16 q_left_tx;          /* remaining num tx queues left unclaimed */
@@ -293,8 +340,8 @@ struct ice_netdev_priv {
 static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
                                       struct ice_q_vector *q_vector)
 {
-       u32 vector = (vsi && q_vector) ? vsi->base_vector + q_vector->v_idx :
-                                       ((struct ice_pf *)hw->back)->oicr_idx;
+       u32 vector = (vsi && q_vector) ? vsi->hw_base_vector + q_vector->v_idx :
+                               ((struct ice_pf *)hw->back)->hw_oicr_idx;
        int itr = ICE_ITR_NONE;
        u32 val;
 
index f8dfd675486c5b6bf5c21598a7a7f25eb59907f4..6653555f55dd31d86e8053bf81a3d58570e68caf 100644 (file)
@@ -87,6 +87,8 @@ struct ice_aqc_list_caps {
 /* Device/Function buffer entry, repeated per reported capability */
 struct ice_aqc_list_caps_elem {
        __le16 cap;
+#define ICE_AQC_CAPS_SRIOV                             0x0012
+#define ICE_AQC_CAPS_VF                                        0x0013
 #define ICE_AQC_CAPS_VSI                               0x0017
 #define ICE_AQC_CAPS_RSS                               0x0040
 #define ICE_AQC_CAPS_RXQS                              0x0041
@@ -736,6 +738,10 @@ struct ice_aqc_add_elem {
        struct ice_aqc_txsched_elem_data generic[1];
 };
 
+struct ice_aqc_get_elem {
+       struct ice_aqc_txsched_elem_data generic[1];
+};
+
 struct ice_aqc_get_topo_elem {
        struct ice_aqc_txsched_topo_grp_info_hdr hdr;
        struct ice_aqc_txsched_elem_data
@@ -1071,6 +1077,19 @@ struct ice_aqc_nvm {
        __le32 addr_low;
 };
 
+/**
+ * Send to PF command (indirect 0x0801); id is only used by the PF
+ *
+ * Send to VF command (indirect 0x0802); id is only used by the PF
+ */
+struct ice_aqc_pf_vf_msg {
+       __le32 id;
+       u32 reserved;
+       __le32 addr_high;
+       __le32 addr_low;
+};
+
 /* Get/Set RSS key (indirect 0x0B04/0x0B02) */
 struct ice_aqc_get_set_rss_key {
 #define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)
@@ -1328,6 +1347,7 @@ struct ice_aq_desc {
                struct ice_aqc_query_txsched_res query_sched_res;
                struct ice_aqc_add_move_delete_elem add_move_delete_elem;
                struct ice_aqc_nvm nvm;
+               struct ice_aqc_pf_vf_msg virt;
                struct ice_aqc_get_set_rss_lut get_set_rss_lut;
                struct ice_aqc_get_set_rss_key get_set_rss_key;
                struct ice_aqc_add_txqs add_txqs;
@@ -1409,6 +1429,7 @@ enum ice_adminq_opc {
        /* transmit scheduler commands */
        ice_aqc_opc_get_dflt_topo                       = 0x0400,
        ice_aqc_opc_add_sched_elems                     = 0x0401,
+       ice_aqc_opc_get_sched_elems                     = 0x0404,
        ice_aqc_opc_suspend_sched_elems                 = 0x0409,
        ice_aqc_opc_resume_sched_elems                  = 0x040A,
        ice_aqc_opc_delete_sched_elems                  = 0x040F,
@@ -1424,6 +1445,10 @@ enum ice_adminq_opc {
        /* NVM commands */
        ice_aqc_opc_nvm_read                            = 0x0701,
 
+       /* PF/VF mailbox commands */
+       ice_mbx_opc_send_msg_to_pf                      = 0x0801,
+       ice_mbx_opc_send_msg_to_vf                      = 0x0802,
+
        /* RSS commands */
        ice_aqc_opc_set_rss_key                         = 0x0B02,
        ice_aqc_opc_set_rss_lut                         = 0x0B03,
index 0847dbf9d42fa3f35182bf3efa1037089daf99fc..c52f450f2c0d68d0f3e3924bb33d624029e81662 100644 (file)
@@ -422,7 +422,7 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
                        devm_kfree(ice_hw_to_dev(hw), lst_itr);
                }
        }
-
+       ice_rm_all_sw_replay_rule_info(hw);
        devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
        devm_kfree(ice_hw_to_dev(hw), sw);
 }
@@ -597,6 +597,39 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
        ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg End ]\n");
 }
 
+/**
+ * ice_get_itr_intrl_gran - determine int/intrl granularity
+ * @hw: pointer to the hw struct
+ *
+ * Determines the itr/intrl granularities based on the maximum aggregate
+ * bandwidth according to the device's configuration during power-on.
+ */
+static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
+{
+       u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
+                        GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
+                       GL_PWR_MODE_CTL_CAR_MAX_BW_S;
+
+       switch (max_agg_bw) {
+       case ICE_MAX_AGG_BW_200G:
+       case ICE_MAX_AGG_BW_100G:
+       case ICE_MAX_AGG_BW_50G:
+               hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
+               hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
+               break;
+       case ICE_MAX_AGG_BW_25G:
+               hw->itr_gran = ICE_ITR_GRAN_MAX_25;
+               hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
+               break;
+       default:
+               ice_debug(hw, ICE_DBG_INIT,
+                         "Failed to determine itr/intrl granularity\n");
+               return ICE_ERR_CFG;
+       }
+
+       return 0;
+}
+
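ice_get_itr_intrl_gran() pulls the max-bandwidth field out of GL_PWR_MODE_CTL with the usual mask-then-shift idiom. A standalone illustration of that decode (the shift/mask values below are assumed for the demo; the real GL_PWR_MODE_CTL_CAR_MAX_BW_* definitions live in the driver's register headers):

#include <stdio.h>
#include <stdint.h>

/* assumed demo values, not the real register layout */
#define CAR_MAX_BW_S	30
#define CAR_MAX_BW_M	(0x3u << CAR_MAX_BW_S)

int main(void)
{
	uint32_t reg = 0x80001234;	/* hypothetical register read */
	uint8_t max_agg_bw = (reg & CAR_MAX_BW_M) >> CAR_MAX_BW_S;

	printf("max_agg_bw field = %u\n", max_agg_bw);	/* prints 2 */
	return 0;
}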
 /**
  * ice_init_hw - main hardware initialization routine
  * @hw: pointer to the hardware structure
@@ -621,11 +654,9 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
        if (status)
                return status;
 
-       /* set these values to minimum allowed */
-       hw->itr_gran_200 = ICE_ITR_GRAN_MIN_200;
-       hw->itr_gran_100 = ICE_ITR_GRAN_MIN_100;
-       hw->itr_gran_50 = ICE_ITR_GRAN_MIN_50;
-       hw->itr_gran_25 = ICE_ITR_GRAN_MIN_25;
+       status = ice_get_itr_intrl_gran(hw);
+       if (status)
+               return status;
 
        status = ice_init_all_ctrlq(hw);
        if (status)
@@ -1375,6 +1406,28 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
                u16 cap = le16_to_cpu(cap_resp->cap);
 
                switch (cap) {
+               case ICE_AQC_CAPS_SRIOV:
+                       caps->sr_iov_1_1 = (number == 1);
+                       ice_debug(hw, ICE_DBG_INIT,
+                                 "HW caps: SR-IOV = %d\n", caps->sr_iov_1_1);
+                       break;
+               case ICE_AQC_CAPS_VF:
+                       if (dev_p) {
+                               dev_p->num_vfs_exposed = number;
+                               ice_debug(hw, ICE_DBG_INIT,
+                                         "HW caps: VFs exposed = %d\n",
+                                         dev_p->num_vfs_exposed);
+                       } else if (func_p) {
+                               func_p->num_allocd_vfs = number;
+                               func_p->vf_base_id = logical_id;
+                               ice_debug(hw, ICE_DBG_INIT,
+                                         "HW caps: VFs allocated = %d\n",
+                                         func_p->num_allocd_vfs);
+                               ice_debug(hw, ICE_DBG_INIT,
+                                         "HW caps: VF base_id = %d\n",
+                                         func_p->vf_base_id);
+                       }
+                       break;
                case ICE_AQC_CAPS_VSI:
                        if (dev_p) {
                                dev_p->num_vsi_allocd_to_host = number;
@@ -1451,7 +1504,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
  * @hw: pointer to the hw struct
  * @buf: a virtual buffer to hold the capabilities
  * @buf_size: Size of the virtual buffer
- * @data_size: Size of the returned data, or buf size needed if AQ err==ENOMEM
+ * @cap_count: cap count needed if AQ err==ENOMEM
  * @opc: capabilities type to discover - pass in the command opcode
  * @cd: pointer to command details structure or NULL
  *
@@ -1459,7 +1512,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
  * the firmware.
  */
 static enum ice_status
-ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,
+ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
                     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
 {
        struct ice_aqc_list_caps *cmd;
@@ -1477,58 +1530,76 @@ ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,
        status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
        if (!status)
                ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
-       *data_size = le16_to_cpu(desc.datalen);
-
+       else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
+               *cap_count =
+                       DIV_ROUND_UP(le16_to_cpu(desc.datalen),
+                                    sizeof(struct ice_aqc_list_caps_elem));
        return status;
 }
 
 /**
- * ice_get_caps - get info about the HW
+ * ice_discover_caps - get info about the HW
  * @hw: pointer to the hardware structure
+ * @opc: capabilities type to discover - pass in the command opcode
  */
-enum ice_status ice_get_caps(struct ice_hw *hw)
+static enum ice_status ice_discover_caps(struct ice_hw *hw,
+                                        enum ice_adminq_opc opc)
 {
        enum ice_status status;
-       u16 data_size = 0;
+       u32 cap_count;
        u16 cbuf_len;
        u8 retries;
 
        /* The driver doesn't know how many capabilities the device will return
         * so the buffer size required isn't known ahead of time. The driver
         * starts with cbuf_len and if this turns out to be insufficient, the
-        * device returns ICE_AQ_RC_ENOMEM and also the buffer size it needs.
-        * The driver then allocates the buffer of this size and retries the
-        * operation. So it follows that the retry count is 2.
+        * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
+        * The driver then allocates the buffer based on the count and retries
+        * the operation. So it follows that the retry count is 2.
         */
 #define ICE_GET_CAP_BUF_COUNT  40
 #define ICE_GET_CAP_RETRY_COUNT        2
 
-       cbuf_len = ICE_GET_CAP_BUF_COUNT *
-               sizeof(struct ice_aqc_list_caps_elem);
-
+       cap_count = ICE_GET_CAP_BUF_COUNT;
        retries = ICE_GET_CAP_RETRY_COUNT;
 
        do {
                void *cbuf;
 
+               cbuf_len = (u16)(cap_count *
+                                sizeof(struct ice_aqc_list_caps_elem));
                cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
                if (!cbuf)
                        return ICE_ERR_NO_MEMORY;
 
-               status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &data_size,
-                                             ice_aqc_opc_list_func_caps, NULL);
+               status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
+                                             opc, NULL);
                devm_kfree(ice_hw_to_dev(hw), cbuf);
 
                if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
                        break;
 
                /* If ENOMEM is returned, try again with bigger buffer */
-               cbuf_len = data_size;
        } while (--retries);
 
        return status;
 }
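The allocate/query/retry flow above is a common pattern for variable-length firmware responses: guess an element count, let the device report the true count on ENOMEM, then reallocate once and retry. A self-contained sketch of the same pattern, with a hypothetical query() standing in for the AQ call:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* stand-in for the firmware call: fails and reports the needed
 * element count when the supplied buffer is too small */
static int query(void *buf, size_t len, size_t *count)
{
	const size_t needed = 100;	/* "true" count, unknown to the caller */

	if (len < needed * 4) {
		*count = needed;	/* tell the caller the real count */
		return -1;
	}
	memset(buf, 0, len);
	return 0;
}

int main(void)
{
	size_t count = 40;		/* initial guess */
	int retries = 2, ret = -1;	/* guess + one informed retry */

	while (retries--) {
		void *buf = calloc(count, 4);

		if (!buf)
			return 1;
		ret = query(buf, count * 4, &count);
		free(buf);
		if (!ret)
			break;		/* success; else retry with new count */
	}
	printf("ret=%d count=%zu\n", ret, count);
	return 0;
}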
 
+/**
+ * ice_get_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_get_caps(struct ice_hw *hw)
+{
+       enum ice_status status;
+
+       status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
+       if (!status)
+               status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
+
+       return status;
+}
+
 /**
  * ice_aq_manage_mac_write - manage MAC address write command
  * @hw: pointer to the hw struct
@@ -1722,8 +1793,7 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
  * ice_update_link_info - update status of the HW network link
  * @pi: port info structure of the interested logical port
  */
-static enum ice_status
-ice_update_link_info(struct ice_port_info *pi)
+enum ice_status ice_update_link_info(struct ice_port_info *pi)
 {
        struct ice_aqc_get_phy_caps_data *pcaps;
        struct ice_phy_info *phy_info;
@@ -2037,7 +2107,7 @@ ice_aq_get_set_rss_lut_exit:
 /**
  * ice_aq_get_rss_lut
  * @hw: pointer to the hardware structure
- * @vsi_id: VSI FW index
+ * @vsi_handle: software VSI handle
  * @lut_type: LUT table type
  * @lut: pointer to the LUT buffer provided by the caller
  * @lut_size: size of the LUT buffer
@@ -2045,17 +2115,20 @@ ice_aq_get_set_rss_lut_exit:
  * get the RSS lookup table, PF or VSI type
  */
 enum ice_status
-ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
-                  u16 lut_size)
+ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
+                  u8 *lut, u16 lut_size)
 {
-       return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
-                                       false);
+       if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
+               return ICE_ERR_PARAM;
+
+       return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+                                       lut_type, lut, lut_size, 0, false);
 }
 
 /**
  * ice_aq_set_rss_lut
  * @hw: pointer to the hardware structure
- * @vsi_id: VSI FW index
+ * @vsi_handle: software VSI handle
  * @lut_type: LUT table type
  * @lut: pointer to the LUT buffer provided by the caller
  * @lut_size: size of the LUT buffer
@@ -2063,11 +2136,14 @@ ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
  * set the RSS lookup table, PF or VSI type
  */
 enum ice_status
-ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
-                  u16 lut_size)
+ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
+                  u8 *lut, u16 lut_size)
 {
-       return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
-                                       true);
+       if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
+               return ICE_ERR_PARAM;
+
+       return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+                                       lut_type, lut, lut_size, 0, true);
 }
 
 /**
@@ -2108,31 +2184,39 @@ ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
 /**
  * ice_aq_get_rss_key
  * @hw: pointer to the hw struct
- * @vsi_id: VSI FW index
+ * @vsi_handle: software VSI handle
  * @key: pointer to key info struct
  *
  * get the RSS key per VSI
  */
 enum ice_status
-ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id,
+ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
                   struct ice_aqc_get_set_rss_keys *key)
 {
-       return __ice_aq_get_set_rss_key(hw, vsi_id, key, false);
+       if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
+               return ICE_ERR_PARAM;
+
+       return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+                                       key, false);
 }
 
 /**
  * ice_aq_set_rss_key
  * @hw: pointer to the hw struct
- * @vsi_id: VSI FW index
+ * @vsi_handle: software VSI handle
  * @keys: pointer to key info struct
  *
  * set the RSS key per VSI
  */
 enum ice_status
-ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id,
+ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
                   struct ice_aqc_get_set_rss_keys *keys)
 {
-       return __ice_aq_get_set_rss_key(hw, vsi_id, keys, true);
+       if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
+               return ICE_ERR_PARAM;
+
+       return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+                                       keys, true);
 }
 
 /**
@@ -2203,6 +2287,8 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
  * @num_qgrps: number of groups in the list
  * @qg_list: the list of groups to disable
  * @buf_size: the total size of the qg_list buffer in bytes
+ * @rst_src: if called due to reset, specifies the RST source
+ * @vmvf_num: the relative VM or VF number that is undergoing the reset
  * @cd: pointer to command details structure or NULL
  *
  * Disable LAN Tx queue (0x0C31)
@@ -2210,6 +2296,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
 static enum ice_status
 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
                   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
+                  enum ice_disq_rst_src rst_src, u16 vmvf_num,
                   struct ice_sq_cd *cd)
 {
        struct ice_aqc_dis_txqs *cmd;
@@ -2219,14 +2306,45 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
        cmd = &desc.params.dis_txqs;
        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
 
-       if (!qg_list)
+       /* qg_list can be NULL only in VM/VF reset flow */
+       if (!qg_list && !rst_src)
                return ICE_ERR_PARAM;
 
        if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
                return ICE_ERR_PARAM;
-       desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
        cmd->num_entries = num_qgrps;
 
+       cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
+                                           ICE_AQC_Q_DIS_TIMEOUT_M);
+
+       switch (rst_src) {
+       case ICE_VM_RESET:
+               cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
+               cmd->vmvf_and_timeout |=
+                       cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
+               break;
+       case ICE_VF_RESET:
+               cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
+               /* In this case, FW expects vmvf_num to be absolute VF id */
+               cmd->vmvf_and_timeout |=
+                       cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
+                                   ICE_AQC_Q_DIS_VMVF_NUM_M);
+               break;
+       case ICE_NO_RESET:
+       default:
+               break;
+       }
+
+       /* If no queue group info, we are in a reset flow. Issue the AQ */
+       if (!qg_list)
+               goto do_aq;
+
+       /* set RD bit to indicate that command buffer is provided by the driver
+        * and it needs to be read by the firmware
+        */
+       desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
        for (i = 0; i < num_qgrps; ++i) {
                /* Calculate the size taken up by the queue IDs in this group */
                sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
@@ -2242,6 +2360,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
        if (buf_size != sz)
                return ICE_ERR_PARAM;
 
+do_aq:
        return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
 }
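The vmvf_and_timeout field packs two values into one le16: a fixed five-unit timeout in the upper field and, for VM/VF resets, the VM/VF number in the lower field. A standalone sketch of that packing with assumed shift/mask values (the real ICE_AQC_Q_DIS_* macros are defined in the adminq headers, and the driver additionally byte-swaps with cpu_to_le16()):

#include <stdio.h>
#include <stdint.h>

/* assumed demo layout, not the real field definitions */
#define Q_DIS_VMVF_NUM_M	0x3FFu			/* low 10 bits */
#define Q_DIS_TIMEOUT_S		10
#define Q_DIS_TIMEOUT_M		(0x3Fu << Q_DIS_TIMEOUT_S)

int main(void)
{
	uint16_t vmvf_num = 7;	/* hypothetical relative VF number */
	uint16_t v = (5 << Q_DIS_TIMEOUT_S) & Q_DIS_TIMEOUT_M;

	v |= vmvf_num & Q_DIS_VMVF_NUM_M;	/* both fields in one word */
	printf("vmvf_and_timeout = 0x%04x\n", v);	/* prints 0x1407 */
	return 0;
}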
 
@@ -2471,7 +2590,7 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
 /**
  * ice_ena_vsi_txq
  * @pi: port information structure
- * @vsi_id: VSI id
+ * @vsi_handle: software VSI handle
  * @tc: tc number
  * @num_qgrps: Number of added queue groups
  * @buf: list of queue groups to be added
@@ -2481,7 +2600,7 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
  * This function adds one lan q
  */
 enum ice_status
-ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
                struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
                struct ice_sq_cd *cd)
 {
@@ -2498,15 +2617,19 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
 
        hw = pi->hw;
 
+       if (!ice_is_vsi_valid(hw, vsi_handle))
+               return ICE_ERR_PARAM;
+
        mutex_lock(&pi->sched_lock);
 
        /* find a parent node */
-       parent = ice_sched_get_free_qparent(pi, vsi_id, tc,
+       parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
                                            ICE_SCHED_NODE_OWNER_LAN);
        if (!parent) {
                status = ICE_ERR_PARAM;
                goto ena_txq_exit;
        }
+
        buf->parent_teid = parent->info.node_teid;
        node.parent_teid = parent->info.node_teid;
        /* Mark that the values in the "generic" section as valid. The default
@@ -2544,13 +2667,16 @@ ena_txq_exit:
  * @num_queues: number of queues
  * @q_ids: pointer to the q_id array
  * @q_teids: pointer to queue node teids
+ * @rst_src: if called due to reset, specifies the RST source
+ * @vmvf_num: the relative VM or VF number that is undergoing the reset
  * @cd: pointer to command details structure or NULL
  *
  * This function removes queues and their corresponding nodes in SW DB
  */
 enum ice_status
 ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
-               u32 *q_teids, struct ice_sq_cd *cd)
+               u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
+               struct ice_sq_cd *cd)
 {
        enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
        struct ice_aqc_dis_txq_item qg_list;
@@ -2559,6 +2685,15 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
        if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
                return ICE_ERR_CFG;
 
+       /* if the queues are already disabled but the disable queue command
+        * still has to be sent to complete the VF reset, call
+        * ice_aq_dis_lan_txq without any queue information
+        */
+
+       if (!num_queues && rst_src)
+               return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num,
+                                         NULL);
+
        mutex_lock(&pi->sched_lock);
 
        for (i = 0; i < num_queues; i++) {
@@ -2571,7 +2706,8 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
                qg_list.num_qs = 1;
                qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
                status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
-                                           sizeof(qg_list), cd);
+                                           sizeof(qg_list), rst_src, vmvf_num,
+                                           cd);
 
                if (status)
                        break;
@@ -2584,7 +2720,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
 /**
  * ice_cfg_vsi_qs - configure the new/existing VSI queues
  * @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
  * @tc_bitmap: TC bitmap
  * @maxqs: max queues array per TC
  * @owner: lan or rdma
@@ -2592,7 +2728,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
  * This function adds/updates the VSI queues per TC.
  */
 static enum ice_status
-ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
+ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
               u16 *maxqs, u8 owner)
 {
        enum ice_status status = 0;
@@ -2601,6 +2737,9 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
        if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
                return ICE_ERR_CFG;
 
+       if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+               return ICE_ERR_PARAM;
+
        mutex_lock(&pi->sched_lock);
 
        for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
@@ -2608,7 +2747,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
                if (!ice_sched_get_tc_node(pi, i))
                        continue;
 
-               status = ice_sched_cfg_vsi(pi, vsi_id, i, maxqs[i], owner,
+               status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
                                           ice_is_tc_ena(tc_bitmap, i));
                if (status)
                        break;
@@ -2621,16 +2760,140 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
 /**
  * ice_cfg_vsi_lan - configure VSI lan queues
  * @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
  * @tc_bitmap: TC bitmap
  * @max_lanqs: max lan queues array per TC
  *
  * This function adds/updates the VSI lan queues per TC.
  */
 enum ice_status
-ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
+ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
                u16 *max_lanqs)
 {
-       return ice_cfg_vsi_qs(pi, vsi_id, tc_bitmap, max_lanqs,
+       return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
                              ICE_SCHED_NODE_OWNER_LAN);
 }
+
+/**
+ * ice_replay_pre_init - replay pre-initialization
+ * @hw: pointer to the hw struct
+ *
+ * Initializes required config data for VSI, FD, ACL, and RSS before replay.
+ */
+static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
+{
+       struct ice_switch_info *sw = hw->switch_info;
+       u8 i;
+
+       /* Delete old entries from replay filter list head if there is any */
+       ice_rm_all_sw_replay_rule_info(hw);
+       /* At the start of replay, move entries into the replay_rules list;
+        * this allows rule entries to be added back to the filt_rules list,
+        * which is the operational list.
+        */
+       for (i = 0; i < ICE_SW_LKUP_LAST; i++)
+               list_replace_init(&sw->recp_list[i].filt_rules,
+                                 &sw->recp_list[i].filt_replay_rules);
+
+       return 0;
+}
+
+/**
+ * ice_replay_vsi - replay VSI configuration
+ * @hw: pointer to the hw struct
+ * @vsi_handle: driver VSI handle
+ *
+ * Restore all VSI configuration after reset. This function must be called
+ * for the main VSI first.
+ */
+enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
+{
+       enum ice_status status;
+
+       if (!ice_is_vsi_valid(hw, vsi_handle))
+               return ICE_ERR_PARAM;
+
+       /* Replay pre-initialization if there is any */
+       if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
+               status = ice_replay_pre_init(hw);
+               if (status)
+                       return status;
+       }
+
+       /* Replay all filters for this VSI */
+       status = ice_replay_vsi_all_fltr(hw, vsi_handle);
+       return status;
+}
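
As a caller-side sketch (not a function from this patch), the expected post-reset sequence is: replay the main VSI first, since that is what triggers ice_replay_pre_init(), then the remaining handles, then release the saved rules. The loop below assumes a hypothetical rebuild path that walks pf->vsi:

/* hypothetical rebuild-path helper, shown only to illustrate ordering */
static enum ice_status ice_replay_all_vsi(struct ice_pf *pf)
{
        struct ice_hw *hw = &pf->hw;
        enum ice_status status;
        u16 i;

        /* main VSI first: its replay runs ice_replay_pre_init() */
        status = ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
        if (status)
                return status;

        for (i = 0; i < pf->num_alloc_vsi; i++) {
                if (!pf->vsi[i] || pf->vsi[i]->idx == ICE_MAIN_VSI_HANDLE)
                        continue;
                status = ice_replay_vsi(hw, pf->vsi[i]->idx);
                if (status)
                        return status;
        }

        /* drop the saved replay rules once every VSI is restored */
        ice_replay_post(hw);
        return 0;
}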
+
+/**
+ * ice_replay_post - post replay configuration cleanup
+ * @hw: pointer to the hw struct
+ *
+ * Post replay cleanup.
+ */
+void ice_replay_post(struct ice_hw *hw)
+{
+       /* Delete old entries from replay filter list head */
+       ice_rm_all_sw_replay_rule_info(hw);
+}
+
+/**
+ * ice_stat_update40 - read 40 bit stat from the chip and update stat values
+ * @hw: ptr to the hardware info
+ * @hireg: high 32 bit HW register to read from
+ * @loreg: low 32 bit HW register to read from
+ * @prev_stat_loaded: bool to specify if previous stats are loaded
+ * @prev_stat: ptr to previous loaded stat value
+ * @cur_stat: ptr to current stat value
+ */
+void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
+                      bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
+{
+       u64 new_data;
+
+       new_data = rd32(hw, loreg);
+       new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
+
+       /* device stats are not reset at PFR, they likely will not be zeroed
+        * when the driver starts. So save the first values read and use them as
+        * offsets to be subtracted from the raw values in order to report stats
+        * that count from zero.
+        */
+       if (!prev_stat_loaded)
+               *prev_stat = new_data;
+       if (new_data >= *prev_stat)
+               *cur_stat = new_data - *prev_stat;
+       else
+               /* to manage the potential roll-over */
+               *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
+       *cur_stat &= 0xFFFFFFFFFFULL;
+}
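
The rollover arithmetic above is easiest to check with concrete numbers. A standalone model of just the 40-bit delta logic (this is not the driver function):

#include <stdint.h>
#include <stdio.h>

#define STAT40_MASK 0xFFFFFFFFFFULL     /* low 40 bits */

/* 40-bit counter delta with wrap handling, modelled on ice_stat_update40() */
static uint64_t stat40_delta(uint64_t prev, uint64_t cur)
{
        uint64_t d;

        if (cur >= prev)
                d = cur - prev;
        else
                d = (cur + (1ULL << 40)) - prev;        /* counter wrapped */
        return d & STAT40_MASK;
}

int main(void)
{
        /* counter wrapped from near the 40-bit limit back to 0x10 */
        printf("0x%llx\n",
               (unsigned long long)stat40_delta(0xFFFFFFFF00ULL, 0x10ULL));
        /* prints 0x110: 0x100 remaining before the wrap, plus 0x10 after */
        return 0;
}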
+
+/**
+ * ice_stat_update32 - read 32 bit stat from the chip and update stat values
+ * @hw: ptr to the hardware info
+ * @reg: HW register to read from
+ * @prev_stat_loaded: bool to specify if previous stats are loaded
+ * @prev_stat: ptr to previous loaded stat value
+ * @cur_stat: ptr to current stat value
+ */
+void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+                      u64 *prev_stat, u64 *cur_stat)
+{
+       u32 new_data;
+
+       new_data = rd32(hw, reg);
+
+       /* device stats are not reset at PFR, they likely will not be zeroed
+        * when the driver starts. So save the first values read and use them as
+        * offsets to be subtracted from the raw values in order to report stats
+        * that count from zero.
+        */
+       if (!prev_stat_loaded)
+               *prev_stat = new_data;
+       if (new_data >= *prev_stat)
+               *cur_stat = new_data - *prev_stat;
+       else
+               /* to manage the potential roll-over */
+               *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
+}
index aac2d6cadaafcf882b0db178694fef9152ada593..1900681289a4c1c55985750f0ce6635d07abf6d0 100644 (file)
@@ -7,6 +7,7 @@
 #include "ice.h"
 #include "ice_type.h"
 #include "ice_switch.h"
+#include <linux/avf/virtchnl.h>
 
 void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf,
                  u16 buf_len);
@@ -21,6 +22,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
                  struct ice_rq_event_info *e, u16 *pending);
 enum ice_status
 ice_get_link_status(struct ice_port_info *pi, bool *link_up);
+enum ice_status ice_update_link_info(struct ice_port_info *pi);
 enum ice_status
 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
                enum ice_aq_res_access_type access, u32 timeout);
@@ -37,17 +39,18 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
                  u32 rxq_index);
 
 enum ice_status
-ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
+ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut,
                   u16 lut_size);
 enum ice_status
-ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
+ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut,
                   u16 lut_size);
 enum ice_status
-ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id,
+ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
                   struct ice_aqc_get_set_rss_keys *keys);
 enum ice_status
-ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id,
+ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
                   struct ice_aqc_get_set_rss_keys *keys);
+
 bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
 void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
@@ -87,13 +90,20 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
                      struct ice_sq_cd *cd);
 enum ice_status
 ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
-               u32 *q_teids, struct ice_sq_cd *cmd_details);
+               u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
+               struct ice_sq_cd *cmd_details);
 enum ice_status
-ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
+ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
                u16 *max_lanqs);
 enum ice_status
-ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
                struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
                struct ice_sq_cd *cd);
+enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
+void ice_replay_post(struct ice_hw *hw);
 void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
+void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
+                      bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat);
+void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+                      u64 *prev_stat, u64 *cur_stat);
 #endif /* _ICE_COMMON_H_ */
index 1fe026a65d752a1ac37da57d461e3957cea5ec6d..84c967294eafc079e9884ef6a26cad19c2382231 100644 (file)
@@ -32,6 +32,36 @@ static void ice_adminq_init_regs(struct ice_hw *hw)
        cq->rq.head_mask = PF_FW_ARQH_ARQH_M;
 }
 
+/**
+ * ice_mailbox_init_regs - Initialize Mailbox registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_sq and alloc_rq functions have already been called
+ */
+static void ice_mailbox_init_regs(struct ice_hw *hw)
+{
+       struct ice_ctl_q_info *cq = &hw->mailboxq;
+
+       /* set head and tail registers in our local struct */
+       cq->sq.head = PF_MBX_ATQH;
+       cq->sq.tail = PF_MBX_ATQT;
+       cq->sq.len = PF_MBX_ATQLEN;
+       cq->sq.bah = PF_MBX_ATQBAH;
+       cq->sq.bal = PF_MBX_ATQBAL;
+       cq->sq.len_mask = PF_MBX_ATQLEN_ATQLEN_M;
+       cq->sq.len_ena_mask = PF_MBX_ATQLEN_ATQENABLE_M;
+       cq->sq.head_mask = PF_MBX_ATQH_ATQH_M;
+
+       cq->rq.head = PF_MBX_ARQH;
+       cq->rq.tail = PF_MBX_ARQT;
+       cq->rq.len = PF_MBX_ARQLEN;
+       cq->rq.bah = PF_MBX_ARQBAH;
+       cq->rq.bal = PF_MBX_ARQBAL;
+       cq->rq.len_mask = PF_MBX_ARQLEN_ARQLEN_M;
+       cq->rq.len_ena_mask = PF_MBX_ARQLEN_ARQENABLE_M;
+       cq->rq.head_mask = PF_MBX_ARQH_ARQH_M;
+}
+
 /**
  * ice_check_sq_alive
  * @hw: pointer to the hw struct
@@ -518,22 +548,31 @@ shutdown_sq_out:
 
 /**
  * ice_aq_ver_check - Check the reported AQ API version.
- * @fw_branch: The "branch" of FW, typically describes the device type
- * @fw_major: The major version of the FW API
- * @fw_minor: The minor version increment of the FW API
+ * @hw: pointer to the hardware structure
  *
  * Checks if the driver should load on a given AQ API version.
  *
  * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
  */
-static bool ice_aq_ver_check(u8 fw_branch, u8 fw_major, u8 fw_minor)
+static bool ice_aq_ver_check(struct ice_hw *hw)
 {
-       if (fw_branch != EXP_FW_API_VER_BRANCH)
-               return false;
-       if (fw_major != EXP_FW_API_VER_MAJOR)
-               return false;
-       if (fw_minor != EXP_FW_API_VER_MINOR)
+       if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
+               /* Major API version is newer than expected, don't load */
+               dev_warn(ice_hw_to_dev(hw),
+                        "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
                return false;
+       } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
+               if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
+                       dev_info(ice_hw_to_dev(hw),
+                                "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+               else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
+                       dev_info(ice_hw_to_dev(hw),
+                                "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+       } else {
+               /* Major API version is older than expected, log a warning */
+               dev_info(ice_hw_to_dev(hw),
+                        "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+       }
        return true;
 }
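
The policy above — refuse only a newer major version, tolerate the same major with up to two minor versions of skew before logging, and load-but-warn on an older major — can be modelled standalone; the EXP_* values here are placeholders, not the driver's:

#include <stdbool.h>
#include <stdio.h>

#define EXP_MAJOR 1     /* placeholder expected FW API version */
#define EXP_MINOR 3

/* Returns true when the driver would load; *nag is set when an NVM
 * too-new / too-old message would also be logged.
 */
static bool aq_ver_ok(unsigned int maj, unsigned int min, bool *nag)
{
        *nag = false;
        if (maj > EXP_MAJOR)
                return false;                   /* major too new: refuse */
        if (maj < EXP_MAJOR)
                *nag = true;                    /* major too old: load, warn */
        else if (min > EXP_MINOR + 2 || min + 2 < EXP_MINOR)
                *nag = true;                    /* minor skew beyond +/-2 */
        return true;
}

int main(void)
{
        bool nag;

        printf("%d\n", aq_ver_ok(1, 6, &nag));  /* 1: loads, nags (minor +3) */
        printf("%d\n", aq_ver_ok(2, 0, &nag));  /* 0: refuses to load */
        return 0;
}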
 
@@ -588,8 +627,7 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
        if (status)
                goto init_ctrlq_free_rq;
 
-       if (!ice_aq_ver_check(hw->api_branch, hw->api_maj_ver,
-                             hw->api_min_ver)) {
+       if (!ice_aq_ver_check(hw)) {
                status = ICE_ERR_FW_API_VER;
                goto init_ctrlq_free_rq;
        }
@@ -597,11 +635,11 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
        return 0;
 
 init_ctrlq_free_rq:
-       if (cq->rq.head) {
+       if (cq->rq.count) {
                ice_shutdown_rq(hw, cq);
                mutex_destroy(&cq->rq_lock);
        }
-       if (cq->sq.head) {
+       if (cq->sq.count) {
                ice_shutdown_sq(hw, cq);
                mutex_destroy(&cq->sq_lock);
        }
@@ -631,6 +669,10 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
                ice_adminq_init_regs(hw);
                cq = &hw->adminq;
                break;
+       case ICE_CTL_Q_MAILBOX:
+               ice_mailbox_init_regs(hw);
+               cq = &hw->mailboxq;
+               break;
        default:
                return ICE_ERR_PARAM;
        }
@@ -688,7 +730,12 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
        if (ret_code)
                return ret_code;
 
-       return ice_init_check_adminq(hw);
+       ret_code = ice_init_check_adminq(hw);
+       if (ret_code)
+               return ret_code;
+
+       /* Init Mailbox queue */
+       return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
 }
 
 /**
@@ -706,15 +753,18 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
                if (ice_check_sq_alive(hw, cq))
                        ice_aq_q_shutdown(hw, true);
                break;
+       case ICE_CTL_Q_MAILBOX:
+               cq = &hw->mailboxq;
+               break;
        default:
                return;
        }
 
-       if (cq->sq.head) {
+       if (cq->sq.count) {
                ice_shutdown_sq(hw, cq);
                mutex_destroy(&cq->sq_lock);
        }
-       if (cq->rq.head) {
+       if (cq->rq.count) {
                ice_shutdown_rq(hw, cq);
                mutex_destroy(&cq->rq_lock);
        }
@@ -728,6 +778,8 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw)
 {
        /* Shutdown FW admin queue */
        ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+       /* Shutdown PF-VF Mailbox */
+       ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
 }
 
 /**
@@ -850,7 +902,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 
        details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
        if (cd)
-               memcpy(details, cd, sizeof(*details));
+               *details = *cd;
        else
                memset(details, 0, sizeof(*details));
 
index ea02b89243e2ceded547fa7be26ca61a24a1b865..437f832fd7c4a95b65fc52db563816ab8788840b 100644 (file)
@@ -8,6 +8,7 @@
 
 /* Maximum buffer lengths for all control queue types */
 #define ICE_AQ_MAX_BUF_LEN 4096
+#define ICE_MBXQ_MAX_BUF_LEN 4096
 
 #define ICE_CTL_Q_DESC(R, i) \
        (&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
@@ -28,6 +29,7 @@
 enum ice_ctl_q {
        ICE_CTL_Q_UNKNOWN = 0,
        ICE_CTL_Q_ADMIN,
+       ICE_CTL_Q_MAILBOX,
 };
 
 /* Control Queue default settings */
index 0e14d7215a6e023b11cd1fd45451261c89178148..a6f0a5c0c3057fb08b068b69e5f8ba883e2473cc 100644 (file)
@@ -5,15 +5,11 @@
 #define _ICE_DEVIDS_H_
 
 /* Device IDs */
-/* Intel(R) Ethernet Controller C810 for backplane */
+/* Intel(R) Ethernet Controller E810-C for backplane */
 #define ICE_DEV_ID_C810_BACKPLANE      0x1591
-/* Intel(R) Ethernet Controller C810 for QSFP */
+/* Intel(R) Ethernet Controller E810-C for QSFP */
 #define ICE_DEV_ID_C810_QSFP           0x1592
-/* Intel(R) Ethernet Controller C810 for SFP */
+/* Intel(R) Ethernet Controller E810-C for SFP */
 #define ICE_DEV_ID_C810_SFP            0x1593
-/* Intel(R) Ethernet Controller C810/X557-AT 10GBASE-T */
-#define ICE_DEV_ID_C810_10G_BASE_T     0x1594
-/* Intel(R) Ethernet Controller C810 1GbE */
-#define ICE_DEV_ID_C810_SGMII          0x1595
 
 #endif /* _ICE_DEVIDS_H_ */
index db2c502ae9327edfe8365fec085e749b3ccfd037..96923580f2a6c2fdb88c88c2f1e88f0b4154f67c 100644 (file)
@@ -1198,9 +1198,11 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
        ring->tx_max_pending = ICE_MAX_NUM_DESC;
        ring->rx_pending = vsi->rx_rings[0]->count;
        ring->tx_pending = vsi->tx_rings[0]->count;
-       ring->rx_mini_pending = ICE_MIN_NUM_DESC;
+
+       /* Rx mini and jumbo rings are not supported */
        ring->rx_mini_max_pending = 0;
        ring->rx_jumbo_max_pending = 0;
+       ring->rx_mini_pending = 0;
        ring->rx_jumbo_pending = 0;
 }
 
@@ -1218,14 +1220,23 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
            ring->tx_pending < ICE_MIN_NUM_DESC ||
            ring->rx_pending > ICE_MAX_NUM_DESC ||
            ring->rx_pending < ICE_MIN_NUM_DESC) {
-               netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
+               netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
                           ring->tx_pending, ring->rx_pending,
-                          ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC);
+                          ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC,
+                          ICE_REQ_DESC_MULTIPLE);
                return -EINVAL;
        }
 
        new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
+       if (new_tx_cnt != ring->tx_pending)
+               netdev_info(netdev,
+                           "Requested Tx descriptor count rounded up to %d\n",
+                           new_tx_cnt);
        new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);
+       if (new_rx_cnt != ring->rx_pending)
+               netdev_info(netdev,
+                           "Requested Rx descriptor count rounded up to %d\n",
+                           new_rx_cnt);
 
        /* if nothing to do return success */
        if (new_tx_cnt == vsi->tx_rings[0]->count &&
index 88f11498804b388ddc0880b3ccbd9db6d24ca78e..a6679a9bfd3a598eadae94b1805293cc300c1238 100644 (file)
 #define PF_FW_ATQLEN_ATQCRIT_M                 BIT(30)
 #define PF_FW_ATQLEN_ATQENABLE_M               BIT(31)
 #define PF_FW_ATQT                             0x00080400
+#define PF_MBX_ARQBAH                          0x0022E400
+#define PF_MBX_ARQBAL                          0x0022E380
+#define PF_MBX_ARQH                            0x0022E500
+#define PF_MBX_ARQH_ARQH_M                     ICE_M(0x3FF, 0)
+#define PF_MBX_ARQLEN                          0x0022E480
+#define PF_MBX_ARQLEN_ARQLEN_M                 ICE_M(0x3FF, 0)
+#define PF_MBX_ARQLEN_ARQENABLE_M              BIT(31)
+#define PF_MBX_ARQT                            0x0022E580
+#define PF_MBX_ATQBAH                          0x0022E180
+#define PF_MBX_ATQBAL                          0x0022E100
+#define PF_MBX_ATQH                            0x0022E280
+#define PF_MBX_ATQH_ATQH_M                     ICE_M(0x3FF, 0)
+#define PF_MBX_ATQLEN                          0x0022E200
+#define PF_MBX_ATQLEN_ATQLEN_M                 ICE_M(0x3FF, 0)
+#define PF_MBX_ATQLEN_ATQENABLE_M              BIT(31)
+#define PF_MBX_ATQT                            0x0022E300
 #define GLFLXP_RXDID_FLAGS(_i, _j)             (0x0045D000 + ((_i) * 4 + (_j) * 256))
 #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S      0
 #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M      ICE_M(0x3F, 0)
 #define GLGEN_RTRIG_CORER_M                    BIT(0)
 #define GLGEN_RTRIG_GLOBR_M                    BIT(1)
 #define GLGEN_STAT                             0x000B612C
+#define GLGEN_VFLRSTAT(_i)                     (0x00093A04 + ((_i) * 4))
 #define PFGEN_CTRL                             0x00091000
 #define PFGEN_CTRL_PFSWR_M                     BIT(0)
 #define PFGEN_STATE                            0x00088000
 #define PRTGEN_STATUS                          0x000B8100
+#define VFGEN_RSTAT(_VF)                       (0x00074000 + ((_VF) * 4))
+#define VPGEN_VFRSTAT(_VF)                     (0x00090800 + ((_VF) * 4))
+#define VPGEN_VFRSTAT_VFRD_M                   BIT(0)
+#define VPGEN_VFRTRIG(_VF)                     (0x00090000 + ((_VF) * 4))
+#define VPGEN_VFRTRIG_VFSWR_M                  BIT(0)
 #define PFHMC_ERRORDATA                                0x00520500
 #define PFHMC_ERRORINFO                                0x00520400
 #define GLINT_DYN_CTL(_INT)                    (0x00160000 + ((_INT) * 4))
 #define GLINT_DYN_CTL_SW_ITR_INDX_M            ICE_M(0x3, 25)
 #define GLINT_DYN_CTL_INTENA_MSK_M             BIT(31)
 #define GLINT_ITR(_i, _INT)                    (0x00154000 + ((_i) * 8192 + (_INT) * 4))
+#define GLINT_RATE(_INT)                       (0x0015A000 + ((_INT) * 4))
+#define GLINT_RATE_INTRL_ENA_M                 BIT(6)
+#define GLINT_VECT2FUNC(_INT)                  (0x00162000 + ((_INT) * 4))
+#define GLINT_VECT2FUNC_VF_NUM_S               0
+#define GLINT_VECT2FUNC_VF_NUM_M               ICE_M(0xFF, 0)
+#define GLINT_VECT2FUNC_PF_NUM_S               12
+#define GLINT_VECT2FUNC_PF_NUM_M               ICE_M(0x7, 12)
+#define GLINT_VECT2FUNC_IS_PF_S                        16
+#define GLINT_VECT2FUNC_IS_PF_M                        BIT(16)
 #define PFINT_FW_CTL                           0x0016C800
 #define PFINT_FW_CTL_MSIX_INDX_M               ICE_M(0x7FF, 0)
 #define PFINT_FW_CTL_ITR_INDX_S                        11
 #define PFINT_FW_CTL_ITR_INDX_M                        ICE_M(0x3, 11)
 #define PFINT_FW_CTL_CAUSE_ENA_M               BIT(30)
+#define PFINT_MBX_CTL                          0x0016B280
+#define PFINT_MBX_CTL_MSIX_INDX_M              ICE_M(0x7FF, 0)
+#define PFINT_MBX_CTL_ITR_INDX_S               11
+#define PFINT_MBX_CTL_ITR_INDX_M               ICE_M(0x3, 11)
+#define PFINT_MBX_CTL_CAUSE_ENA_M              BIT(30)
 #define PFINT_OICR                             0x0016CA00
 #define PFINT_OICR_ECC_ERR_M                   BIT(16)
 #define PFINT_OICR_MAL_DETECT_M                        BIT(19)
 #define PFINT_OICR_PCI_EXCEPTION_M             BIT(21)
 #define PFINT_OICR_HMC_ERR_M                   BIT(26)
 #define PFINT_OICR_PE_CRITERR_M                        BIT(28)
+#define PFINT_OICR_VFLR_M                      BIT(29)
 #define PFINT_OICR_CTL                         0x0016CA80
 #define PFINT_OICR_CTL_MSIX_INDX_M             ICE_M(0x7FF, 0)
 #define PFINT_OICR_CTL_ITR_INDX_S              11
 #define QINT_TQCTL_MSIX_INDX_S                 0
 #define QINT_TQCTL_ITR_INDX_S                  11
 #define QINT_TQCTL_CAUSE_ENA_M                 BIT(30)
+#define VPINT_ALLOC(_VF)                       (0x001D1000 + ((_VF) * 4))
+#define VPINT_ALLOC_FIRST_S                    0
+#define VPINT_ALLOC_FIRST_M                    ICE_M(0x7FF, 0)
+#define VPINT_ALLOC_LAST_S                     12
+#define VPINT_ALLOC_LAST_M                     ICE_M(0x7FF, 12)
+#define VPINT_ALLOC_VALID_M                    BIT(31)
 #define QRX_CONTEXT(_i, _QRX)                  (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
 #define QRX_CTRL(_QRX)                         (0x00120000 + ((_QRX) * 4))
 #define QRX_CTRL_MAX_INDEX                     2047
 #define QRX_TAIL_MAX_INDEX                     2047
 #define QRX_TAIL_TAIL_S                                0
 #define QRX_TAIL_TAIL_M                                ICE_M(0x1FFF, 0)
+#define VPLAN_RX_QBASE(_VF)                    (0x00072000 + ((_VF) * 4))
+#define VPLAN_RX_QBASE_VFFIRSTQ_S              0
+#define VPLAN_RX_QBASE_VFFIRSTQ_M              ICE_M(0x7FF, 0)
+#define VPLAN_RX_QBASE_VFNUMQ_S                        16
+#define VPLAN_RX_QBASE_VFNUMQ_M                        ICE_M(0xFF, 16)
+#define VPLAN_RXQ_MAPENA(_VF)                  (0x00073000 + ((_VF) * 4))
+#define VPLAN_RXQ_MAPENA_RX_ENA_M              BIT(0)
+#define VPLAN_TX_QBASE(_VF)                    (0x001D1800 + ((_VF) * 4))
+#define VPLAN_TX_QBASE_VFFIRSTQ_S              0
+#define VPLAN_TX_QBASE_VFFIRSTQ_M              ICE_M(0x3FFF, 0)
+#define VPLAN_TX_QBASE_VFNUMQ_S                        16
+#define VPLAN_TX_QBASE_VFNUMQ_M                        ICE_M(0xFF, 16)
+#define VPLAN_TXQ_MAPENA(_VF)                  (0x00073800 + ((_VF) * 4))
+#define VPLAN_TXQ_MAPENA_TX_ENA_M              BIT(0)
 #define GL_MDET_RX                             0x00294C00
 #define GL_MDET_RX_QNUM_S                      0
 #define GL_MDET_RX_QNUM_M                      ICE_M(0x7FFF, 0)
 #define PF_MDET_TX_PQM_VALID_M                 BIT(0)
 #define PF_MDET_TX_TCLAN                       0x000FC000
 #define PF_MDET_TX_TCLAN_VALID_M               BIT(0)
+#define VP_MDET_RX(_VF)                                (0x00294400 + ((_VF) * 4))
+#define VP_MDET_RX_VALID_M                     BIT(0)
+#define VP_MDET_TX_PQM(_VF)                    (0x002D2000 + ((_VF) * 4))
+#define VP_MDET_TX_PQM_VALID_M                 BIT(0)
+#define VP_MDET_TX_TCLAN(_VF)                  (0x000FB800 + ((_VF) * 4))
+#define VP_MDET_TX_TCLAN_VALID_M               BIT(0)
+#define VP_MDET_TX_TDPU(_VF)                   (0x00040000 + ((_VF) * 4))
+#define VP_MDET_TX_TDPU_VALID_M                        BIT(0)
 #define GLNVM_FLA                              0x000B6108
 #define GLNVM_FLA_LOCKED_M                     BIT(6)
 #define GLNVM_GENS                             0x000B6100
 #define PF_FUNC_RID                            0x0009E880
 #define PF_FUNC_RID_FUNC_NUM_S                 0
 #define PF_FUNC_RID_FUNC_NUM_M                 ICE_M(0x7, 0)
+#define PF_PCI_CIAA                            0x0009E580
+#define PF_PCI_CIAA_VF_NUM_S                   12
+#define PF_PCI_CIAD                            0x0009E500
+#define GL_PWR_MODE_CTL                                0x000B820C
+#define GL_PWR_MODE_CTL_CAR_MAX_BW_S           30
+#define GL_PWR_MODE_CTL_CAR_MAX_BW_M           ICE_M(0x3, 30)
 #define GLPRT_BPRCH(_i)                                (0x00381384 + ((_i) * 8))
 #define GLPRT_BPRCL(_i)                                (0x00381380 + ((_i) * 8))
 #define GLPRT_BPTCH(_i)                                (0x00381244 + ((_i) * 8))
 #define GLV_UPTCH(_i)                          (0x0030A004 + ((_i) * 8))
 #define GLV_UPTCL(_i)                          (0x0030A000 + ((_i) * 8))
 #define VSIQF_HKEY_MAX_INDEX                   12
+#define VSIQF_HLUT_MAX_INDEX                   15
+#define VFINT_DYN_CTLN(_i)                     (0x00003800 + ((_i) * 4))
+#define VFINT_DYN_CTLN_CLEARPBA_M              BIT(1)
 
 #endif /* _ICE_HW_AUTOGEN_H_ */
index 94504023d86e2ad5a7409a3955ca2f5ce0bbe94d..7d2a66739e3f6605321af0ace9c6f57679ffb44f 100644 (file)
@@ -418,6 +418,7 @@ struct ice_tlan_ctx {
        u8  pf_num;
        u16 vmvf_num;
        u8  vmvf_type;
+#define ICE_TLAN_CTX_VMVF_TYPE_VF      0
 #define ICE_TLAN_CTX_VMVF_TYPE_VMQ     1
 #define ICE_TLAN_CTX_VMVF_TYPE_PF      2
        u16 src_vsi;
@@ -473,4 +474,16 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
 {
        return ice_ptype_lkup[ptype];
 }
+
+#define ICE_LINK_SPEED_UNKNOWN         0
+#define ICE_LINK_SPEED_10MBPS          10
+#define ICE_LINK_SPEED_100MBPS         100
+#define ICE_LINK_SPEED_1000MBPS                1000
+#define ICE_LINK_SPEED_2500MBPS                2500
+#define ICE_LINK_SPEED_5000MBPS                5000
+#define ICE_LINK_SPEED_10000MBPS       10000
+#define ICE_LINK_SPEED_20000MBPS       20000
+#define ICE_LINK_SPEED_25000MBPS       25000
+#define ICE_LINK_SPEED_40000MBPS       40000
+
 #endif /* _ICE_LAN_TX_RX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
new file mode 100644 (file)
index 0000000..49f1940
--- /dev/null
@@ -0,0 +1,2619 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+
+/**
+ * ice_setup_rx_ctx - Configure a receive ring context
+ * @ring: The Rx ring to configure
+ *
+ * Configure the Rx descriptor ring in RLAN context.
+ */
+static int ice_setup_rx_ctx(struct ice_ring *ring)
+{
+       struct ice_vsi *vsi = ring->vsi;
+       struct ice_hw *hw = &vsi->back->hw;
+       u32 rxdid = ICE_RXDID_FLEX_NIC;
+       struct ice_rlan_ctx rlan_ctx;
+       u32 regval;
+       u16 pf_q;
+       int err;
+
+       /* the Rx queue number in the global space of 2K Rx queues */
+       pf_q = vsi->rxq_map[ring->q_index];
+
+       /* clear the context structure first */
+       memset(&rlan_ctx, 0, sizeof(rlan_ctx));
+
+       rlan_ctx.base = ring->dma >> 7;
+
+       rlan_ctx.qlen = ring->count;
+
+       /* Receive Packet Data Buffer Size.
+        * The Packet Data Buffer Size is defined in 128 byte units.
+        */
+       rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
+
+       /* use 32 byte descriptors */
+       rlan_ctx.dsize = 1;
+
+       /* Strip the Ethernet CRC bytes before the packet is posted to host
+        * memory.
+        */
+       rlan_ctx.crcstrip = 1;
+
+       /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
+       rlan_ctx.l2tsel = 1;
+
+       rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
+       rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
+       rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
+
+       /* This controls whether VLAN is stripped from inner headers
+        * The VLAN in the inner L2 header is stripped to the receive
+        * descriptor if enabled by this flag.
+        */
+       rlan_ctx.showiv = 0;
+
+       /* Max packet size for this queue - must not be set to a larger value
+        * than 5 x DBUF
+        */
+       rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
+                              ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);
+
+       /* Rx queue threshold in units of 64 */
+       rlan_ctx.lrxqthresh = 1;
+
+       /* Enable Flexible Descriptors in the queue context, which
+        * allows this driver to select a specific receive descriptor format
+        */
+       if (vsi->type != ICE_VSI_VF) {
+               regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
+               regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
+                       QRXFLXP_CNTXT_RXDID_IDX_M;
+
+               /* increasing context priority to pick up profile id;
+                * default is 0x01; setting to 0x03 to ensure the profile
+                * is programmed if the previous context had the same priority
+                */
+               regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+                       QRXFLXP_CNTXT_RXDID_PRIO_M;
+
+               wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+       }
+
+       /* Absolute queue number out of 2K needs to be passed */
+       err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
+       if (err) {
+               dev_err(&vsi->back->pdev->dev,
+                       "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+                       pf_q, err);
+               return -EIO;
+       }
+
+       if (vsi->type == ICE_VSI_VF)
+               return 0;
+
+       /* init queue specific tail register */
+       ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
+       writel(0, ring->tail);
+       ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
+
+       return 0;
+}
+
+/**
+ * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
+ * @ring: The Tx ring to configure
+ * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
+ * @pf_q: queue index in the PF space
+ *
+ * Configure the Tx descriptor ring in TLAN context.
+ */
+static void
+ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+{
+       struct ice_vsi *vsi = ring->vsi;
+       struct ice_hw *hw = &vsi->back->hw;
+
+       tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
+
+       tlan_ctx->port_num = vsi->port_info->lport;
+
+       /* Transmit Queue Length */
+       tlan_ctx->qlen = ring->count;
+
+       /* PF number */
+       tlan_ctx->pf_num = hw->pf_id;
+
+       /* queue belongs to a specific VSI type
+        * VF / VM index should be programmed per vmvf_type setting:
+        * for vmvf_type = VF, it is VF number between 0-256
+        * for vmvf_type = VM, it is VM number between 0-767
+        * for PF or EMP this field should be set to zero
+        */
+       switch (vsi->type) {
+       case ICE_VSI_PF:
+               tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+               break;
+       case ICE_VSI_VF:
+               /* Firmware expects vmvf_num to be absolute VF id */
+               tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
+               tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
+               break;
+       default:
+               return;
+       }
+
+       /* make sure the context is associated with the right VSI */
+       tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
+
+       tlan_ctx->tso_ena = ICE_TX_LEGACY;
+       tlan_ctx->tso_qnum = pf_q;
+
+       /* Legacy or Advanced Host Interface:
+        * 0: Advanced Host Interface
+        * 1: Legacy Host Interface
+        */
+       tlan_ctx->legacy_int = ICE_TX_LEGACY;
+}
+
+/**
+ * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
+ * @pf: the PF being configured
+ * @pf_q: the PF queue
+ * @ena: enable or disable state of the queue
+ *
+ * This routine will wait for the given Rx queue of the PF to reach the
+ * enabled or disabled state.
+ * Returns -ETIMEDOUT in case of failing to reach the requested state after
+ * multiple retries; else will return 0 in case of success.
+ */
+static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
+{
+       int i;
+
+       for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
+               u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
+
+               if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
+                       break;
+
+               usleep_range(10, 20);
+       }
+       if (i >= ICE_Q_WAIT_RETRY_LIMIT)
+               return -ETIMEDOUT;
+
+       return 0;
+}
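
ice_pf_rxq_wait() is the usual bounded-poll idiom: re-read a status bit until it matches the requested state or the retry budget runs out. A standalone model, with usleep() standing in for usleep_range() and a stub for the QRX_CTRL status read:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define RETRY_LIMIT 100

/* stub for reading QRX_CTRL and testing the QENA_STAT bit */
static bool queue_enabled(void)
{
        return true;
}

static int wait_for_queue(bool want_enabled)
{
        int i;

        for (i = 0; i < RETRY_LIMIT; i++) {
                if (queue_enabled() == want_enabled)
                        return 0;               /* reached requested state */
                usleep(15);                     /* cf. usleep_range(10, 20) */
        }
        return -1;                              /* cf. -ETIMEDOUT */
}

int main(void)
{
        printf("%d\n", wait_for_queue(true));
        return 0;
}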
+
+/**
+ * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
+ * @vsi: the VSI being configured
+ * @ena: start or stop the Rx rings
+ */
+static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
+{
+       struct ice_pf *pf = vsi->back;
+       struct ice_hw *hw = &pf->hw;
+       int i, j, ret = 0;
+
+       for (i = 0; i < vsi->num_rxq; i++) {
+               int pf_q = vsi->rxq_map[i];
+               u32 rx_reg;
+
+               for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) {
+                       rx_reg = rd32(hw, QRX_CTRL(pf_q));
+                       if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) ==
+                           ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1))
+                               break;
+                       usleep_range(1000, 2000);
+               }
+
+               /* Skip if the queue is already in the requested state */
+               if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
+                       continue;
+
+               /* turn on/off the queue */
+               if (ena)
+                       rx_reg |= QRX_CTRL_QENA_REQ_M;
+               else
+                       rx_reg &= ~QRX_CTRL_QENA_REQ_M;
+               wr32(hw, QRX_CTRL(pf_q), rx_reg);
+
+               /* wait for the change to finish */
+               ret = ice_pf_rxq_wait(pf, pf_q, ena);
+               if (ret) {
+                       dev_err(&pf->pdev->dev,
+                               "VSI idx %d Rx ring %d %sable timeout\n",
+                               vsi->idx, pf_q, (ena ? "en" : "dis"));
+                       break;
+               }
+       }
+
+       return ret;
+}
+
+/**
+ * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
+ * @vsi: VSI pointer
+ * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
+ *
+ * On error: returns error code (negative)
+ * On success: returns 0
+ */
+static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
+{
+       struct ice_pf *pf = vsi->back;
+
+       /* allocate memory for both Tx and Rx ring pointers */
+       vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
+                                    sizeof(struct ice_ring *), GFP_KERNEL);
+       if (!vsi->tx_rings)
+               goto err_txrings;
+
+       vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
+                                    sizeof(struct ice_ring *), GFP_KERNEL);
+       if (!vsi->rx_rings)
+               goto err_rxrings;
+
+       if (alloc_qvectors) {
+               /* allocate memory for q_vector pointers */
+               vsi->q_vectors = devm_kcalloc(&pf->pdev->dev,
+                                             vsi->num_q_vectors,
+                                             sizeof(struct ice_q_vector *),
+                                             GFP_KERNEL);
+               if (!vsi->q_vectors)
+                       goto err_vectors;
+       }
+
+       return 0;
+
+err_vectors:
+       devm_kfree(&pf->pdev->dev, vsi->rx_rings);
+err_rxrings:
+       devm_kfree(&pf->pdev->dev, vsi->tx_rings);
+err_txrings:
+       return -ENOMEM;
+}
+
+/**
+ * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI
+ * @vsi: the VSI being configured
+ */
+static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
+{
+       struct ice_pf *pf = vsi->back;
+
+       switch (vsi->type) {
+       case ICE_VSI_PF:
+               vsi->alloc_txq = pf->num_lan_tx;
+               vsi->alloc_rxq = pf->num_lan_rx;
+               vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
+               vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
+               break;
+       case ICE_VSI_VF:
+               vsi->alloc_txq = pf->num_vf_qps;
+               vsi->alloc_rxq = pf->num_vf_qps;
+               /* pf->num_vf_msix includes (VF miscellaneous vector +
+                * data queue interrupts). Since vsi->num_q_vectors is the number
+                * of queue vectors, subtract 1 from the original vector
+                * count
+                */
+               vsi->num_q_vectors = pf->num_vf_msix - 1;
+               break;
+       default:
+               dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
+                        vsi->type);
+               break;
+       }
+}
+
+/**
+ * ice_get_free_slot - get the next free (NULL) slot index in an array
+ * @array: array to search
+ * @size: size of the array
+ * @curr: last known occupied index to be used as a search hint
+ *
+ * void * is being used to keep the functionality generic. This lets us use this
+ * function on any array of pointers.
+ */
+static int ice_get_free_slot(void *array, int size, int curr)
+{
+       int **tmp_array = (int **)array;
+       int next;
+
+       if (curr < (size - 1) && !tmp_array[curr + 1]) {
+               next = curr + 1;
+       } else {
+               int i = 0;
+
+               while ((i < size) && (tmp_array[i]))
+                       i++;
+               if (i == size)
+                       next = ICE_NO_VSI;
+               else
+                       next = i;
+       }
+       return next;
+}
+
+/**
+ * ice_vsi_delete - delete a VSI from the switch
+ * @vsi: pointer to VSI being removed
+ */
+void ice_vsi_delete(struct ice_vsi *vsi)
+{
+       struct ice_pf *pf = vsi->back;
+       struct ice_vsi_ctx ctxt;
+       enum ice_status status;
+
+       if (vsi->type == ICE_VSI_VF)
+               ctxt.vf_num = vsi->vf_id;
+       ctxt.vsi_num = vsi->vsi_num;
+
+       memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));
+
+       status = ice_free_vsi(&pf->hw, vsi->idx, &ctxt, false, NULL);
+       if (status)
+               dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
+                       vsi->vsi_num);
+}
+
+/**
+ * ice_vsi_free_arrays - clean up VSI resources
+ * @vsi: pointer to VSI being cleared
+ * @free_qvectors: bool to specify if q_vectors should be deallocated
+ */
+static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors)
+{
+       struct ice_pf *pf = vsi->back;
+
+       /* free the ring and vector containers */
+       if (free_qvectors && vsi->q_vectors) {
+               devm_kfree(&pf->pdev->dev, vsi->q_vectors);
+               vsi->q_vectors = NULL;
+       }
+       if (vsi->tx_rings) {
+               devm_kfree(&pf->pdev->dev, vsi->tx_rings);
+               vsi->tx_rings = NULL;
+       }
+       if (vsi->rx_rings) {
+               devm_kfree(&pf->pdev->dev, vsi->rx_rings);
+               vsi->rx_rings = NULL;
+       }
+}
+
+/**
+ * ice_vsi_clear - clean up and deallocate the provided VSI
+ * @vsi: pointer to VSI being cleared
+ *
+ * This deallocates the VSI's queue resources, removes it from the PF's
+ * VSI array if necessary, and deallocates the VSI
+ *
+ * Returns 0 on success, negative on failure
+ */
+int ice_vsi_clear(struct ice_vsi *vsi)
+{
+       struct ice_pf *pf = NULL;
+
+       if (!vsi)
+               return 0;
+
+       if (!vsi->back)
+               return -EINVAL;
+
+       pf = vsi->back;
+
+       if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
+               dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
+                       vsi->idx);
+               return -EINVAL;
+       }
+
+       mutex_lock(&pf->sw_mutex);
+       /* updates the PF for this cleared VSI */
+
+       pf->vsi[vsi->idx] = NULL;
+       if (vsi->idx < pf->next_vsi)
+               pf->next_vsi = vsi->idx;
+
+       ice_vsi_free_arrays(vsi, true);
+       mutex_unlock(&pf->sw_mutex);
+       devm_kfree(&pf->pdev->dev, vsi);
+
+       return 0;
+}
+
+/**
+ * ice_msix_clean_rings - MSIX mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ */
+irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
+{
+       struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
+
+       if (!q_vector->tx.ring && !q_vector->rx.ring)
+               return IRQ_HANDLED;
+
+       napi_schedule(&q_vector->napi);
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * ice_vsi_alloc - Allocates the next available struct VSI in the PF
+ * @pf: board private structure
+ * @type: type of VSI
+ *
+ * returns a pointer to a VSI on success, NULL on failure.
+ */
+static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
+{
+       struct ice_vsi *vsi = NULL;
+
+       /* Need to protect the allocation of the VSIs at the PF level */
+       mutex_lock(&pf->sw_mutex);
+
+       /* If we have already allocated our maximum number of VSIs,
+        * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
+        * is available to be populated
+        */
+       if (pf->next_vsi == ICE_NO_VSI) {
+               dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
+               goto unlock_pf;
+       }
+
+       vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
+       if (!vsi)
+               goto unlock_pf;
+
+       vsi->type = type;
+       vsi->back = pf;
+       set_bit(__ICE_DOWN, vsi->state);
+       vsi->idx = pf->next_vsi;
+       vsi->work_lmt = ICE_DFLT_IRQ_WORK;
+
+       ice_vsi_set_num_qs(vsi);
+
+       switch (vsi->type) {
+       case ICE_VSI_PF:
+               if (ice_vsi_alloc_arrays(vsi, true))
+                       goto err_rings;
+
+               /* Setup default MSIX irq handler for VSI */
+               vsi->irq_handler = ice_msix_clean_rings;
+               break;
+       case ICE_VSI_VF:
+               if (ice_vsi_alloc_arrays(vsi, true))
+                       goto err_rings;
+               break;
+       default:
+               dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
+               goto unlock_pf;
+       }
+
+       /* fill VSI slot in the PF struct */
+       pf->vsi[pf->next_vsi] = vsi;
+
+       /* prepare pf->next_vsi for next use */
+       pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
+                                        pf->next_vsi);
+       goto unlock_pf;
+
+err_rings:
+       devm_kfree(&pf->pdev->dev, vsi);
+       vsi = NULL;
+unlock_pf:
+       mutex_unlock(&pf->sw_mutex);
+       return vsi;
+}
+
+/**
+ * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
+ * @vsi: the VSI getting queues
+ *
+ * Return 0 on success and a negative value on error
+ */
+static int ice_vsi_get_qs_contig(struct ice_vsi *vsi)
+{
+       struct ice_pf *pf = vsi->back;
+       int offset, ret = 0;
+
+       mutex_lock(&pf->avail_q_mutex);
+       /* look for contiguous block of queues for Tx */
+       offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS,
+                                           0, vsi->alloc_txq, 0);
+       if (offset < ICE_MAX_TXQS) {
+               int i;
+
+               bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq);
+               for (i = 0; i < vsi->alloc_txq; i++)
+                       vsi->txq_map[i] = i + offset;
+       } else {
+               ret = -ENOMEM;
+               vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER;
+       }
+
+       /* look for contiguous block of queues for Rx */
+       offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS,
+                                           0, vsi->alloc_rxq, 0);
+       if (offset < ICE_MAX_RXQS) {
+               int i;
+
+               bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq);
+               for (i = 0; i < vsi->alloc_rxq; i++)
+                       vsi->rxq_map[i] = i + offset;
+       } else {
+               ret = -ENOMEM;
+               vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER;
+       }
+       mutex_unlock(&pf->avail_q_mutex);
+
+       return ret;
+}
+
+/**
+ * ice_vsi_get_qs_scatter - Assign scattered queues to VSI
+ * @vsi: the VSI getting queues
+ *
+ * Return 0 on success and a negative value on error
+ */
+static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi)
+{
+       struct ice_pf *pf = vsi->back;
+       int i, index = 0;
+
+       mutex_lock(&pf->avail_q_mutex);
+
+       if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) {
+               for (i = 0; i < vsi->alloc_txq; i++) {
+                       index = find_next_zero_bit(pf->avail_txqs,
+                                                  ICE_MAX_TXQS, index);
+                       if (index < ICE_MAX_TXQS) {
+                               set_bit(index, pf->avail_txqs);
+                               vsi->txq_map[i] = index;
+                       } else {
+                               goto err_scatter_tx;
+                       }
+               }
+       }
+
+       if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) {
+               for (i = 0; i < vsi->alloc_rxq; i++) {
+                       index = find_next_zero_bit(pf->avail_rxqs,
+                                                  ICE_MAX_RXQS, index);
+                       if (index < ICE_MAX_RXQS) {
+                               set_bit(index, pf->avail_rxqs);
+                               vsi->rxq_map[i] = index;
+                       } else {
+                               goto err_scatter_rx;
+                       }
+               }
+       }
+
+       mutex_unlock(&pf->avail_q_mutex);
+       return 0;
+
+err_scatter_rx:
+       /* unflag any queues we have grabbed (i is failed position) */
+       for (index = 0; index < i; index++) {
+               clear_bit(vsi->rxq_map[index], pf->avail_rxqs);
+               vsi->rxq_map[index] = 0;
+       }
+       i = vsi->alloc_txq;
+err_scatter_tx:
+       /* i is either position of failed attempt or vsi->alloc_txq */
+       for (index = 0; index < i; index++) {
+               clear_bit(vsi->txq_map[index], pf->avail_txqs);
+               vsi->txq_map[index] = 0;
+       }
+
+       mutex_unlock(&pf->avail_q_mutex);
+       return -ENOMEM;
+}
+
+/**
+ * ice_vsi_get_qs - Assign queues from PF to VSI
+ * @vsi: the VSI to assign queues to
+ *
+ * Returns 0 on success and a negative value on error
+ */
+static int ice_vsi_get_qs(struct ice_vsi *vsi)
+{
+       int ret = 0;
+
+       vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
+       vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
+
+       /* NOTE: ice_vsi_get_qs_contig() will set the Rx/Tx mapping
+        * modes individually to scatter if assigning contiguous queues
+        * to Rx or Tx fails
+        */
+       ret = ice_vsi_get_qs_contig(vsi);
+       if (ret < 0) {
+               if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER)
+                       vsi->alloc_txq = max_t(u16, vsi->alloc_txq,
+                                              ICE_MAX_SCATTER_TXQS);
+               if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER)
+                       vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq,
+                                              ICE_MAX_SCATTER_RXQS);
+               ret = ice_vsi_get_qs_scatter(vsi);
+       }
+
+       return ret;
+}
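
The contiguous-then-scatter fallback above is a generic bitmap-allocation pattern: try to find one free run of the requested length, and only if that fails take free bits wherever they sit. A standalone sketch with a toy 64-queue bitmap (set bit = queue taken; nothing here is driver API):

#include <stdint.h>
#include <stdio.h>

#define NQ 64

/* try to grab 'n' consecutive free bits; fall back to singles */
static int alloc_queues(uint64_t *avail, int n, int *map)
{
        int start, i, got = 0;

        /* contiguous pass */
        for (start = 0; start + n <= NQ; start++) {
                uint64_t m = (n == 64 ? ~0ULL : ((1ULL << n) - 1)) << start;

                if ((*avail & m) == 0) {        /* all n bits free */
                        *avail |= m;
                        for (i = 0; i < n; i++)
                                map[i] = start + i;
                        return 0;
                }
        }
        /* scatter pass: take free bits wherever they are */
        for (i = 0; i < NQ && got < n; i++)
                if (!(*avail & (1ULL << i))) {
                        *avail |= 1ULL << i;
                        map[got++] = i;
                }
        return got == n ? 0 : -1;
}

int main(void)
{
        uint64_t avail = 0x5;   /* queues 0 and 2 already taken */
        int map[4];

        if (!alloc_queues(&avail, 4, map))      /* grabs 3, 4, 5, 6 */
                printf("%d %d %d %d\n", map[0], map[1], map[2], map[3]);
        return 0;
}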
+
+/**
+ * ice_vsi_put_qs - Release queues from VSI to PF
+ * @vsi: the VSI that is going to release queues
+ */
+void ice_vsi_put_qs(struct ice_vsi *vsi)
+{
+       struct ice_pf *pf = vsi->back;
+       int i;
+
+       mutex_lock(&pf->avail_q_mutex);
+
+       for (i = 0; i < vsi->alloc_txq; i++) {
+               clear_bit(vsi->txq_map[i], pf->avail_txqs);
+               vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
+       }
+
+       for (i = 0; i < vsi->alloc_rxq; i++) {
+               clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
+               vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
+       }
+
+       mutex_unlock(&pf->avail_q_mutex);
+}
+
+/**
+ * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
+ * @vsi: the VSI being removed
+ */
+static void ice_rss_clean(struct ice_vsi *vsi)
+{
+       struct ice_pf *pf;
+
+       pf = vsi->back;
+
+       if (vsi->rss_hkey_user)
+               devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
+       if (vsi->rss_lut_user)
+               devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
+}
+
+/**
+ * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
+ * @vsi: the VSI being configured
+ */
+static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
+{
+       struct ice_hw_common_caps *cap;
+       struct ice_pf *pf = vsi->back;
+
+       if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+               vsi->rss_size = 1;
+               return;
+       }
+
+       cap = &pf->hw.func_caps.common_cap;
+       switch (vsi->type) {
+       case ICE_VSI_PF:
+               /* PF VSI will inherit RSS instance of PF */
+               vsi->rss_table_size = cap->rss_table_size;
+               vsi->rss_size = min_t(int, num_online_cpus(),
+                                     BIT(cap->rss_table_entry_width));
+               vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
+               break;
+       case ICE_VSI_VF:
+               /* VF VSI gets a small RSS table.
+                * For VSI_LUT, the LUT size should be set to 64 bytes
+                */
+               vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+               vsi->rss_size = min_t(int, num_online_cpus(),
+                                     BIT(cap->rss_table_entry_width));
+               vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
+               break;
+       default:
+               dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n",
+                        vsi->type);
+               break;
+       }
+}
+
+/**
+ * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
+ * @ctxt: the VSI context being set
+ *
+ * This initializes a default VSI context for all sections except the Queues.
+ */
+static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
+{
+       u32 table = 0;
+
+       memset(&ctxt->info, 0, sizeof(ctxt->info));
+       /* VSIs should be allocated from the shared pool */
+       ctxt->alloc_from_pool = true;
+       /* Src pruning enabled by default */
+       ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
+       /* Traffic from VSI can be sent to LAN */
+       ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
+       /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
+        * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
+        * packets untagged/tagged.
+        */
+       ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
+                                 ICE_AQ_VSI_VLAN_MODE_M) >>
+                                ICE_AQ_VSI_VLAN_MODE_S);
+       /* Have 1:1 UP mapping for both ingress/egress tables */
+       table |= ICE_UP_TABLE_TRANSLATE(0, 0);
+       table |= ICE_UP_TABLE_TRANSLATE(1, 1);
+       table |= ICE_UP_TABLE_TRANSLATE(2, 2);
+       table |= ICE_UP_TABLE_TRANSLATE(3, 3);
+       table |= ICE_UP_TABLE_TRANSLATE(4, 4);
+       table |= ICE_UP_TABLE_TRANSLATE(5, 5);
+       table |= ICE_UP_TABLE_TRANSLATE(6, 6);
+       table |= ICE_UP_TABLE_TRANSLATE(7, 7);
+       ctxt->info.ingress_table = cpu_to_le32(table);
+       ctxt->info.egress_table = cpu_to_le32(table);
+       /* Have 1:1 UP mapping for outer to inner UP table */
+       ctxt->info.outer_up_table = cpu_to_le32(table);
+       /* No outer tag support; outer_tag_flags remains zero */
+}
+
+/**
+ * ice_vsi_setup_q_map - Setup a VSI queue map
+ * @vsi: the VSI being configured
+ * @ctxt: VSI context structure
+ */
+static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
+{
+       u16 offset = 0, qmap = 0, numq_tc;
+       u16 pow = 0, max_rss = 0, qcount;
+       u16 qcount_tx = vsi->alloc_txq;
+       u16 qcount_rx = vsi->alloc_rxq;
+       bool ena_tc0 = false;
+       int i;
+
+       /* at least TC0 should be enabled by default */
+       if (vsi->tc_cfg.numtc) {
+               if (!(vsi->tc_cfg.ena_tc & BIT(0)))
+                       ena_tc0 = true;
+       } else {
+               ena_tc0 = true;
+       }
+
+       if (ena_tc0) {
+               vsi->tc_cfg.numtc++;
+               vsi->tc_cfg.ena_tc |= 1;
+       }
+
+       numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+
+       /* TC mapping is a function of the number of Rx queues assigned to the
+        * VSI for each traffic class and the offset of these queues.
+        * The first 10 bits are the queue offset for TC0; the next 4 bits
+        * hold the number of queues allocated to TC0 (stored as a power-of-2
+        * exponent).
+        *
+        * If a TC is not enabled, set the queue offset to 0 and allocate one
+        * queue; this way, traffic for the given TC will be sent to the
+        * default queue.
+        *
+        * Set up the number and offset of Rx queues for all TCs for the VSI.
+        */
+
+       qcount = numq_tc;
+       /* qcount will change if RSS is enabled */
+       if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
+               if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
+                       if (vsi->type == ICE_VSI_PF)
+                               max_rss = ICE_MAX_LG_RSS_QS;
+                       else
+                               max_rss = ICE_MAX_SMALL_RSS_QS;
+                       qcount = min_t(int, numq_tc, max_rss);
+                       qcount = min_t(int, qcount, vsi->rss_size);
+               }
+       }
+
+       /* find the (rounded up) power-of-2 of qcount */
+       pow = order_base_2(qcount);
+
+       for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+               if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
+                       /* TC is not enabled */
+                       vsi->tc_cfg.tc_info[i].qoffset = 0;
+                       vsi->tc_cfg.tc_info[i].qcount = 1;
+                       ctxt->info.tc_mapping[i] = 0;
+                       continue;
+               }
+
+               /* TC is enabled */
+               vsi->tc_cfg.tc_info[i].qoffset = offset;
+               vsi->tc_cfg.tc_info[i].qcount = qcount;
+
+               qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
+                       ICE_AQ_VSI_TC_Q_OFFSET_M) |
+                       ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
+                        ICE_AQ_VSI_TC_Q_NUM_M);
+               offset += qcount;
+               ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
+       }
+
+       vsi->num_txq = qcount_tx;
+       vsi->num_rxq = offset;
+
+       if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
+               dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
+               /* since there is a chance that num_rxq could have been changed
+                * in the above for loop, make num_txq equal to num_rxq.
+                */
+               vsi->num_txq = vsi->num_rxq;
+       }
+
+       /* Rx queue mapping */
+       ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
+       /* q_mapping buffer holds the info for the first queue allocated for
+        * this VSI in the PF space and also the number of queues associated
+        * with this VSI.
+        */
+       ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
+       ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
+}
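+
+/* Editorial worked example (not part of the original patch): with qcount = 5,
+ * order_base_2(5) rounds up to pow = 3 (i.e. 8 queues), so a TC starting at
+ * queue offset 16 would be encoded as
+ *
+ *	qmap = ((16 << ICE_AQ_VSI_TC_Q_OFFSET_S) & ICE_AQ_VSI_TC_Q_OFFSET_M) |
+ *	       ((3 << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M);
+ *
+ * i.e. the queue offset in the low bits and log2 of the queue count in the
+ * ICE_AQ_VSI_TC_Q_NUM_M field, matching the layout described above.
+ */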
+
+/**
+ * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
+ * @ctxt: the VSI context being set
+ * @vsi: the VSI being configured
+ */
+static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
+{
+       u8 lut_type, hash_type;
+
+       switch (vsi->type) {
+       case ICE_VSI_PF:
+               /* PF VSI will inherit RSS instance of PF */
+               lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
+               hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+               break;
+       case ICE_VSI_VF:
+               /* VF VSI will get a small RSS table, which is of VSI LUT type */
+               lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
+               hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+               break;
+       default:
+               dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
+                        vsi->type);
+               return;
+       }
+
+       ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
+                               ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
+                               ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
+                                ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
+}
+
+/**
+ * ice_vsi_init - Create and initialize a VSI
+ * @vsi: the VSI being configured
+ *
+ * This initializes a VSI context depending on the VSI type to be added and
+ * passes it down to the add_vsi aq command to create a new VSI.
+ */
+static int ice_vsi_init(struct ice_vsi *vsi)
+{
+       struct ice_vsi_ctx ctxt = { 0 };
+       struct ice_pf *pf = vsi->back;
+       struct ice_hw *hw = &pf->hw;
+       int ret = 0;
+
+       switch (vsi->type) {
+       case ICE_VSI_PF:
+               ctxt.flags = ICE_AQ_VSI_TYPE_PF;
+               break;
+       case ICE_VSI_VF:
+               ctxt.flags = ICE_AQ_VSI_TYPE_VF;
+               /* VF number here is the absolute VF number (0-255) */
+               ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
+               break;
+       default:
+               return -ENODEV;
+       }
+
+       ice_set_dflt_vsi_ctx(&ctxt);
+       /* if the switch is in VEB mode, allow VSI loopback */
+       if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
+               ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
+
+       /* Set LUT type and HASH type if RSS is enabled */
+       if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+               ice_set_rss_vsi_ctx(&ctxt, vsi);
+
+       ctxt.info.sw_id = vsi->port_info->sw_id;
+       ice_vsi_setup_q_map(vsi, &ctxt);
+
+       ret = ice_add_vsi(hw, vsi->idx, &ctxt, NULL);
+       if (ret) {
+               dev_err(&pf->pdev->dev,
+                       "Add VSI failed, err %d\n", ret);
+               return -EIO;
+       }
+
+       /* keep context for update VSI operations */
+       vsi->info = ctxt.info;
+
+       /* record VSI number returned */
+       vsi->vsi_num = ctxt.vsi_num;
+
+       return ret;
+}
+
+/**
+ * ice_free_q_vector - Free memory allocated for a specific interrupt vector
+ * @vsi: VSI having the memory freed
+ * @v_idx: index of the vector to be freed
+ */
+static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
+{
+       struct ice_q_vector *q_vector;
+       struct ice_ring *ring;
+
+       if (!vsi->q_vectors[v_idx]) {
+               dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n",
+                       v_idx);
+               return;
+       }
+       q_vector = vsi->q_vectors[v_idx];
+
+       ice_for_each_ring(ring, q_vector->tx)
+               ring->q_vector = NULL;
+       ice_for_each_ring(ring, q_vector->rx)
+               ring->q_vector = NULL;
+
+       /* only a VSI with an associated netdev is set up with NAPI */
+       if (vsi->netdev)
+               netif_napi_del(&q_vector->napi);
+
+       devm_kfree(&vsi->back->pdev->dev, q_vector);
+       vsi->q_vectors[v_idx] = NULL;
+}
+
+/**
+ * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
+ * @vsi: the VSI having memory freed
+ */
+void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
+{
+       int v_idx;
+
+       for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+               ice_free_q_vector(vsi, v_idx);
+}
+
+/**
+ * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the VSI struct
+ *
+ * We allocate one q_vector.  If allocation fails we return -ENOMEM.
+ */
+static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
+{
+       struct ice_pf *pf = vsi->back;
+       struct ice_q_vector *q_vector;
+
+       /* allocate q_vector */
+       q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
+       if (!q_vector)
+               return -ENOMEM;
+
+       q_vector->vsi = vsi;
+       q_vector->v_idx = v_idx;
+       if (vsi->type == ICE_VSI_VF)
+               goto out;
+       /* only set affinity_mask if the CPU is online */
+       if (cpu_online(v_idx))
+               cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+
+       /* This will not be called in the driver load path because the netdev
+        * will not be created yet. All other cases will register the NAPI
+        * handler here (i.e. resume, reset/rebuild, etc.)
+        */
+       if (vsi->netdev)
+               netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
+                              NAPI_POLL_WEIGHT);
+
+out:
+       /* tie q_vector and VSI together */
+       vsi->q_vectors[v_idx] = q_vector;
+
+       return 0;
+}
+
+/**
+ * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
+ */
+static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
+{
+       struct ice_pf *pf = vsi->back;
+       int v_idx = 0, num_q_vectors;
+       int err;
+
+       if (vsi->q_vectors[0]) {
+               dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
+                       vsi->vsi_num);
+               return -EEXIST;
+       }
+
+       if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+               num_q_vectors = vsi->num_q_vectors;
+       } else {
+               err = -EINVAL;
+               goto err_out;
+       }
+
+       for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+               err = ice_vsi_alloc_q_vector(vsi, v_idx);
+               if (err)
+                       goto err_out;
+       }
+
+       return 0;
+
+err_out:
+       while (v_idx--)
+               ice_free_q_vector(vsi, v_idx);
+
+       dev_err(&pf->pdev->dev,
+               "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
+               vsi->num_q_vectors, vsi->vsi_num, err);
+       vsi->num_q_vectors = 0;
+       return err;
+}
+
+/**
+ * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
+ * @vsi: ptr to the VSI
+ *
+ * This should only be called after ice_vsi_alloc() which allocates the
+ * corresponding SW VSI structure and initializes num_queue_pairs for the
+ * newly allocated VSI.
+ *
+ * Returns 0 on success or negative on failure
+ */
+static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
+{
+       struct ice_pf *pf = vsi->back;
+       int num_q_vectors = 0;
+
+       if (vsi->sw_base_vector || vsi->hw_base_vector) {
+               dev_dbg(&pf->pdev->dev, "VSI %d has non-zero HW base vector %d or SW base vector %d\n",
+                       vsi->vsi_num, vsi->hw_base_vector, vsi->sw_base_vector);
+               return -EEXIST;
+       }
+
+       if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+               return -ENOENT;
+
+       switch (vsi->type) {
+       case ICE_VSI_PF:
+               num_q_vectors = vsi->num_q_vectors;
+               /* reserve slots from OS requested IRQs */
+               vsi->sw_base_vector = ice_get_res(pf, pf->sw_irq_tracker,
+                                                 num_q_vectors, vsi->idx);
+               if (vsi->sw_base_vector < 0) {
+                       dev_err(&pf->pdev->dev,
+                               "Failed to get tracking for %d SW vectors for VSI %d, err=%d\n",
+                               num_q_vectors, vsi->vsi_num,
+                               vsi->sw_base_vector);
+                       return -ENOENT;
+               }
+               pf->num_avail_sw_msix -= num_q_vectors;
+
+               /* reserve slots from HW interrupts */
+               vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker,
+                                                 num_q_vectors, vsi->idx);
+               break;
+       case ICE_VSI_VF:
+               /* take VF misc vector and data vectors into account */
+               num_q_vectors = pf->num_vf_msix;
+               /* For VF VSI, reserve slots only from HW interrupts */
+               vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker,
+                                                 num_q_vectors, vsi->idx);
+               break;
+       default:
+               dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
+                        vsi->type);
+               break;
+       }
+
+       if (vsi->hw_base_vector < 0) {
+               dev_err(&pf->pdev->dev,
+                       "Failed to get tracking for %d HW vectors for VSI %d, err=%d\n",
+                       num_q_vectors, vsi->vsi_num, vsi->hw_base_vector);
+               if (vsi->type != ICE_VSI_VF) {
+                       ice_free_res(vsi->back->sw_irq_tracker,
+                                    vsi->sw_base_vector, vsi->idx);
+                       pf->num_avail_sw_msix += num_q_vectors;
+               }
+               return -ENOENT;
+       }
+
+       pf->num_avail_hw_msix -= num_q_vectors;
+
+       return 0;
+}
+
+/**
+ * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
+ * @vsi: the VSI having rings deallocated
+ */
+static void ice_vsi_clear_rings(struct ice_vsi *vsi)
+{
+       int i;
+
+       if (vsi->tx_rings) {
+               for (i = 0; i < vsi->alloc_txq; i++) {
+                       if (vsi->tx_rings[i]) {
+                               kfree_rcu(vsi->tx_rings[i], rcu);
+                               vsi->tx_rings[i] = NULL;
+                       }
+               }
+       }
+       if (vsi->rx_rings) {
+               for (i = 0; i < vsi->alloc_rxq; i++) {
+                       if (vsi->rx_rings[i]) {
+                               kfree_rcu(vsi->rx_rings[i], rcu);
+                               vsi->rx_rings[i] = NULL;
+                       }
+               }
+       }
+}
+
+/**
+ * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
+ * @vsi: VSI which is having rings allocated
+ */
+static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
+{
+       struct ice_pf *pf = vsi->back;
+       int i;
+
+       /* Allocate tx_rings */
+       for (i = 0; i < vsi->alloc_txq; i++) {
+               struct ice_ring *ring;
+
+               /* allocate with kzalloc(), free with kfree_rcu() */
+               ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+
+               if (!ring)
+                       goto err_out;
+
+               ring->q_index = i;
+               ring->reg_idx = vsi->txq_map[i];
+               ring->ring_active = false;
+               ring->vsi = vsi;
+               ring->dev = &pf->pdev->dev;
+               ring->count = vsi->num_desc;
+               vsi->tx_rings[i] = ring;
+       }
+
+       /* Allocate rx_rings */
+       for (i = 0; i < vsi->alloc_rxq; i++) {
+               struct ice_ring *ring;
+
+               /* allocate with kzalloc(), free with kfree_rcu() */
+               ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+               if (!ring)
+                       goto err_out;
+
+               ring->q_index = i;
+               ring->reg_idx = vsi->rxq_map[i];
+               ring->ring_active = false;
+               ring->vsi = vsi;
+               ring->netdev = vsi->netdev;
+               ring->dev = &pf->pdev->dev;
+               ring->count = vsi->num_desc;
+               vsi->rx_rings[i] = ring;
+       }
+
+       return 0;
+
+err_out:
+       ice_vsi_clear_rings(vsi);
+       return -ENOMEM;
+}
+
+/**
+ * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * This function maps descriptor rings to the queue-specific vectors allotted
+ * through the MSI-X enabling code. On a constrained vector budget, we map Tx
+ * and Rx rings to the vector as "efficiently" as possible.
+ */
+static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
+{
+       int q_vectors = vsi->num_q_vectors;
+       int tx_rings_rem, rx_rings_rem;
+       int v_id;
+
+       /* start the remaining ring counts at the VSI's Tx/Rx queue counts */
+       tx_rings_rem = vsi->num_txq;
+       rx_rings_rem = vsi->num_rxq;
+
+       for (v_id = 0; v_id < q_vectors; v_id++) {
+               struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
+               int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
+
+               /* Tx rings mapping to vector */
+               tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
+               q_vector->num_ring_tx = tx_rings_per_v;
+               q_vector->tx.ring = NULL;
+               q_vector->tx.itr_idx = ICE_TX_ITR;
+               q_base = vsi->num_txq - tx_rings_rem;
+
+               for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
+                       struct ice_ring *tx_ring = vsi->tx_rings[q_id];
+
+                       tx_ring->q_vector = q_vector;
+                       tx_ring->next = q_vector->tx.ring;
+                       q_vector->tx.ring = tx_ring;
+               }
+               tx_rings_rem -= tx_rings_per_v;
+
+               /* Rx rings mapping to vector */
+               rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
+               q_vector->num_ring_rx = rx_rings_per_v;
+               q_vector->rx.ring = NULL;
+               q_vector->rx.itr_idx = ICE_RX_ITR;
+               q_base = vsi->num_rxq - rx_rings_rem;
+
+               for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
+                       struct ice_ring *rx_ring = vsi->rx_rings[q_id];
+
+                       rx_ring->q_vector = q_vector;
+                       rx_ring->next = q_vector->rx.ring;
+                       q_vector->rx.ring = rx_ring;
+               }
+               rx_rings_rem -= rx_rings_per_v;
+       }
+}
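+
+/* Editorial worked example (not part of the original patch): the
+ * DIV_ROUND_UP() scheme above hands the remainder to the earlier vectors.
+ * Distributing 5 Tx rings over 3 vectors gives:
+ *
+ *	v_id 0: DIV_ROUND_UP(5, 3) = 2 rings, q_base = 0
+ *	v_id 1: DIV_ROUND_UP(3, 2) = 2 rings, q_base = 2
+ *	v_id 2: DIV_ROUND_UP(1, 1) = 1 ring,  q_base = 4
+ *
+ * so every ring is mapped exactly once and no vector is left empty.
+ */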
+
+/**
+ * ice_vsi_manage_rss_lut - disable/enable RSS
+ * @vsi: the VSI being changed
+ * @ena: boolean value indicating if this is an enable or disable request
+ *
+ * In the event of disable request for RSS, this function will zero out RSS
+ * LUT, while in the event of enable request for RSS, it will reconfigure RSS
+ * LUT.
+ */
+int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
+{
+       int err = 0;
+       u8 *lut;
+
+       lut = devm_kzalloc(&vsi->back->pdev->dev, vsi->rss_table_size,
+                          GFP_KERNEL);
+       if (!lut)
+               return -ENOMEM;
+
+       if (ena) {
+               if (vsi->rss_lut_user)
+                       memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
+               else
+                       ice_fill_rss_lut(lut, vsi->rss_table_size,
+                                        vsi->rss_size);
+       }
+
+       err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size);
+       devm_kfree(&vsi->back->pdev->dev, lut);
+       return err;
+}
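+
+/* Editorial usage sketch (not part of the original patch; the call site is
+ * assumed): a caller toggling RSS off and back on would do
+ *
+ *	err = ice_vsi_manage_rss_lut(vsi, false);
+ *	if (!err)
+ *		err = ice_vsi_manage_rss_lut(vsi, true);
+ *
+ * where the disable call pushes an all-zero LUT and the enable call restores
+ * either vsi->rss_lut_user, if set, or the ice_fill_rss_lut() default.
+ */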
+
+/**
+ * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
+ * @vsi: VSI to be configured
+ */
+static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
+{
+       u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
+       struct ice_aqc_get_set_rss_keys *key;
+       struct ice_pf *pf = vsi->back;
+       enum ice_status status;
+       int err = 0;
+       u8 *lut;
+
+       vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);
+
+       lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
+       if (!lut)
+               return -ENOMEM;
+
+       if (vsi->rss_lut_user)
+               memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
+       else
+               ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
+
+       status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut,
+                                   vsi->rss_table_size);
+
+       if (status) {
+               dev_err(&vsi->back->pdev->dev,
+                       "set_rss_lut failed, error %d\n", status);
+               err = -EIO;
+               goto ice_vsi_cfg_rss_exit;
+       }
+
+       key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL);
+       if (!key) {
+               err = -ENOMEM;
+               goto ice_vsi_cfg_rss_exit;
+       }
+
+       if (vsi->rss_hkey_user)
+               memcpy(seed, vsi->rss_hkey_user,
+                      ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+       else
+               netdev_rss_key_fill((void *)seed,
+                                   ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+       memcpy(&key->standard_rss_key, seed,
+              ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+
+       status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);
+
+       if (status) {
+               dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n",
+                       status);
+               err = -EIO;
+       }
+
+       devm_kfree(&pf->pdev->dev, key);
+ice_vsi_cfg_rss_exit:
+       devm_kfree(&pf->pdev->dev, lut);
+       return err;
+}
+
+/**
+ * ice_add_mac_to_list - Add a MAC address filter entry to the list
+ * @vsi: the VSI to be forwarded to
+ * @add_list: pointer to the list which contains MAC filter entries
+ * @macaddr: the MAC address to be added
+ *
+ * Adds a MAC address filter entry to the temporary list
+ *
+ * Returns 0 on success or -ENOMEM on failure.
+ */
+int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
+                       const u8 *macaddr)
+{
+       struct ice_fltr_list_entry *tmp;
+       struct ice_pf *pf = vsi->back;
+
+       tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
+       if (!tmp)
+               return -ENOMEM;
+
+       tmp->fltr_info.flag = ICE_FLTR_TX;
+       tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
+       tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
+       tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+       tmp->fltr_info.vsi_handle = vsi->idx;
+       ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);
+
+       INIT_LIST_HEAD(&tmp->list_entry);
+       list_add(&tmp->list_entry, add_list);
+
+       return 0;
+}
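+
+/* Editorial usage sketch (not part of the original patch): callers build a
+ * temporary list, hand it to the switch code, and always free it with
+ * ice_free_fltr_list(). 'addr' stands in for whichever MAC the caller wants
+ * forwarded, and ice_add_mac() is the switch-layer consumer assumed here:
+ *
+ *	LIST_HEAD(tmp_add_list);
+ *	int err = 0;
+ *
+ *	if (ice_add_mac_to_list(vsi, &tmp_add_list, addr))
+ *		return -ENOMEM;
+ *	if (ice_add_mac(&pf->hw, &tmp_add_list))
+ *		err = -EIO;
+ *	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ */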
+
+/**
+ * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
+ * @vsi: the VSI to be updated
+ */
+void ice_update_eth_stats(struct ice_vsi *vsi)
+{
+       struct ice_eth_stats *prev_es, *cur_es;
+       struct ice_hw *hw = &vsi->back->hw;
+       u16 vsi_num = vsi->vsi_num;    /* HW absolute index of a VSI */
+
+       prev_es = &vsi->eth_stats_prev;
+       cur_es = &vsi->eth_stats;
+
+       ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num),
+                         vsi->stat_offsets_loaded, &prev_es->rx_bytes,
+                         &cur_es->rx_bytes);
+
+       ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num),
+                         vsi->stat_offsets_loaded, &prev_es->rx_unicast,
+                         &cur_es->rx_unicast);
+
+       ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num),
+                         vsi->stat_offsets_loaded, &prev_es->rx_multicast,
+                         &cur_es->rx_multicast);
+
+       ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num),
+                         vsi->stat_offsets_loaded, &prev_es->rx_broadcast,
+                         &cur_es->rx_broadcast);
+
+       ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
+                         &prev_es->rx_discards, &cur_es->rx_discards);
+
+       ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num),
+                         vsi->stat_offsets_loaded, &prev_es->tx_bytes,
+                         &cur_es->tx_bytes);
+
+       ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num),
+                         vsi->stat_offsets_loaded, &prev_es->tx_unicast,
+                         &cur_es->tx_unicast);
+
+       ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num),
+                         vsi->stat_offsets_loaded, &prev_es->tx_multicast,
+                         &cur_es->tx_multicast);
+
+       ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num),
+                         vsi->stat_offsets_loaded, &prev_es->tx_broadcast,
+                         &cur_es->tx_broadcast);
+
+       ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
+                         &prev_es->tx_errors, &cur_es->tx_errors);
+
+       vsi->stat_offsets_loaded = true;
+}
+
+/**
+ * ice_free_fltr_list - free filter lists helper
+ * @dev: pointer to the device struct
+ * @h: pointer to the list head to be freed
+ *
+ * Helper function to free filter lists previously created using
+ * ice_add_mac_to_list
+ */
+void ice_free_fltr_list(struct device *dev, struct list_head *h)
+{
+       struct ice_fltr_list_entry *e, *tmp;
+
+       list_for_each_entry_safe(e, tmp, h, list_entry) {
+               list_del(&e->list_entry);
+               devm_kfree(dev, e);
+       }
+}
+
+/**
+ * ice_vsi_add_vlan - Add VSI membership for given VLAN
+ * @vsi: the VSI being configured
+ * @vid: VLAN ID to be added
+ */
+int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
+{
+       struct ice_fltr_list_entry *tmp;
+       struct ice_pf *pf = vsi->back;
+       LIST_HEAD(tmp_add_list);
+       enum ice_status status;
+       int err = 0;
+
+       tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL);
+       if (!tmp)
+               return -ENOMEM;
+
+       tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
+       tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+       tmp->fltr_info.flag = ICE_FLTR_TX;
+       tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
+       tmp->fltr_info.vsi_handle = vsi->idx;
+       tmp->fltr_info.l_data.vlan.vlan_id = vid;
+
+       INIT_LIST_HEAD(&tmp->list_entry);
+       list_add(&tmp->list_entry, &tmp_add_list);
+
+       status = ice_add_vlan(&pf->hw, &tmp_add_list);
+       if (status) {
+               err = -ENODEV;
+               dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n",
+                       vid, vsi->vsi_num);
+       }
+
+       ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+       return err;
+}
+
+/**
+ * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
+ * @vsi: the VSI being configured
+ * @vid: VLAN ID to be removed
+ *
+ * Returns 0 on success and negative on failure
+ */
+int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
+{
+       struct ice_fltr_list_entry *list;
+       struct ice_pf *pf = vsi->back;
+       LIST_HEAD(tmp_add_list);
+       int status = 0;
+
+       list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
+       if (!list)
+               return -ENOMEM;
+
+       list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
+       list->fltr_info.vsi_handle = vsi->idx;
+       list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+       list->fltr_info.l_data.vlan.vlan_id = vid;
+       list->fltr_info.flag = ICE_FLTR_TX;
+       list->fltr_info.src_id = ICE_SRC_ID_VSI;
+
+       INIT_LIST_HEAD(&list->list_entry);
+       list_add(&list->list_entry, &tmp_add_list);
+
+       if (ice_remove_vlan(&pf->hw, &tmp_add_list)) {
+               dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n",
+                       vid, vsi->vsi_num);
+               status = -EIO;
+       }
+
+       ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+       return status;
+}
+
+/**
+ * ice_vsi_cfg_rxqs - Configure the VSI for Rx
+ * @vsi: the VSI being configured
+ *
+ * Configure the Rx VSI for operation.
+ *
+ * Returns 0 on success and a negative value on error.
+ */
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
+{
+       int err = 0;
+       u16 i;
+
+       if (vsi->type == ICE_VSI_VF)
+               goto setup_rings;
+
+       if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
+               vsi->max_frame = vsi->netdev->mtu +
+                       ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+       else
+               vsi->max_frame = ICE_RXBUF_2048;
+
+       vsi->rx_buf_len = ICE_RXBUF_2048;
+setup_rings:
+       /* set up individual rings */
+       for (i = 0; i < vsi->num_rxq && !err; i++)
+               err = ice_setup_rx_ctx(vsi->rx_rings[i]);
+
+       if (err) {
+               dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n");
+               return -EIO;
+       }
+       return err;
+}
+
+/**
+ * ice_vsi_cfg_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ *
+ * Configure the Tx VSI for operation.
+ *
+ * Returns 0 on success and a negative value on error.
+ */
+int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
+{
+       struct ice_aqc_add_tx_qgrp *qg_buf;
+       struct ice_aqc_add_txqs_perq *txq;
+       struct ice_pf *pf = vsi->back;
+       enum ice_status status;
+       u16 buf_len, i, pf_q;
+       int err = 0, tc = 0;
+       u8 num_q_grps;
+
+       buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
+       qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
+       if (!qg_buf)
+               return -ENOMEM;
+
+       if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {
+               err = -EINVAL;
+               goto err_cfg_txqs;
+       }
+       qg_buf->num_txqs = 1;
+       num_q_grps = 1;
+
+       /* set up and configure the Tx queues */
+       ice_for_each_txq(vsi, i) {
+               struct ice_tlan_ctx tlan_ctx = { 0 };
+
+               pf_q = vsi->txq_map[i];
+               ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
+               /* copy context contents into the qg_buf */
+               qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
+               ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+                           ice_tlan_ctx_info);
+
+               /* init the queue-specific tail register; it is referred to
+                * as the transmit comm scheduler queue doorbell.
+                */
+               vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+               status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
+                                        num_q_grps, qg_buf, buf_len, NULL);
+               if (status) {
+                       dev_err(&vsi->back->pdev->dev,
+                               "Failed to set LAN Tx queue context, error: %d\n",
+                               status);
+                       err = -ENODEV;
+                       goto err_cfg_txqs;
+               }
+
+               /* Add Tx Queue TEID into the VSI Tx ring from the response
+                * This will complete configuring and enabling the queue.
+                */
+               txq = &qg_buf->txqs[0];
+               if (pf_q == le16_to_cpu(txq->txq_id))
+                       vsi->tx_rings[i]->txq_teid =
+                               le32_to_cpu(txq->q_teid);
+       }
+err_cfg_txqs:
+       devm_kfree(&pf->pdev->dev, qg_buf);
+       return err;
+}
+
+/**
+ * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
+ * @intrl: interrupt rate limit in usecs
+ * @gran: interrupt rate limit granularity in usecs
+ *
+ * This function converts a decimal interrupt rate limit in usecs to the format
+ * expected by firmware.
+ */
+static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
+{
+       u32 val = intrl / gran;
+
+       if (val)
+               return val | GLINT_RATE_INTRL_ENA_M;
+       return 0;
+}
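+
+/* Editorial worked example (not part of the original patch): with a
+ * granularity of gran = 2 usecs, a requested limit of intrl = 10 usecs
+ * yields val = 5, so the function returns 5 | GLINT_RATE_INTRL_ENA_M;
+ * any request below one granule (intrl < gran) returns 0 and leaves rate
+ * limiting disabled.
+ */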
+
+/**
+ * ice_cfg_itr - configure the initial interrupt throttle values
+ * @hw: pointer to the HW structure
+ * @q_vector: interrupt vector that's being configured
+ * @vector: HW vector index to apply the interrupt throttling to
+ *
+ * Configure interrupt throttling values for the ring containers that are
+ * associated with the interrupt vector passed in.
+ */
+static void
+ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
+{
+       u8 itr_gran = hw->itr_gran;
+
+       if (q_vector->num_ring_rx) {
+               struct ice_ring_container *rc = &q_vector->rx;
+
+               rc->itr = ITR_TO_REG(ICE_DFLT_RX_ITR, itr_gran);
+               rc->latency_range = ICE_LOW_LATENCY;
+               wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr);
+       }
+
+       if (q_vector->num_ring_tx) {
+               struct ice_ring_container *rc = &q_vector->tx;
+
+               rc->itr = ITR_TO_REG(ICE_DFLT_TX_ITR, itr_gran);
+               rc->latency_range = ICE_LOW_LATENCY;
+               wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr);
+       }
+}
+
+/**
+ * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
+ * @vsi: the VSI being configured
+ */
+void ice_vsi_cfg_msix(struct ice_vsi *vsi)
+{
+       struct ice_pf *pf = vsi->back;
+       u16 vector = vsi->hw_base_vector;
+       struct ice_hw *hw = &pf->hw;
+       u32 txq = 0, rxq = 0;
+       int i, q;
+
+       for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+               struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+               ice_cfg_itr(hw, q_vector, vector);
+
+               wr32(hw, GLINT_RATE(vector),
+                    ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
+
+               /* Both the Transmit Queue Interrupt Cause Control register
+                * and the Receive Queue Interrupt Cause Control register
+                * expect the MSIX_INDX field to be the vector index
+                * within the function space and not the absolute
+                * vector index across the PF or across the device.
+                * For SR-IOV VF VSIs, the queue vector index always starts
+                * at 1 since the first vector index (0) is used for OICR
+                * in VF space. Since VMDq and other PF VSIs are within
+                * the PF function space, use the vector index that is
+                * tracked for this PF.
+                */
+               for (q = 0; q < q_vector->num_ring_tx; q++) {
+                       int itr_idx = q_vector->tx.itr_idx;
+                       u32 val;
+
+                       if (vsi->type == ICE_VSI_VF)
+                               val = QINT_TQCTL_CAUSE_ENA_M |
+                                     (itr_idx << QINT_TQCTL_ITR_INDX_S)  |
+                                     ((i + 1) << QINT_TQCTL_MSIX_INDX_S);
+                       else
+                               val = QINT_TQCTL_CAUSE_ENA_M |
+                                     (itr_idx << QINT_TQCTL_ITR_INDX_S)  |
+                                     (vector << QINT_TQCTL_MSIX_INDX_S);
+                       wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
+                       txq++;
+               }
+
+               for (q = 0; q < q_vector->num_ring_rx; q++) {
+                       int itr_idx = q_vector->rx.itr_idx;
+                       u32 val;
+
+                       if (vsi->type == ICE_VSI_VF)
+                               val = QINT_RQCTL_CAUSE_ENA_M |
+                                     (itr_idx << QINT_RQCTL_ITR_INDX_S)  |
+                                     ((i + 1) << QINT_RQCTL_MSIX_INDX_S);
+                       else
+                               val = QINT_RQCTL_CAUSE_ENA_M |
+                                     (itr_idx << QINT_RQCTL_ITR_INDX_S)  |
+                                     (vector << QINT_RQCTL_MSIX_INDX_S);
+                       wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
+                       rxq++;
+               }
+       }
+
+       ice_flush(hw);
+}
+
+/**
+ * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
+ * @vsi: the VSI being changed
+ */
+int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
+{
+       struct device *dev = &vsi->back->pdev->dev;
+       struct ice_hw *hw = &vsi->back->hw;
+       struct ice_vsi_ctx ctxt = { 0 };
+       enum ice_status status;
+
+       /* Here we are configuring the VSI to let the driver add VLAN tags by
+        * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
+        * insertion happens in the Tx hot path, in ice_tx_map.
+        */
+       ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
+
+       ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+       status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+       if (status) {
+               dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
+                       status, hw->adminq.sq_last_status);
+               return -EIO;
+       }
+
+       vsi->info.vlan_flags = ctxt.info.vlan_flags;
+       return 0;
+}
+
+/**
+ * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
+ * @vsi: the VSI being changed
+ * @ena: boolean value indicating if this is an enable or disable request
+ */
+int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
+{
+       struct device *dev = &vsi->back->pdev->dev;
+       struct ice_hw *hw = &vsi->back->hw;
+       struct ice_vsi_ctx ctxt = { 0 };
+       enum ice_status status;
+
+       /* Here we are configuring what the VSI should do with the VLAN tag in
+        * the Rx packet. We can either leave the tag in the packet or put it in
+        * the Rx descriptor.
+        */
+       if (ena) {
+               /* Strip VLAN tag from Rx packet and put it in the desc */
+               ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
+       } else {
+               /* Disable stripping. Leave tag in packet */
+               ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
+       }
+
+       /* Allow all packets untagged/tagged */
+       ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
+
+       ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+       status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+       if (status) {
+               dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
+                       ena, status, hw->adminq.sq_last_status);
+               return -EIO;
+       }
+
+       vsi->info.vlan_flags = ctxt.info.vlan_flags;
+       return 0;
+}
+
+/**
+ * ice_vsi_start_rx_rings - start VSI's Rx rings
+ * @vsi: the VSI whose rings are to be started
+ *
+ * Returns 0 on success and a negative value on error
+ */
+int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
+{
+       return ice_vsi_ctrl_rx_rings(vsi, true);
+}
+
+/**
+ * ice_vsi_stop_rx_rings - stop VSI's Rx rings
+ * @vsi: the VSI
+ *
+ * Returns 0 on success and a negative value on error
+ */
+int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
+{
+       return ice_vsi_ctrl_rx_rings(vsi, false);
+}
+
+/**
+ * ice_vsi_stop_tx_rings - Disable Tx rings
+ * @vsi: the VSI being configured
+ * @rst_src: reset source
+ * @rel_vmvf_num: Relative ID of the VF/VM
+ */
+int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+                         u16 rel_vmvf_num)
+{
+       struct ice_pf *pf = vsi->back;
+       struct ice_hw *hw = &pf->hw;
+       enum ice_status status;
+       u32 *q_teids, val;
+       u16 *q_ids, i;
+       int err = 0;
+
+       if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
+               return -EINVAL;
+
+       q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids),
+                              GFP_KERNEL);
+       if (!q_teids)
+               return -ENOMEM;
+
+       q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids),
+                            GFP_KERNEL);
+       if (!q_ids) {
+               err = -ENOMEM;
+               goto err_alloc_q_ids;
+       }
+
+       /* set up the Tx queue list to be disabled */
+       ice_for_each_txq(vsi, i) {
+               u16 v_idx;
+
+               if (!vsi->tx_rings || !vsi->tx_rings[i]) {
+                       err = -EINVAL;
+                       goto err_out;
+               }
+
+               q_ids[i] = vsi->txq_map[i];
+               q_teids[i] = vsi->tx_rings[i]->txq_teid;
+
+               /* clear cause_ena bit for disabled queues */
+               val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
+               val &= ~QINT_TQCTL_CAUSE_ENA_M;
+               wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
+
+               /* software is expected to wait for 100 ns */
+               ndelay(100);
+
+               /* trigger a software interrupt for the vector associated to
+                * the queue to schedule NAPI handler
+                */
+               v_idx = vsi->tx_rings[i]->q_vector->v_idx;
+               wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
+                    GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
+       }
+       status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,
+                                rst_src, rel_vmvf_num, NULL);
+       /* if the disable queue command was exercised during an active reset
+        * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as
+        * the reset operation disables queues at the hardware level anyway.
+        */
+       if (status == ICE_ERR_RESET_ONGOING) {
+               dev_info(&pf->pdev->dev,
+                        "Reset in progress. LAN Tx queues already disabled\n");
+       } else if (status) {
+               dev_err(&pf->pdev->dev,
+                       "Failed to disable LAN Tx queues, error: %d\n",
+                       status);
+               err = -ENODEV;
+       }
+
+err_out:
+       devm_kfree(&pf->pdev->dev, q_ids);
+
+err_alloc_q_ids:
+       devm_kfree(&pf->pdev->dev, q_teids);
+
+       return err;
+}
+
+/**
+ * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
+ * @vsi: VSI to enable or disable VLAN pruning on
+ * @ena: set to true to enable VLAN pruning and false to disable it
+ *
+ * returns 0 if VSI is updated, negative otherwise
+ */
+int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
+{
+       struct ice_vsi_ctx *ctxt;
+       struct device *dev;
+       int status;
+
+       if (!vsi)
+               return -EINVAL;
+
+       dev = &vsi->back->pdev->dev;
+       ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
+       if (!ctxt)
+               return -ENOMEM;
+
+       ctxt->info = vsi->info;
+
+       if (ena) {
+               ctxt->info.sec_flags |=
+                       ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+                       ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
+               ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+       } else {
+               ctxt->info.sec_flags &=
+                       ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+                         ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+               ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+       }
+
+       ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
+                                               ICE_AQ_VSI_PROP_SW_VALID);
+
+       status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL);
+       if (status) {
+               netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
+                          ena ? "Ena" : "Dis", vsi->idx, vsi->vsi_num, status,
+                          vsi->back->hw.adminq.sq_last_status);
+               goto err_out;
+       }
+
+       vsi->info.sec_flags = ctxt->info.sec_flags;
+       vsi->info.sw_flags2 = ctxt->info.sw_flags2;
+
+       devm_kfree(dev, ctxt);
+       return 0;
+
+err_out:
+       devm_kfree(dev, ctxt);
+       return -EIO;
+}
+
+/**
+ * ice_vsi_setup - Set up a VSI by a given type
+ * @pf: board private structure
+ * @pi: pointer to the port_info instance
+ * @type: VSI type
+ * @vf_id: defines the VF ID to which this VSI connects. This field is meant
+ *         to be used only for the ICE_VSI_VF VSI type. For other VSI types,
+ *         fill in ICE_INVAL_VFID as input.
+ *
+ * This allocates the sw VSI structure and its queue resources.
+ *
+ * Returns pointer to the successfully allocated and configured VSI sw struct on
+ * success, NULL on failure.
+ */
+struct ice_vsi *
+ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
+             enum ice_vsi_type type, u16 vf_id)
+{
+       u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+       struct device *dev = &pf->pdev->dev;
+       struct ice_vsi *vsi;
+       int ret, i;
+
+       vsi = ice_vsi_alloc(pf, type);
+       if (!vsi) {
+               dev_err(dev, "could not allocate VSI\n");
+               return NULL;
+       }
+
+       vsi->port_info = pi;
+       vsi->vsw = pf->first_sw;
+       if (vsi->type == ICE_VSI_VF)
+               vsi->vf_id = vf_id;
+
+       if (ice_vsi_get_qs(vsi)) {
+               dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
+                       vsi->idx);
+               goto unroll_get_qs;
+       }
+
+       /* set RSS capabilities */
+       ice_vsi_set_rss_params(vsi);
+
+       /* create the VSI */
+       ret = ice_vsi_init(vsi);
+       if (ret)
+               goto unroll_get_qs;
+
+       switch (vsi->type) {
+       case ICE_VSI_PF:
+               ret = ice_vsi_alloc_q_vectors(vsi);
+               if (ret)
+                       goto unroll_vsi_init;
+
+               ret = ice_vsi_setup_vector_base(vsi);
+               if (ret)
+                       goto unroll_alloc_q_vector;
+
+               ret = ice_vsi_alloc_rings(vsi);
+               if (ret)
+                       goto unroll_vector_base;
+
+               ice_vsi_map_rings_to_vectors(vsi);
+
+               /* Do not exit if configuring RSS had an issue; at least
+                * receive traffic on the first queue. Hence there is no
+                * need to capture the return value.
+                */
+               if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+                       ice_vsi_cfg_rss_lut_key(vsi);
+               break;
+       case ICE_VSI_VF:
+               /* The VF driver will take care of creating the netdev for
+                * this VSI type and of mapping queues to vectors through
+                * Virtchnl; the PF driver only creates a VSI and the
+                * corresponding structures for bookkeeping purposes.
+                */
+               ret = ice_vsi_alloc_q_vectors(vsi);
+               if (ret)
+                       goto unroll_vsi_init;
+
+               ret = ice_vsi_alloc_rings(vsi);
+               if (ret)
+                       goto unroll_alloc_q_vector;
+
+               /* Set up the vector base only during the VF init phase or
+                * when the VF asks for more vectors than the assigned
+                * number. In all other cases, assign hw_base_vector to the
+                * value given earlier.
+                */
+               if (test_bit(ICE_VF_STATE_CFG_INTR, pf->vf[vf_id].vf_states)) {
+                       ret = ice_vsi_setup_vector_base(vsi);
+                       if (ret)
+                               goto unroll_vector_base;
+               } else {
+                       vsi->hw_base_vector = pf->vf[vf_id].first_vector_idx;
+               }
+               pf->q_left_tx -= vsi->alloc_txq;
+               pf->q_left_rx -= vsi->alloc_rxq;
+               break;
+       default:
+               /* if VSI type is not recognized, clean up the resources and
+                * exit
+                */
+               goto unroll_vsi_init;
+       }
+
+       ice_vsi_set_tc_cfg(vsi);
+
+       /* configure VSI nodes based on number of queues and TC's */
+       for (i = 0; i < vsi->tc_cfg.numtc; i++)
+               max_txqs[i] = vsi->num_txq;
+
+       ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+                             max_txqs);
+       if (ret) {
+               dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n");
+               goto unroll_vector_base;
+       }
+
+       return vsi;
+
+unroll_vector_base:
+       /* reclaim SW interrupts back to the common pool */
+       ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+       pf->num_avail_sw_msix += vsi->num_q_vectors;
+       /* reclaim HW interrupts back to the common pool */
+       ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
+       pf->num_avail_hw_msix += vsi->num_q_vectors;
+unroll_alloc_q_vector:
+       ice_vsi_free_q_vectors(vsi);
+unroll_vsi_init:
+       ice_vsi_delete(vsi);
+unroll_get_qs:
+       ice_vsi_put_qs(vsi);
+       pf->q_left_tx += vsi->alloc_txq;
+       pf->q_left_rx += vsi->alloc_rxq;
+       ice_vsi_clear(vsi);
+
+       return NULL;
+}
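+
+/* Editorial usage sketch (not part of the original patch): the PF driver
+ * would create its main VSI along the lines of
+ *
+ *	struct ice_vsi *vsi;
+ *
+ *	vsi = ice_vsi_setup(pf, pf->hw.port_info, ICE_VSI_PF, ICE_INVAL_VFID);
+ *	if (!vsi)
+ *		return -ENOMEM;
+ *
+ * passing ICE_INVAL_VFID for non-VF VSI types as the kernel-doc above
+ * requires; pf->hw.port_info is the assumed source of the port_info pointer.
+ */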
+
+/**
+ * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
+ * @vsi: the VSI being cleaned up
+ */
+static void ice_vsi_release_msix(struct ice_vsi *vsi)
+{
+       struct ice_pf *pf = vsi->back;
+       u16 vector = vsi->hw_base_vector;
+       struct ice_hw *hw = &pf->hw;
+       u32 txq = 0;
+       u32 rxq = 0;
+       int i, q;
+
+       for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+               struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+               wr32(hw, GLINT_ITR(ICE_IDX_ITR0, vector), 0);
+               wr32(hw, GLINT_ITR(ICE_IDX_ITR1, vector), 0);
+               for (q = 0; q < q_vector->num_ring_tx; q++) {
+                       wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
+                       txq++;
+               }
+
+               for (q = 0; q < q_vector->num_ring_rx; q++) {
+                       wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
+                       rxq++;
+               }
+       }
+
+       ice_flush(hw);
+}
+
+/**
+ * ice_vsi_free_irq - Free the IRQ association with the OS
+ * @vsi: the VSI being configured
+ */
+void ice_vsi_free_irq(struct ice_vsi *vsi)
+{
+       struct ice_pf *pf = vsi->back;
+       int base = vsi->sw_base_vector;
+
+       if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+               int i;
+
+               if (!vsi->q_vectors || !vsi->irqs_ready)
+                       return;
+
+               ice_vsi_release_msix(vsi);
+               if (vsi->type == ICE_VSI_VF)
+                       return;
+
+               vsi->irqs_ready = false;
+               for (i = 0; i < vsi->num_q_vectors; i++) {
+                       u16 vector = i + base;
+                       int irq_num;
+
+                       irq_num = pf->msix_entries[vector].vector;
+
+                       /* free only the irqs that were actually requested */
+                       if (!vsi->q_vectors[i] ||
+                           !(vsi->q_vectors[i]->num_ring_tx ||
+                             vsi->q_vectors[i]->num_ring_rx))
+                               continue;
+
+                       /* clear the affinity notifier in the IRQ descriptor */
+                       irq_set_affinity_notifier(irq_num, NULL);
+
+                       /* clear the affinity_mask in the IRQ descriptor */
+                       irq_set_affinity_hint(irq_num, NULL);
+                       synchronize_irq(irq_num);
+                       devm_free_irq(&pf->pdev->dev, irq_num,
+                                     vsi->q_vectors[i]);
+               }
+       }
+}
+
+/**
+ * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
+ * @vsi: the VSI having resources freed
+ */
+void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
+{
+       int i;
+
+       if (!vsi->tx_rings)
+               return;
+
+       ice_for_each_txq(vsi, i)
+               if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+                       ice_free_tx_ring(vsi->tx_rings[i]);
+}
+
+/**
+ * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
+ * @vsi: the VSI having resources freed
+ */
+void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
+{
+       int i;
+
+       if (!vsi->rx_rings)
+               return;
+
+       ice_for_each_rxq(vsi, i)
+               if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
+                       ice_free_rx_ring(vsi->rx_rings[i]);
+}
+
+/**
+ * ice_vsi_close - Shut down a VSI
+ * @vsi: the VSI being shut down
+ */
+void ice_vsi_close(struct ice_vsi *vsi)
+{
+       if (!test_and_set_bit(__ICE_DOWN, vsi->state))
+               ice_down(vsi);
+
+       ice_vsi_free_irq(vsi);
+       ice_vsi_free_tx_rings(vsi);
+       ice_vsi_free_rx_rings(vsi);
+}
+
+/**
+ * ice_free_res - free a block of resources
+ * @res: pointer to the resource
+ * @index: starting index previously returned by ice_get_res
+ * @id: identifier to track owner
+ *
+ * Returns number of resources freed
+ */
+int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
+{
+       int count = 0;
+       int i;
+
+       if (!res || index >= res->num_entries)
+               return -EINVAL;
+
+       id |= ICE_RES_VALID_BIT;
+       for (i = index; i < res->num_entries && res->list[i] == id; i++) {
+               res->list[i] = 0;
+               count++;
+       }
+
+       return count;
+}
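+
+/* Editorial worked example (not part of the original patch): after a block
+ * of two entries at index 4 was stamped with (id | ICE_RES_VALID_BIT),
+ *
+ *	ice_free_res(res, 4, id);
+ *
+ * clears both entries and returns 2; calling it again with the same index
+ * returns 0 because the entries no longer match the owner id.
+ */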
+
+/**
+ * ice_search_res - Search the tracker for a block of resources
+ * @res: pointer to the resource
+ * @needed: size of the block needed
+ * @id: identifier to track owner
+ *
+ * Returns the base item index of the block, or -ENOMEM for error
+ */
+static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
+{
+       int start = res->search_hint;
+       int end = start;
+
+       if ((start + needed) > res->num_entries)
+               return -ENOMEM;
+
+       id |= ICE_RES_VALID_BIT;
+
+       do {
+               /* skip already allocated entries */
+               if (res->list[end++] & ICE_RES_VALID_BIT) {
+                       start = end;
+                       if ((start + needed) > res->num_entries)
+                               break;
+               }
+
+               if (end == (start + needed)) {
+                       int i = start;
+
+                       /* there was enough, so assign it to the requestor */
+                       while (i != end)
+                               res->list[i++] = id;
+
+                       if (end == res->num_entries)
+                               end = 0;
+
+                       res->search_hint = end;
+                       return start;
+               }
+       } while (1);
+
+       return -ENOMEM;
+}
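+
+/* Editorial worked example (not part of the original patch): with
+ * res->list = [A, A, 0, B, 0, 0] (A and B marked with ICE_RES_VALID_BIT),
+ * needed = 2 and search_hint = 0, the loop skips the allocated A and B
+ * entries, finds the free run at indexes 4..5, stamps both entries with
+ * (id | ICE_RES_VALID_BIT), wraps search_hint back to 0 since the run ends
+ * at num_entries, and returns 4.
+ */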
+
+/**
+ * ice_get_res - get a block of resources
+ * @pf: board private structure
+ * @res: pointer to the resource
+ * @needed: size of the block needed
+ * @id: identifier to track owner
+ *
+ * Returns the base item index of the block, or -ENOMEM for error
+ * The search_hint trick and lack of advanced fit-finding only work
+ * because we're highly likely to have all the same-sized requests.
+ * Linear search time and any fragmentation should be minimal.
+ */
+int
+ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
+{
+       int ret;
+
+       if (!res || !pf)
+               return -EINVAL;
+
+       if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
+               dev_err(&pf->pdev->dev,
+                       "param err: needed=%d, num_entries = %d id=0x%04x\n",
+                       needed, res->num_entries, id);
+               return -EINVAL;
+       }
+
+       /* search based on search_hint */
+       ret = ice_search_res(res, needed, id);
+
+       if (ret < 0) {
+               /* previous search failed. Reset search hint and try again */
+               res->search_hint = 0;
+               ret = ice_search_res(res, needed, id);
+       }
+
+       return ret;
+}
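+
+/* Editorial pairing sketch (not part of the original patch): a block
+ * reserved with
+ *
+ *	base = ice_get_res(pf, pf->sw_irq_tracker, num_q_vectors, vsi->idx);
+ *
+ * must later be returned to the pool with the same owner id,
+ *
+ *	ice_free_res(pf->sw_irq_tracker, base, vsi->idx);
+ *
+ * exactly as ice_vsi_setup_vector_base() and ice_vsi_release() do above.
+ */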
+
+/**
+ * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
+ * @vsi: the VSI being un-configured
+ */
+void ice_vsi_dis_irq(struct ice_vsi *vsi)
+{
+       int base = vsi->sw_base_vector;
+       struct ice_pf *pf = vsi->back;
+       struct ice_hw *hw = &pf->hw;
+       u32 val;
+       int i;
+
+       /* disable interrupt causation from each queue */
+       if (vsi->tx_rings) {
+               ice_for_each_txq(vsi, i) {
+                       if (vsi->tx_rings[i]) {
+                               u16 reg;
+
+                               reg = vsi->tx_rings[i]->reg_idx;
+                               val = rd32(hw, QINT_TQCTL(reg));
+                               val &= ~QINT_TQCTL_CAUSE_ENA_M;
+                               wr32(hw, QINT_TQCTL(reg), val);
+                       }
+               }
+       }
+
+       if (vsi->rx_rings) {
+               ice_for_each_rxq(vsi, i) {
+                       if (vsi->rx_rings[i]) {
+                               u16 reg;
+
+                               reg = vsi->rx_rings[i]->reg_idx;
+                               val = rd32(hw, QINT_RQCTL(reg));
+                               val &= ~QINT_RQCTL_CAUSE_ENA_M;
+                               wr32(hw, QINT_RQCTL(reg), val);
+                       }
+               }
+       }
+
+       /* disable each interrupt */
+       if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+               for (i = vsi->hw_base_vector;
+                    i < (vsi->num_q_vectors + vsi->hw_base_vector); i++)
+                       wr32(hw, GLINT_DYN_CTL(i), 0);
+
+               ice_flush(hw);
+               for (i = 0; i < vsi->num_q_vectors; i++)
+                       synchronize_irq(pf->msix_entries[i + base].vector);
+       }
+}
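
The ordering in ice_vsi_dis_irq matters: cause-enable bits are cleared first, the
posted writes are flushed, and only then does the code wait out in-flight handlers.
A compact sketch of that quiesce pattern, with stubbed register I/O standing in for
rd32()/wr32()/ice_flush()/synchronize_irq() (all stand-ins, not the driver's API):

    #include <stdio.h>

    #define CAUSE_ENA 0x1

    static unsigned int qctl[4];            /* fake per-queue control regs */
    static unsigned int rd32(int i)         { return qctl[i]; }
    static void wr32(int i, unsigned int v) { qctl[i] = v; }
    static void flush(void)    { printf("posted writes flushed\n"); }
    static void sync_irq(int v) { printf("waiting on vector %d handlers\n", v); }

    int main(void)
    {
            /* 1) stop each queue from raising its interrupt cause */
            for (int i = 0; i < 4; i++)
                    wr32(i, rd32(i) & ~CAUSE_ENA);

            /* 2) make sure the disables have reached the device ... */
            flush();

            /* 3) ... then wait out any handler already in flight */
            for (int v = 0; v < 2; v++)
                    sync_irq(v);
            return 0;
    }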
+
+/**
+ * ice_vsi_release - Delete a VSI and free its resources
+ * @vsi: the VSI being removed
+ *
+ * Returns 0 on success or < 0 on error
+ */
+int ice_vsi_release(struct ice_vsi *vsi)
+{
+       struct ice_pf *pf;
+       struct ice_vf *vf;
+
+       if (!vsi->back)
+               return -ENODEV;
+       pf = vsi->back;
+       vf = &pf->vf[vsi->vf_id];
+       /* do not unregister and free netdevs while the driver is in the
+        * reset recovery pending state. Since reset/rebuild happens through
+        * the PF service task workqueue, it's not a good idea to unregister
+        * a netdev that is associated with the PF currently running the
+        * work queue items. This avoids a check_flush_dependency() warning
+        * on this wq.
+        */
+       if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
+               unregister_netdev(vsi->netdev);
+               free_netdev(vsi->netdev);
+               vsi->netdev = NULL;
+       }
+
+       if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+               ice_rss_clean(vsi);
+
+       /* Disable VSI and free resources */
+       ice_vsi_dis_irq(vsi);
+       ice_vsi_close(vsi);
+
+       /* reclaim interrupt vectors back to PF */
+       if (vsi->type != ICE_VSI_VF) {
+               /* reclaim SW interrupts back to the common pool */
+               ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector,
+                            vsi->idx);
+               pf->num_avail_sw_msix += vsi->num_q_vectors;
+               /* reclaim HW interrupts back to the common pool */
+               ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector,
+                            vsi->idx);
+               pf->num_avail_hw_msix += vsi->num_q_vectors;
+       } else if (test_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states)) {
+               /* Reclaim VF resources back only while freeing all VFs or
+                * vector reassignment is requested
+                */
+               ice_free_res(vsi->back->hw_irq_tracker, vf->first_vector_idx,
+                            vsi->idx);
+               pf->num_avail_hw_msix += pf->num_vf_msix;
+       }
+
+       ice_remove_vsi_fltr(&pf->hw, vsi->idx);
+       ice_vsi_delete(vsi);
+       ice_vsi_free_q_vectors(vsi);
+       ice_vsi_clear_rings(vsi);
+
+       ice_vsi_put_qs(vsi);
+       pf->q_left_tx += vsi->alloc_txq;
+       pf->q_left_rx += vsi->alloc_rxq;
+
+       /* retain the SW VSI data structure since it is needed to unregister
+        * and free the VSI netdev when the PF is not in the reset recovery
+        * pending state, e.g. during rmmod.
+        */
+       if (!ice_is_reset_in_progress(pf->state))
+               ice_vsi_clear(vsi);
+
+       return 0;
+}
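
The comments in ice_vsi_release above encode one rule: anything that could flush or
unregister a netdev is skipped while a reset is being processed on the PF service
workqueue, and the SW VSI struct is kept alive for the later rebuild. A tiny sketch
of that guard, with all helpers reduced to stand-in prints (names and ordering here
are illustrative only):

    #include <stdbool.h>
    #include <stdio.h>

    static bool reset_in_progress;

    static void vsi_release(void)
    {
            if (!reset_in_progress)
                    printf("unregister + free netdev now\n");

            printf("disable IRQs, close queues, reclaim vectors\n");

            if (!reset_in_progress)
                    printf("free SW VSI struct\n");
            else
                    printf("keep SW VSI struct for the rebuild\n");
    }

    int main(void)
    {
            reset_in_progress = true;  /* as if called from the reset path */
            vsi_release();
            return 0;
    }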
+
+/**
+ * ice_vsi_rebuild - Rebuild VSI after reset
+ * @vsi: VSI to be rebuilt
+ *
+ * Returns 0 on success and a negative value on failure
+ */
+int ice_vsi_rebuild(struct ice_vsi *vsi)
+{
+       u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+       int ret, i;
+
+       if (!vsi)
+               return -EINVAL;
+
+       ice_vsi_free_q_vectors(vsi);
+       ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+       ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
+       vsi->sw_base_vector = 0;
+       vsi->hw_base_vector = 0;
+       ice_vsi_clear_rings(vsi);
+       ice_vsi_free_arrays(vsi, false);
+       ice_vsi_set_num_qs(vsi);
+
+       /* Initialize VSI struct elements and create VSI in FW */
+       ret = ice_vsi_init(vsi);
+       if (ret < 0)
+               goto err_vsi;
+
+       ret = ice_vsi_alloc_arrays(vsi, false);
+       if (ret < 0)
+               goto err_vsi;
+
+       switch (vsi->type) {
+       case ICE_VSI_PF:
+               ret = ice_vsi_alloc_q_vectors(vsi);
+               if (ret)
+                       goto err_rings;
+
+               ret = ice_vsi_setup_vector_base(vsi);
+               if (ret)
+                       goto err_vectors;
+
+               ret = ice_vsi_alloc_rings(vsi);
+               if (ret)
+                       goto err_vectors;
+
+               ice_vsi_map_rings_to_vectors(vsi);
+               break;
+       case ICE_VSI_VF:
+               ret = ice_vsi_alloc_q_vectors(vsi);
+               if (ret)
+                       goto err_rings;
+
+               ret = ice_vsi_setup_vector_base(vsi);
+               if (ret)
+                       goto err_vectors;
+
+               ret = ice_vsi_alloc_rings(vsi);
+               if (ret)
+                       goto err_vectors;
+
+               vsi->back->q_left_tx -= vsi->alloc_txq;
+               vsi->back->q_left_rx -= vsi->alloc_rxq;
+               break;
+       default:
+               break;
+       }
+
+       ice_vsi_set_tc_cfg(vsi);
+
+       /* configure VSI nodes based on the number of queues and TCs */
+       for (i = 0; i < vsi->tc_cfg.numtc; i++)
+               max_txqs[i] = vsi->num_txq;
+
+       ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+                             max_txqs);
+       if (ret) {
+               dev_info(&vsi->back->pdev->dev,
+                        "Failed VSI lan queue config\n");
+               goto err_vectors;
+       }
+       return 0;
+
+err_vectors:
+       ice_vsi_free_q_vectors(vsi);
+err_rings:
+       if (vsi->netdev) {
+               vsi->current_netdev_flags = 0;
+               unregister_netdev(vsi->netdev);
+               free_netdev(vsi->netdev);
+               vsi->netdev = NULL;
+       }
+err_vsi:
+       ice_vsi_clear(vsi);
+       set_bit(__ICE_RESET_FAILED, vsi->back->state);
+       return ret;
+}
+
+/**
+ * ice_is_reset_in_progress - check for a reset in progress
+ * @state: pf state field
+ */
+bool ice_is_reset_in_progress(unsigned long *state)
+{
+       return test_bit(__ICE_RESET_OICR_RECV, state) ||
+              test_bit(__ICE_PFR_REQ, state) ||
+              test_bit(__ICE_CORER_REQ, state) ||
+              test_bit(__ICE_GLOBR_REQ, state);
+}
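
ice_is_reset_in_progress is just an OR of state bits. A minimal model of the same
check, using a plain unsigned long in place of the kernel bitmap helpers (bit
positions here are made up for the example):

    #include <stdbool.h>
    #include <stdio.h>

    enum { RESET_OICR_RECV, PFR_REQ, CORER_REQ, GLOBR_REQ };

    static bool test_bit(int nr, const unsigned long *addr)
    {
            return (*addr >> nr) & 1UL;
    }

    static bool reset_in_progress(const unsigned long *state)
    {
            return test_bit(RESET_OICR_RECV, state) ||
                   test_bit(PFR_REQ, state) ||
                   test_bit(CORER_REQ, state) ||
                   test_bit(GLOBR_REQ, state);
    }

    int main(void)
    {
            unsigned long state = 1UL << CORER_REQ;

            printf("%d\n", reset_in_progress(&state));  /* prints 1 */
            return 0;
    }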
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
new file mode 100644 (file)
index 0000000..677db40
--- /dev/null
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_LIB_H_
+#define _ICE_LIB_H_
+
+#include "ice.h"
+
+int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
+                       const u8 *macaddr);
+
+void ice_free_fltr_list(struct device *dev, struct list_head *h);
+
+void ice_update_eth_stats(struct ice_vsi *vsi);
+
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
+
+int ice_vsi_cfg_txqs(struct ice_vsi *vsi);
+
+void ice_vsi_cfg_msix(struct ice_vsi *vsi);
+
+int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid);
+
+int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);
+
+int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi);
+
+int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena);
+
+int ice_vsi_start_rx_rings(struct ice_vsi *vsi);
+
+int ice_vsi_stop_rx_rings(struct ice_vsi *vsi);
+
+int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+                         u16 rel_vmvf_num);
+
+int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
+
+void ice_vsi_delete(struct ice_vsi *vsi);
+
+int ice_vsi_clear(struct ice_vsi *vsi);
+
+struct ice_vsi *
+ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
+             enum ice_vsi_type type, u16 vf_id);
+
+int ice_vsi_release(struct ice_vsi *vsi);
+
+void ice_vsi_close(struct ice_vsi *vsi);
+
+int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);
+
+int
+ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id);
+
+int ice_vsi_rebuild(struct ice_vsi *vsi);
+
+bool ice_is_reset_in_progress(unsigned long *state);
+
+void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
+
+void ice_vsi_put_qs(struct ice_vsi *vsi);
+
+void ice_vsi_dis_irq(struct ice_vsi *vsi);
+
+void ice_vsi_free_irq(struct ice_vsi *vsi);
+
+void ice_vsi_free_rx_rings(struct ice_vsi *vsi);
+
+void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
+
+int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
+
+int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
+
+irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data);
+#endif /* !_ICE_LIB_H_ */
index 4f5fe6af6dac33f7117c30d073665992affff0b9..8f61b375e7687d10be07ff84b7b3f5cea25d51c2 100644 (file)
@@ -6,8 +6,9 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include "ice.h"
+#include "ice_lib.h"
 
-#define DRV_VERSION    "0.7.1-k"
+#define DRV_VERSION    "0.7.2-k"
 #define DRV_SUMMARY    "Intel(R) Ethernet Connection E800 Series Linux Driver"
 const char ice_drv_ver[] = DRV_VERSION;
 static const char ice_driver_string[] = DRV_SUMMARY;
@@ -31,7 +32,7 @@ static const struct net_device_ops ice_netdev_ops;
 
 static void ice_pf_dis_all_vsi(struct ice_pf *pf);
 static void ice_rebuild(struct ice_pf *pf);
-static int ice_vsi_release(struct ice_vsi *vsi);
+
 static void ice_vsi_release_all(struct ice_pf *pf);
 static void ice_update_vsi_stats(struct ice_vsi *vsi);
 static void ice_update_pf_stats(struct ice_pf *pf);
@@ -94,7 +95,7 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
                                /* Trigger sw interrupt to revive the queue */
                                v_idx = tx_ring->q_vector->v_idx;
                                wr32(&vsi->back->hw,
-                                    GLINT_DYN_CTL(vsi->base_vector + v_idx),
+                                    GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
                                     (itr << GLINT_DYN_CTL_ITR_INDX_S) |
                                     GLINT_DYN_CTL_SWINT_TRIG_M |
                                     GLINT_DYN_CTL_INTENA_MSK_M);
@@ -111,171 +112,6 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
        }
 }
 
-/**
- * ice_get_free_slot - get the next non-NULL location index in array
- * @array: array to search
- * @size: size of the array
- * @curr: last known occupied index to be used as a search hint
- *
- * void * is being used to keep the functionality generic. This lets us use this
- * function on any array of pointers.
- */
-static int ice_get_free_slot(void *array, int size, int curr)
-{
-       int **tmp_array = (int **)array;
-       int next;
-
-       if (curr < (size - 1) && !tmp_array[curr + 1]) {
-               next = curr + 1;
-       } else {
-               int i = 0;
-
-               while ((i < size) && (tmp_array[i]))
-                       i++;
-               if (i == size)
-                       next = ICE_NO_VSI;
-               else
-                       next = i;
-       }
-       return next;
-}
-
-/**
- * ice_search_res - Search the tracker for a block of resources
- * @res: pointer to the resource
- * @needed: size of the block needed
- * @id: identifier to track owner
- * Returns the base item index of the block, or -ENOMEM for error
- */
-static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
-{
-       int start = res->search_hint;
-       int end = start;
-
-       id |= ICE_RES_VALID_BIT;
-
-       do {
-               /* skip already allocated entries */
-               if (res->list[end++] & ICE_RES_VALID_BIT) {
-                       start = end;
-                       if ((start + needed) > res->num_entries)
-                               break;
-               }
-
-               if (end == (start + needed)) {
-                       int i = start;
-
-                       /* there was enough, so assign it to the requestor */
-                       while (i != end)
-                               res->list[i++] = id;
-
-                       if (end == res->num_entries)
-                               end = 0;
-
-                       res->search_hint = end;
-                       return start;
-               }
-       } while (1);
-
-       return -ENOMEM;
-}
-
-/**
- * ice_get_res - get a block of resources
- * @pf: board private structure
- * @res: pointer to the resource
- * @needed: size of the block needed
- * @id: identifier to track owner
- *
- * Returns the base item index of the block, or -ENOMEM for error
- * The search_hint trick and lack of advanced fit-finding only work
- * because we're highly likely to have all same-sized requests.
- * Linear search time and any fragmentation should be minimal.
- */
-static int
-ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
-{
-       int ret;
-
-       if (!res || !pf)
-               return -EINVAL;
-
-       if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
-               dev_err(&pf->pdev->dev,
-                       "param err: needed=%d, num_entries = %d id=0x%04x\n",
-                       needed, res->num_entries, id);
-               return -EINVAL;
-       }
-
-       /* search based on search_hint */
-       ret = ice_search_res(res, needed, id);
-
-       if (ret < 0) {
-               /* previous search failed. Reset search hint and try again */
-               res->search_hint = 0;
-               ret = ice_search_res(res, needed, id);
-       }
-
-       return ret;
-}
-
-/**
- * ice_free_res - free a block of resources
- * @res: pointer to the resource
- * @index: starting index previously returned by ice_get_res
- * @id: identifier to track owner
- * Returns number of resources freed
- */
-static int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
-{
-       int count = 0;
-       int i;
-
-       if (!res || index >= res->num_entries)
-               return -EINVAL;
-
-       id |= ICE_RES_VALID_BIT;
-       for (i = index; i < res->num_entries && res->list[i] == id; i++) {
-               res->list[i] = 0;
-               count++;
-       }
-
-       return count;
-}
-
-/**
- * ice_add_mac_to_list - Add a mac address filter entry to the list
- * @vsi: the VSI to be forwarded to
- * @add_list: pointer to the list which contains MAC filter entries
- * @macaddr: the MAC address to be added.
- *
- * Adds mac address filter entry to the temp list
- *
- * Returns 0 on success or -ENOMEM on failure.
- */
-static int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
-                              const u8 *macaddr)
-{
-       struct ice_fltr_list_entry *tmp;
-       struct ice_pf *pf = vsi->back;
-
-       tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
-       if (!tmp)
-               return -ENOMEM;
-
-       tmp->fltr_info.flag = ICE_FLTR_TX;
-       tmp->fltr_info.src = vsi->vsi_num;
-       tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
-       tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
-       tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
-       ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);
-
-       INIT_LIST_HEAD(&tmp->list_entry);
-       list_add(&tmp->list_entry, add_list);
-
-       return 0;
-}
-
 /**
  * ice_add_mac_to_sync_list - creates list of mac addresses to be synced
  * @netdev: the net device on which the sync is happening
@@ -318,24 +154,6 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
        return 0;
 }
 
-/**
- * ice_free_fltr_list - free filter lists helper
- * @dev: pointer to the device struct
- * @h: pointer to the list head to be freed
- *
- * Helper function to free filter lists previously created using
- * ice_add_mac_to_list
- */
-static void ice_free_fltr_list(struct device *dev, struct list_head *h)
-{
-       struct ice_fltr_list_entry *e, *tmp;
-
-       list_for_each_entry_safe(e, tmp, h, list_entry) {
-               list_del(&e->list_entry);
-               devm_kfree(dev, e);
-       }
-}
-
 /**
  * ice_vsi_fltr_changed - check if filter state changed
  * @vsi: VSI to be checked
@@ -349,63 +167,6 @@ static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
               test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
 }
 
-/**
- * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
- * @vsi: VSI to enable or disable VLAN pruning on
- * @ena: set to true to enable VLAN pruning and false to disable it
- *
- * returns 0 if VSI is updated, negative otherwise
- */
-static int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
-{
-       struct ice_vsi_ctx *ctxt;
-       struct device *dev;
-       int status;
-
-       if (!vsi)
-               return -EINVAL;
-
-       dev = &vsi->back->pdev->dev;
-       ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
-       if (!ctxt)
-               return -ENOMEM;
-
-       ctxt->info = vsi->info;
-
-       if (ena) {
-               ctxt->info.sec_flags |=
-                       ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
-                       ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
-               ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
-       } else {
-               ctxt->info.sec_flags &=
-                       ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
-                         ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
-               ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
-       }
-
-       ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
-                                               ICE_AQ_VSI_PROP_SW_VALID);
-       ctxt->vsi_num = vsi->vsi_num;
-       status = ice_aq_update_vsi(&vsi->back->hw, ctxt, NULL);
-       if (status) {
-               netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI %d failed, err = %d, aq_err = %d\n",
-                          ena ? "Ena" : "Dis", vsi->vsi_num, status,
-                          vsi->back->hw.adminq.sq_last_status);
-               goto err_out;
-       }
-
-       vsi->info.sec_flags = ctxt->info.sec_flags;
-       vsi->info.sw_flags2 = ctxt->info.sw_flags2;
-
-       devm_kfree(dev, ctxt);
-       return 0;
-
-err_out:
-       devm_kfree(dev, ctxt);
-       return -EIO;
-}
-
 /**
  * ice_vsi_sync_fltr - Update the VSI filter list to the HW
  * @vsi: ptr to the VSI
@@ -492,7 +253,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
                clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
                if (vsi->current_netdev_flags & IFF_PROMISC) {
                        /* Apply TX filter rule to get traffic from VMs */
-                       status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true,
+                       status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
                                                  ICE_FLTR_TX);
                        if (status) {
                                netdev_err(netdev, "Error setting default VSI %i tx rule\n",
@@ -502,7 +263,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
                                goto out_promisc;
                        }
                        /* Apply RX filter rule to get traffic from wire */
-                       status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true,
+                       status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
                                                  ICE_FLTR_RX);
                        if (status) {
                                netdev_err(netdev, "Error setting default VSI %i rx rule\n",
@@ -513,7 +274,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
                        }
                } else {
                        /* Clear TX filter rule to stop traffic from VMs */
-                       status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false,
+                       status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
                                                  ICE_FLTR_TX);
                        if (status) {
                                netdev_err(netdev, "Error clearing default VSI %i tx rule\n",
@@ -522,8 +283,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
                                err = -EIO;
                                goto out_promisc;
                        }
-                       /* Clear filter RX to remove traffic from wire */
-                       status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false,
+                       /* Clear RX filter to remove traffic from wire */
+                       status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
                                                  ICE_FLTR_RX);
                        if (status) {
                                netdev_err(netdev, "Error clearing default VSI %i rx rule\n",
@@ -570,15 +331,6 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
                }
 }
 
-/**
- * ice_is_reset_recovery_pending - schedule a reset
- * @state: pf state field
- */
-static bool ice_is_reset_recovery_pending(unsigned long int *state)
-{
-       return test_bit(__ICE_RESET_RECOVERY_PENDING, state);
-}
-
 /**
  * ice_prepare_for_reset - prep for the core to reset
  * @pf: board private structure
@@ -590,6 +342,10 @@ ice_prepare_for_reset(struct ice_pf *pf)
 {
        struct ice_hw *hw = &pf->hw;
 
+       /* Notify VFs of impending reset */
+       if (ice_check_sq_alive(hw, &hw->mailboxq))
+               ice_vc_notify_reset(pf);
+
        /* disable the VSIs and their queues that are not already DOWN */
        ice_pf_dis_all_vsi(pf);
 
@@ -612,21 +368,17 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
        dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
        WARN_ON(in_interrupt());
 
-       /* PFR is a bit of a special case because it doesn't result in an OICR
-        * interrupt. Set pending bit here which otherwise gets set in the
-        * OICR handler.
-        */
-       if (reset_type == ICE_RESET_PFR)
-               set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
-
        ice_prepare_for_reset(pf);
 
        /* trigger the reset */
        if (ice_reset(hw, reset_type)) {
                dev_err(dev, "reset %d failed\n", reset_type);
                set_bit(__ICE_RESET_FAILED, pf->state);
-               clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+               clear_bit(__ICE_RESET_OICR_RECV, pf->state);
                clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
+               clear_bit(__ICE_PFR_REQ, pf->state);
+               clear_bit(__ICE_CORER_REQ, pf->state);
+               clear_bit(__ICE_GLOBR_REQ, pf->state);
                return;
        }
 
@@ -637,8 +389,8 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
        if (reset_type == ICE_RESET_PFR) {
                pf->pfr_count++;
                ice_rebuild(pf);
-               clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
                clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
+               clear_bit(__ICE_PFR_REQ, pf->state);
        }
 }
 
@@ -653,14 +405,14 @@ static void ice_reset_subtask(struct ice_pf *pf)
        /* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
         * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
         * of reset is pending and sets bits in pf->state indicating the reset
-        * type and __ICE_RESET_RECOVERY_PENDING.  So, if the latter bit is set
+        * type and __ICE_RESET_OICR_RECV.  So, if the latter bit is set
         * prepare for pending reset if not already (for PF software-initiated
         * global resets the software should already be prepared for it as
         * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated
         * by firmware or software on other PFs, that bit is not set so prepare
         * for the reset now), poll for reset done, rebuild and return.
         */
-       if (ice_is_reset_recovery_pending(pf->state)) {
+       if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
                clear_bit(__ICE_GLOBR_RECV, pf->state);
                clear_bit(__ICE_CORER_RECV, pf->state);
                if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
@@ -676,19 +428,22 @@ static void ice_reset_subtask(struct ice_pf *pf)
                        /* clear bit to resume normal operations, but
                         * ICE_NEEDS_RESTART bit is set in case rebuild failed
                         */
-                       clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+                       clear_bit(__ICE_RESET_OICR_RECV, pf->state);
                        clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
+                       clear_bit(__ICE_PFR_REQ, pf->state);
+                       clear_bit(__ICE_CORER_REQ, pf->state);
+                       clear_bit(__ICE_GLOBR_REQ, pf->state);
                }
 
                return;
        }
 
        /* No pending resets to finish processing. Check for new resets */
-       if (test_and_clear_bit(__ICE_PFR_REQ, pf->state))
+       if (test_bit(__ICE_PFR_REQ, pf->state))
                reset_type = ICE_RESET_PFR;
-       if (test_and_clear_bit(__ICE_CORER_REQ, pf->state))
+       if (test_bit(__ICE_CORER_REQ, pf->state))
                reset_type = ICE_RESET_CORER;
-       if (test_and_clear_bit(__ICE_GLOBR_REQ, pf->state))
+       if (test_bit(__ICE_GLOBR_REQ, pf->state))
                reset_type = ICE_RESET_GLOBR;
        /* If no valid reset type requested just return */
        if (reset_type == ICE_RESET_INVAL)
@@ -910,6 +665,8 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
                }
        }
 
+       ice_vc_notify_link_state(pf);
+
        return 0;
 }
 
@@ -960,6 +717,10 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
                cq = &hw->adminq;
                qtype = "Admin";
                break;
+       case ICE_CTL_Q_MAILBOX:
+               cq = &hw->mailboxq;
+               qtype = "Mailbox";
+               break;
        default:
                dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
                         q_type);
@@ -1041,6 +802,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
                                dev_err(&pf->pdev->dev,
                                        "Could not handle link event\n");
                        break;
+               case ice_mbx_opc_send_msg_to_pf:
+                       ice_vc_process_vf_msg(pf, &event);
+                       break;
                case ice_aqc_opc_fw_logging:
                        ice_output_fw_log(hw, &event.desc, event.msg_buf);
                        break;
@@ -1099,6 +863,28 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf)
        ice_flush(hw);
 }
 
+/**
+ * ice_clean_mailboxq_subtask - clean the MailboxQ rings
+ * @pf: board private structure
+ */
+static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
+{
+       struct ice_hw *hw = &pf->hw;
+
+       if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state))
+               return;
+
+       if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
+               return;
+
+       clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
+
+       if (ice_ctrlq_pending(hw, &hw->mailboxq))
+               __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
+
+       ice_flush(hw);
+}
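
Note the shape of ice_clean_mailboxq_subtask: clean, clear the pending bit, then
check the queue once more. Clearing the flag before the final check closes the
window where an event arriving mid-clean would sit unserviced until the next timer
tick. A standalone sketch of that clear-then-recheck pattern (the flag and queue
counter are illustrative, not the driver's types):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool event_pending;
    static atomic_int  queued;

    static void clean_queue(void) { atomic_store(&queued, 0); }

    static void clean_subtask(void)
    {
            if (!atomic_load(&event_pending))
                    return;

            clean_queue();
            atomic_store(&event_pending, false);

            /* an event may have been queued between the clean above and
             * the flag clear; drain it now rather than on the next pass */
            if (atomic_load(&queued) > 0)
                    clean_queue();
    }

    int main(void)
    {
            atomic_store(&event_pending, true);
            atomic_store(&queued, 3);
            clean_subtask();
            printf("queued after subtask: %d\n", atomic_load(&queued));
            return 0;
    }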
+
 /**
  * ice_service_task_schedule - schedule the service task to wake up
  * @pf: board private structure
@@ -1165,6 +951,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
        struct ice_hw *hw = &pf->hw;
        bool mdd_detected = false;
        u32 reg;
+       int i;
 
        if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state))
                return;
@@ -1254,6 +1041,51 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
                }
        }
 
+       /* see if one of the VFs needs to be reset */
+       for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
+               struct ice_vf *vf = &pf->vf[i];
+
+               reg = rd32(hw, VP_MDET_TX_PQM(i));
+               if (reg & VP_MDET_TX_PQM_VALID_M) {
+                       wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
+                       vf->num_mdd_events++;
+                       dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+                                i);
+               }
+
+               reg = rd32(hw, VP_MDET_TX_TCLAN(i));
+               if (reg & VP_MDET_TX_TCLAN_VALID_M) {
+                       wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
+                       vf->num_mdd_events++;
+                       dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+                                i);
+               }
+
+               reg = rd32(hw, VP_MDET_TX_TDPU(i));
+               if (reg & VP_MDET_TX_TDPU_VALID_M) {
+                       wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
+                       vf->num_mdd_events++;
+                       dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+                                i);
+               }
+
+               reg = rd32(hw, VP_MDET_RX(i));
+               if (reg & VP_MDET_RX_VALID_M) {
+                       wr32(hw, VP_MDET_RX(i), 0xFFFF);
+                       vf->num_mdd_events++;
+                       dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
+                                i);
+               }
+
+               if (vf->num_mdd_events > ICE_DFLT_NUM_MDD_EVENTS_ALLOWED) {
+                       dev_info(&pf->pdev->dev,
+                                "Too many MDD events on VF %d, disabled\n", i);
+                       dev_info(&pf->pdev->dev,
+                                "Use PF Control I/F to re-enable the VF\n");
+                       set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+               }
+       }
+
        /* re-enable MDD interrupt cause */
        clear_bit(__ICE_MDD_EVENT_PENDING, pf->state);
        reg = rd32(hw, PFINT_OICR_ENA);
@@ -1277,7 +1109,7 @@ static void ice_service_task(struct work_struct *work)
        ice_reset_subtask(pf);
 
        /* bail if a reset/recovery cycle is pending or rebuild failed */
-       if (ice_is_reset_recovery_pending(pf->state) ||
+       if (ice_is_reset_in_progress(pf->state) ||
            test_bit(__ICE_SUSPENDED, pf->state) ||
            test_bit(__ICE_NEEDS_RESTART, pf->state)) {
                ice_service_task_complete(pf);
@@ -1287,8 +1119,10 @@ static void ice_service_task(struct work_struct *work)
        ice_check_for_hang_subtask(pf);
        ice_sync_fltr_subtask(pf);
        ice_handle_mdd_event(pf);
+       ice_process_vflr_event(pf);
        ice_watchdog_subtask(pf);
        ice_clean_adminq_subtask(pf);
+       ice_clean_mailboxq_subtask(pf);
 
        /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
        ice_service_task_complete(pf);
@@ -1299,6 +1133,8 @@ static void ice_service_task(struct work_struct *work)
         */
        if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
            test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
+           test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
+           test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
            test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
                mod_timer(&pf->serv_tmr, jiffies);
 }
@@ -1313,6 +1149,10 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
        hw->adminq.num_sq_entries = ICE_AQ_LEN;
        hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
        hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
+       hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
+       hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
+       hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
+       hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
 }
 
 /**
@@ -1342,57 +1182,6 @@ static void ice_irq_affinity_notify(struct irq_affinity_notify *notify,
  */
 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
 
-/**
- * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
- * @vsi: the VSI being un-configured
- */
-static void ice_vsi_dis_irq(struct ice_vsi *vsi)
-{
-       struct ice_pf *pf = vsi->back;
-       struct ice_hw *hw = &pf->hw;
-       int base = vsi->base_vector;
-       u32 val;
-       int i;
-
-       /* disable interrupt causation from each queue */
-       if (vsi->tx_rings) {
-               ice_for_each_txq(vsi, i) {
-                       if (vsi->tx_rings[i]) {
-                               u16 reg;
-
-                               reg = vsi->tx_rings[i]->reg_idx;
-                               val = rd32(hw, QINT_TQCTL(reg));
-                               val &= ~QINT_TQCTL_CAUSE_ENA_M;
-                               wr32(hw, QINT_TQCTL(reg), val);
-                       }
-               }
-       }
-
-       if (vsi->rx_rings) {
-               ice_for_each_rxq(vsi, i) {
-                       if (vsi->rx_rings[i]) {
-                               u16 reg;
-
-                               reg = vsi->rx_rings[i]->reg_idx;
-                               val = rd32(hw, QINT_RQCTL(reg));
-                               val &= ~QINT_RQCTL_CAUSE_ENA_M;
-                               wr32(hw, QINT_RQCTL(reg), val);
-                       }
-               }
-       }
-
-       /* disable each interrupt */
-       if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
-               for (i = vsi->base_vector;
-                    i < (vsi->num_q_vectors + vsi->base_vector); i++)
-                       wr32(hw, GLINT_DYN_CTL(i), 0);
-
-               ice_flush(hw);
-               for (i = 0; i < vsi->num_q_vectors; i++)
-                       synchronize_irq(pf->msix_entries[i + base].vector);
-       }
-}
-
 /**
  * ice_vsi_ena_irq - Enable IRQ for the given VSI
  * @vsi: the VSI being configured
@@ -1413,26 +1202,6 @@ static int ice_vsi_ena_irq(struct ice_vsi *vsi)
        return 0;
 }
 
-/**
- * ice_vsi_delete - delete a VSI from the switch
- * @vsi: pointer to VSI being removed
- */
-static void ice_vsi_delete(struct ice_vsi *vsi)
-{
-       struct ice_pf *pf = vsi->back;
-       struct ice_vsi_ctx ctxt;
-       enum ice_status status;
-
-       ctxt.vsi_num = vsi->vsi_num;
-
-       memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));
-
-       status = ice_free_vsi(&pf->hw, vsi->idx, &ctxt, false, NULL);
-       if (status)
-               dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
-                       vsi->vsi_num);
-}
-
 /**
  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
  * @vsi: the VSI being configured
@@ -1442,7 +1211,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
 {
        int q_vectors = vsi->num_q_vectors;
        struct ice_pf *pf = vsi->back;
-       int base = vsi->base_vector;
+       int base = vsi->sw_base_vector;
        int rx_int_idx = 0;
        int tx_int_idx = 0;
        int vector, err;
@@ -1501,541 +1270,84 @@ free_q_irqs:
 }
 
 /**
- * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
- * @vsi: the VSI being configured
+ * ice_ena_misc_vector - enable the non-queue interrupts
+ * @pf: board private structure
  */
-static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
+static void ice_ena_misc_vector(struct ice_pf *pf)
 {
-       struct ice_hw_common_caps *cap;
-       struct ice_pf *pf = vsi->back;
+       struct ice_hw *hw = &pf->hw;
+       u32 val;
 
-       if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
-               vsi->rss_size = 1;
-               return;
-       }
+       /* clear things first */
+       wr32(hw, PFINT_OICR_ENA, 0);    /* disable all */
+       rd32(hw, PFINT_OICR);           /* read to clear */
 
-       cap = &pf->hw.func_caps.common_cap;
-       switch (vsi->type) {
-       case ICE_VSI_PF:
-               /* PF VSI will inherit RSS instance of PF */
-               vsi->rss_table_size = cap->rss_table_size;
-               vsi->rss_size = min_t(int, num_online_cpus(),
-                                     BIT(cap->rss_table_entry_width));
-               vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
-               break;
-       default:
-               dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
-               break;
-       }
+       val = (PFINT_OICR_ECC_ERR_M |
+              PFINT_OICR_MAL_DETECT_M |
+              PFINT_OICR_GRST_M |
+              PFINT_OICR_PCI_EXCEPTION_M |
+              PFINT_OICR_VFLR_M |
+              PFINT_OICR_HMC_ERR_M |
+              PFINT_OICR_PE_CRITERR_M);
+
+       wr32(hw, PFINT_OICR_ENA, val);
+
+       /* SW_ITR_IDX = 0, but don't change INTENA */
+       wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx),
+            GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
 }
 
 /**
- * ice_vsi_setup_q_map - Setup a VSI queue map
- * @vsi: the VSI being configured
- * @ctxt: VSI context structure
+ * ice_misc_intr - misc interrupt handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
  */
-static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
+static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
 {
-       u16 offset = 0, qmap = 0, numq_tc;
-       u16 pow = 0, max_rss = 0, qcount;
-       u16 qcount_tx = vsi->alloc_txq;
-       u16 qcount_rx = vsi->alloc_rxq;
-       bool ena_tc0 = false;
-       int i;
-
-       /* at least TC0 should be enabled by default */
-       if (vsi->tc_cfg.numtc) {
-               if (!(vsi->tc_cfg.ena_tc & BIT(0)))
-                       ena_tc0 =  true;
-       } else {
-               ena_tc0 =  true;
-       }
-
-       if (ena_tc0) {
-               vsi->tc_cfg.numtc++;
-               vsi->tc_cfg.ena_tc |= 1;
-       }
+       struct ice_pf *pf = (struct ice_pf *)data;
+       struct ice_hw *hw = &pf->hw;
+       irqreturn_t ret = IRQ_NONE;
+       u32 oicr, ena_mask;
 
-       numq_tc = qcount_rx / vsi->tc_cfg.numtc;
-
-       /* TC mapping is a function of the number of Rx queues assigned to the
-        * VSI for each traffic class and the offset of these queues.
-        * The first 10 bits are the queue offset for TC0, and the next 4 bits
-        * are the number of queues allocated to TC0, as a power of 2.
-        *
-        * If a TC is not enabled, its queue offset is set to 0 and it is
-        * allocated one queue; this way, traffic for that TC is sent to the
-        * default queue.
-        *
-        * Set up the number and offset of Rx queues for all TCs for the VSI.
-        */
+       set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
+       set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
 
-       /* qcount will change if RSS is enabled */
-       if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
-               if (vsi->type == ICE_VSI_PF)
-                       max_rss = ICE_MAX_LG_RSS_QS;
-               else
-                       max_rss = ICE_MAX_SMALL_RSS_QS;
+       oicr = rd32(hw, PFINT_OICR);
+       ena_mask = rd32(hw, PFINT_OICR_ENA);
 
-               qcount = min_t(int, numq_tc, max_rss);
-               qcount = min_t(int, qcount, vsi->rss_size);
-       } else {
-               qcount = numq_tc;
+       if (oicr & PFINT_OICR_MAL_DETECT_M) {
+               ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
+               set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
        }
-
-       /* find the (rounded up) power-of-2 of qcount */
-       pow = order_base_2(qcount);
-
-       for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
-               if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
-                       /* TC is not enabled */
-                       vsi->tc_cfg.tc_info[i].qoffset = 0;
-                       vsi->tc_cfg.tc_info[i].qcount = 1;
-                       ctxt->info.tc_mapping[i] = 0;
-                       continue;
-               }
-
-               /* TC is enabled */
-               vsi->tc_cfg.tc_info[i].qoffset = offset;
-               vsi->tc_cfg.tc_info[i].qcount = qcount;
-
-               qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
-                       ICE_AQ_VSI_TC_Q_OFFSET_M) |
-                       ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
-                        ICE_AQ_VSI_TC_Q_NUM_M);
-               offset += qcount;
-               ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
+       if (oicr & PFINT_OICR_VFLR_M) {
+               ena_mask &= ~PFINT_OICR_VFLR_M;
+               set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
        }
 
-       vsi->num_txq = qcount_tx;
-       vsi->num_rxq = offset;
+       if (oicr & PFINT_OICR_GRST_M) {
+               u32 reset;
 
-       /* Rx queue mapping */
-       ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
-       /* q_mapping buffer holds the info for the first queue allocated for
-        * this VSI in the PF space and also the number of queues associated
-        * with this VSI.
-        */
-       ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
-       ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
-}
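
A worked example of the TC queue-map encoding that ice_vsi_setup_q_map builds: the
queue offset in the low 10 bits and log2(queue count) in the next 4 bits, following
the layout the comment describes. The shift/mask values below follow that
description and are for illustration only, not the hardware's register definitions.

    #include <stdio.h>

    #define Q_OFFSET_S 0
    #define Q_OFFSET_M 0x3FF        /* 10 bits of queue offset   */
    #define Q_NUM_S    10
    #define Q_NUM_M    (0xF << 10)  /* 4 bits of log2(count)     */

    static unsigned int order_base_2(unsigned int n)  /* ceil(log2(n)) */
    {
            unsigned int pow = 0;

            while ((1U << pow) < n)
                    pow++;
            return pow;
    }

    int main(void)
    {
            unsigned int offset = 8, qcount = 6;
            unsigned int pow = order_base_2(qcount); /* 6 rounds up to 8 */
            unsigned short qmap =
                    ((offset << Q_OFFSET_S) & Q_OFFSET_M) |
                    ((pow << Q_NUM_S) & Q_NUM_M);

            /* offset 8, 2^3 queues -> qmap = 0x0C08 */
            printf("qmap = 0x%04X\n", (unsigned)qmap);
            return 0;
    }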
+               /* we have a reset warning */
+               ena_mask &= ~PFINT_OICR_GRST_M;
+               reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
+                       GLGEN_RSTAT_RESET_TYPE_S;
 
-/**
- * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
- * @ctxt: the VSI context being set
- *
- * This initializes a default VSI context for all sections except the Queues.
- */
-static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
-{
-       u32 table = 0;
-
-       memset(&ctxt->info, 0, sizeof(ctxt->info));
-       /* VSI's should be allocated from shared pool */
-       ctxt->alloc_from_pool = true;
-       /* Src pruning enabled by default */
-       ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
-       /* Traffic from VSI can be sent to LAN */
-       ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
-
-       /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
-        * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
-        * packets untagged/tagged.
-        */
-       ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
-                                 ICE_AQ_VSI_VLAN_MODE_M) >>
-                                ICE_AQ_VSI_VLAN_MODE_S);
-
-       /* Have 1:1 UP mapping for both ingress/egress tables */
-       table |= ICE_UP_TABLE_TRANSLATE(0, 0);
-       table |= ICE_UP_TABLE_TRANSLATE(1, 1);
-       table |= ICE_UP_TABLE_TRANSLATE(2, 2);
-       table |= ICE_UP_TABLE_TRANSLATE(3, 3);
-       table |= ICE_UP_TABLE_TRANSLATE(4, 4);
-       table |= ICE_UP_TABLE_TRANSLATE(5, 5);
-       table |= ICE_UP_TABLE_TRANSLATE(6, 6);
-       table |= ICE_UP_TABLE_TRANSLATE(7, 7);
-       ctxt->info.ingress_table = cpu_to_le32(table);
-       ctxt->info.egress_table = cpu_to_le32(table);
-       /* Have 1:1 UP mapping for outer to inner UP table */
-       ctxt->info.outer_up_table = cpu_to_le32(table);
-       /* No Outer tag support outer_tag_flags remains to zero */
-}
-
-/**
- * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
- * @ctxt: the VSI context being set
- * @vsi: the VSI being configured
- */
-static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
-{
-       u8 lut_type, hash_type;
-
-       switch (vsi->type) {
-       case ICE_VSI_PF:
-               /* PF VSI will inherit RSS instance of PF */
-               lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
-               hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
-               break;
-       default:
-               dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
-                        vsi->type);
-               return;
-       }
-
-       ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
-                               ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
-                               ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
-                                ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
-}
-
-/**
- * ice_vsi_init - Create and initialize a VSI
- * @vsi: the VSI being configured
- *
- * This initializes a VSI context depending on the VSI type to be added and
- * passes it down to the add_vsi aq command to create a new VSI.
- */
-static int ice_vsi_init(struct ice_vsi *vsi)
-{
-       struct ice_vsi_ctx ctxt = { 0 };
-       struct ice_pf *pf = vsi->back;
-       struct ice_hw *hw = &pf->hw;
-       int ret = 0;
-
-       switch (vsi->type) {
-       case ICE_VSI_PF:
-               ctxt.flags = ICE_AQ_VSI_TYPE_PF;
-               break;
-       default:
-               return -ENODEV;
-       }
-
-       ice_set_dflt_vsi_ctx(&ctxt);
-       /* if the switch is in VEB mode, allow VSI loopback */
-       if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
-               ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
-
-       /* Set LUT type and HASH type if RSS is enabled */
-       if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
-               ice_set_rss_vsi_ctx(&ctxt, vsi);
-
-       ctxt.info.sw_id = vsi->port_info->sw_id;
-       ice_vsi_setup_q_map(vsi, &ctxt);
-
-       ret = ice_add_vsi(hw, vsi->idx, &ctxt, NULL);
-       if (ret) {
-               dev_err(&pf->pdev->dev,
-                       "Add VSI failed, err %d\n", ret);
-               return -EIO;
-       }
-
-       /* keep context for update VSI operations */
-       vsi->info = ctxt.info;
-
-       /* record VSI number returned */
-       vsi->vsi_num = ctxt.vsi_num;
-
-       return ret;
-}
-
-/**
- * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
- * @vsi: the VSI being cleaned up
- */
-static void ice_vsi_release_msix(struct ice_vsi *vsi)
-{
-       struct ice_pf *pf = vsi->back;
-       u16 vector = vsi->base_vector;
-       struct ice_hw *hw = &pf->hw;
-       u32 txq = 0;
-       u32 rxq = 0;
-       int i, q;
-
-       for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
-               struct ice_q_vector *q_vector = vsi->q_vectors[i];
-
-               wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0);
-               wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0);
-               for (q = 0; q < q_vector->num_ring_tx; q++) {
-                       wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
-                       txq++;
-               }
-
-               for (q = 0; q < q_vector->num_ring_rx; q++) {
-                       wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
-                       rxq++;
-               }
-       }
-
-       ice_flush(hw);
-}
-
-/**
- * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
- * @vsi: the VSI having rings deallocated
- */
-static void ice_vsi_clear_rings(struct ice_vsi *vsi)
-{
-       int i;
-
-       if (vsi->tx_rings) {
-               for (i = 0; i < vsi->alloc_txq; i++) {
-                       if (vsi->tx_rings[i]) {
-                               kfree_rcu(vsi->tx_rings[i], rcu);
-                               vsi->tx_rings[i] = NULL;
-                       }
-               }
-       }
-       if (vsi->rx_rings) {
-               for (i = 0; i < vsi->alloc_rxq; i++) {
-                       if (vsi->rx_rings[i]) {
-                               kfree_rcu(vsi->rx_rings[i], rcu);
-                               vsi->rx_rings[i] = NULL;
-                       }
-               }
-       }
-}
-
-/**
- * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
- * @vsi: VSI which is having rings allocated
- */
-static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
-{
-       struct ice_pf *pf = vsi->back;
-       int i;
-
-       /* Allocate tx_rings */
-       for (i = 0; i < vsi->alloc_txq; i++) {
-               struct ice_ring *ring;
-
-               /* allocate with kzalloc(), free with kfree_rcu() */
-               ring = kzalloc(sizeof(*ring), GFP_KERNEL);
-
-               if (!ring)
-                       goto err_out;
-
-               ring->q_index = i;
-               ring->reg_idx = vsi->txq_map[i];
-               ring->ring_active = false;
-               ring->vsi = vsi;
-               ring->netdev = vsi->netdev;
-               ring->dev = &pf->pdev->dev;
-               ring->count = vsi->num_desc;
-
-               vsi->tx_rings[i] = ring;
-       }
-
-       /* Allocate rx_rings */
-       for (i = 0; i < vsi->alloc_rxq; i++) {
-               struct ice_ring *ring;
-
-               /* allocate with kzalloc(), free with kfree_rcu() */
-               ring = kzalloc(sizeof(*ring), GFP_KERNEL);
-               if (!ring)
-                       goto err_out;
-
-               ring->q_index = i;
-               ring->reg_idx = vsi->rxq_map[i];
-               ring->ring_active = false;
-               ring->vsi = vsi;
-               ring->netdev = vsi->netdev;
-               ring->dev = &pf->pdev->dev;
-               ring->count = vsi->num_desc;
-               vsi->rx_rings[i] = ring;
-       }
-
-       return 0;
-
-err_out:
-       ice_vsi_clear_rings(vsi);
-       return -ENOMEM;
-}
-
-/**
- * ice_vsi_free_irq - Free the irq association with the OS
- * @vsi: the VSI being configured
- */
-static void ice_vsi_free_irq(struct ice_vsi *vsi)
-{
-       struct ice_pf *pf = vsi->back;
-       int base = vsi->base_vector;
-
-       if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
-               int i;
-
-               if (!vsi->q_vectors || !vsi->irqs_ready)
-                       return;
-
-               vsi->irqs_ready = false;
-               for (i = 0; i < vsi->num_q_vectors; i++) {
-                       u16 vector = i + base;
-                       int irq_num;
-
-                       irq_num = pf->msix_entries[vector].vector;
-
-                       /* free only the irqs that were actually requested */
-                       if (!vsi->q_vectors[i] ||
-                           !(vsi->q_vectors[i]->num_ring_tx ||
-                             vsi->q_vectors[i]->num_ring_rx))
-                               continue;
-
-                       /* clear the affinity notifier in the IRQ descriptor */
-                       irq_set_affinity_notifier(irq_num, NULL);
-
-                       /* clear the affinity_mask in the IRQ descriptor */
-                       irq_set_affinity_hint(irq_num, NULL);
-                       synchronize_irq(irq_num);
-                       devm_free_irq(&pf->pdev->dev, irq_num,
-                                     vsi->q_vectors[i]);
-               }
-               ice_vsi_release_msix(vsi);
-       }
-}
-
-/**
- * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
- * @vsi: the VSI being configured
- */
-static void ice_vsi_cfg_msix(struct ice_vsi *vsi)
-{
-       struct ice_pf *pf = vsi->back;
-       u16 vector = vsi->base_vector;
-       struct ice_hw *hw = &pf->hw;
-       u32 txq = 0, rxq = 0;
-       int i, q, itr;
-       u8 itr_gran;
-
-       for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
-               struct ice_q_vector *q_vector = vsi->q_vectors[i];
-
-               itr_gran = hw->itr_gran_200;
-
-               if (q_vector->num_ring_rx) {
-                       q_vector->rx.itr =
-                               ITR_TO_REG(vsi->rx_rings[rxq]->rx_itr_setting,
-                                          itr_gran);
-                       q_vector->rx.latency_range = ICE_LOW_LATENCY;
-               }
-
-               if (q_vector->num_ring_tx) {
-                       q_vector->tx.itr =
-                               ITR_TO_REG(vsi->tx_rings[txq]->tx_itr_setting,
-                                          itr_gran);
-                       q_vector->tx.latency_range = ICE_LOW_LATENCY;
-               }
-               wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr);
-               wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr);
-
-               /* Both Transmit Queue Interrupt Cause Control register
-                * and Receive Queue Interrupt Cause control register
-                * expects MSIX_INDX field to be the vector index
-                * within the function space and not the absolute
-                * vector index across PF or across device.
-                * For SR-IOV VF VSIs the queue vector index always starts
-                * at 1, since the first vector index (0) is used for OICR
-                * in VF space. Since VMDq and other PF VSIs are within
-                * the PF function space, use the vector index that is
-                * tracked for this PF.
-                */
-               for (q = 0; q < q_vector->num_ring_tx; q++) {
-                       u32 val;
-
-                       itr = ICE_TX_ITR;
-                       val = QINT_TQCTL_CAUSE_ENA_M |
-                             (itr << QINT_TQCTL_ITR_INDX_S)  |
-                             (vector << QINT_TQCTL_MSIX_INDX_S);
-                       wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
-                       txq++;
-               }
-
-               for (q = 0; q < q_vector->num_ring_rx; q++) {
-                       u32 val;
-
-                       itr = ICE_RX_ITR;
-                       val = QINT_RQCTL_CAUSE_ENA_M |
-                             (itr << QINT_RQCTL_ITR_INDX_S)  |
-                             (vector << QINT_RQCTL_MSIX_INDX_S);
-                       wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
-                       rxq++;
-               }
-       }
-
-       ice_flush(hw);
-}
-
-/**
- * ice_ena_misc_vector - enable the non-queue interrupts
- * @pf: board private structure
- */
-static void ice_ena_misc_vector(struct ice_pf *pf)
-{
-       struct ice_hw *hw = &pf->hw;
-       u32 val;
-
-       /* clear things first */
-       wr32(hw, PFINT_OICR_ENA, 0);    /* disable all */
-       rd32(hw, PFINT_OICR);           /* read to clear */
-
-       val = (PFINT_OICR_ECC_ERR_M |
-              PFINT_OICR_MAL_DETECT_M |
-              PFINT_OICR_GRST_M |
-              PFINT_OICR_PCI_EXCEPTION_M |
-              PFINT_OICR_HMC_ERR_M |
-              PFINT_OICR_PE_CRITERR_M);
-
-       wr32(hw, PFINT_OICR_ENA, val);
-
-       /* SW_ITR_IDX = 0, but don't change INTENA */
-       wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
-            GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
-}
-
-/**
- * ice_misc_intr - misc interrupt handler
- * @irq: interrupt number
- * @data: pointer to a q_vector
- */
-static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
-{
-       struct ice_pf *pf = (struct ice_pf *)data;
-       struct ice_hw *hw = &pf->hw;
-       irqreturn_t ret = IRQ_NONE;
-       u32 oicr, ena_mask;
-
-       set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
-
-       oicr = rd32(hw, PFINT_OICR);
-       ena_mask = rd32(hw, PFINT_OICR_ENA);
-
-       if (oicr & PFINT_OICR_MAL_DETECT_M) {
-               ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
-               set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
-       }
-
-       if (oicr & PFINT_OICR_GRST_M) {
-               u32 reset;
-
-               /* we have a reset warning */
-               ena_mask &= ~PFINT_OICR_GRST_M;
-               reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
-                       GLGEN_RSTAT_RESET_TYPE_S;
-
-               if (reset == ICE_RESET_CORER)
-                       pf->corer_count++;
-               else if (reset == ICE_RESET_GLOBR)
-                       pf->globr_count++;
-               else
-                       pf->empr_count++;
+               if (reset == ICE_RESET_CORER)
+                       pf->corer_count++;
+               else if (reset == ICE_RESET_GLOBR)
+                       pf->globr_count++;
+               else if (reset == ICE_RESET_EMPR)
+                       pf->empr_count++;
+               else
+                       dev_dbg(&pf->pdev->dev, "Invalid reset type %d\n",
+                               reset);
 
                /* If a reset cycle isn't already in progress, we set a bit in
                 * pf->state so that the service task can start a reset/rebuild.
                 * We also make note of which reset happened so that peer
                 * devices/drivers can be informed.
                 */
-               if (!test_and_set_bit(__ICE_RESET_RECOVERY_PENDING,
-                                     pf->state)) {
+               if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) {
                        if (reset == ICE_RESET_CORER)
                                set_bit(__ICE_CORER_RECV, pf->state);
                        else if (reset == ICE_RESET_GLOBR)
@@ -2049,7 +1361,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
                         * is received and set back to false after the driver
                         * has determined that the hardware is out of reset.
                         *
-                        * __ICE_RESET_RECOVERY_PENDING in pf->state indicates
+                        * __ICE_RESET_OICR_RECV in pf->state indicates
                         * that a post reset rebuild is required before the
                         * driver is operational again. This is set above.
                         *
@@ -2097,286 +1409,106 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
 }
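
The reset-type decode near the top of ice_misc_intr() is a plain
mask-and-shift field read from GLGEN_RSTAT. A minimal stand-alone sketch of
that pattern; the mask and shift values below are placeholders, not the
driver's register definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder field layout; the real values are the driver's
     * GLGEN_RSTAT_RESET_TYPE_M and GLGEN_RSTAT_RESET_TYPE_S. */
    #define RESET_TYPE_S 0
    #define RESET_TYPE_M (0x3u << RESET_TYPE_S)

    int main(void)
    {
        uint32_t rstat = 0x2; /* pretend register readout */
        uint32_t reset = (rstat & RESET_TYPE_M) >> RESET_TYPE_S;

        printf("reset type %u\n", reset); /* prints "reset type 2" */
        return 0;
    }
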
 
 /**
- * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
- * @vsi: the VSI being configured
- *
- * This function maps descriptor rings to the queue-specific vectors allotted
- * through the MSI-X enabling code. On a constrained vector budget, we map Tx
- * and Rx rings to the vector as "efficiently" as possible.
+ * ice_free_irq_msix_misc - Unroll misc vector setup
+ * @pf: board private structure
  */
-static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
+static void ice_free_irq_msix_misc(struct ice_pf *pf)
 {
-       int q_vectors = vsi->num_q_vectors;
-       int tx_rings_rem, rx_rings_rem;
-       int v_id;
-
-       /* initially assigning remaining rings count to VSIs num queue value */
-       tx_rings_rem = vsi->num_txq;
-       rx_rings_rem = vsi->num_rxq;
-
-       for (v_id = 0; v_id < q_vectors; v_id++) {
-               struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
-               int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
-
-               /* Tx rings mapping to vector */
-               tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
-               q_vector->num_ring_tx = tx_rings_per_v;
-               q_vector->tx.ring = NULL;
-               q_base = vsi->num_txq - tx_rings_rem;
-
-               for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
-                       struct ice_ring *tx_ring = vsi->tx_rings[q_id];
-
-                       tx_ring->q_vector = q_vector;
-                       tx_ring->next = q_vector->tx.ring;
-                       q_vector->tx.ring = tx_ring;
-               }
-               tx_rings_rem -= tx_rings_per_v;
-
-               /* Rx rings mapping to vector */
-               rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
-               q_vector->num_ring_rx = rx_rings_per_v;
-               q_vector->rx.ring = NULL;
-               q_base = vsi->num_rxq - rx_rings_rem;
-
-               for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
-                       struct ice_ring *rx_ring = vsi->rx_rings[q_id];
+       /* disable OICR interrupt */
+       wr32(&pf->hw, PFINT_OICR_ENA, 0);
+       ice_flush(&pf->hw);
 
-                       rx_ring->q_vector = q_vector;
-                       rx_ring->next = q_vector->rx.ring;
-                       q_vector->rx.ring = rx_ring;
-               }
-               rx_rings_rem -= rx_rings_per_v;
+       if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
+               synchronize_irq(pf->msix_entries[pf->sw_oicr_idx].vector);
+               devm_free_irq(&pf->pdev->dev,
+                             pf->msix_entries[pf->sw_oicr_idx].vector, pf);
        }
-}
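
The ring-to-vector mapping removed above spreads rings evenly by recomputing
DIV_ROUND_UP(rings_rem, vectors_left) for each vector, which front-loads any
remainder. A self-contained sketch of that arithmetic, with invented ring
and vector counts:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Same distribution arithmetic as the removed
     * ice_vsi_map_rings_to_vectors(); the counts are made up. */
    int main(void)
    {
        unsigned int rings_rem = 10, vectors = 4;
        unsigned int v;

        for (v = 0; v < vectors; v++) {
            unsigned int per_v = DIV_ROUND_UP(rings_rem, vectors - v);

            printf("vector %u gets %u rings\n", v, per_v);
            rings_rem -= per_v;
        }
        return 0; /* prints 3, 3, 2, 2: the remainder lands up front */
    }
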
-
-/**
- * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- */
-static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
-{
-       struct ice_pf *pf = vsi->back;
 
-       switch (vsi->type) {
-       case ICE_VSI_PF:
-               vsi->alloc_txq = pf->num_lan_tx;
-               vsi->alloc_rxq = pf->num_lan_rx;
-               vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
-               vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
-               break;
-       default:
-               dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
-                        vsi->type);
-               break;
-       }
+       pf->num_avail_sw_msix += 1;
+       ice_free_res(pf->sw_irq_tracker, pf->sw_oicr_idx, ICE_RES_MISC_VEC_ID);
+       pf->num_avail_hw_msix += 1;
+       ice_free_res(pf->hw_irq_tracker, pf->hw_oicr_idx, ICE_RES_MISC_VEC_ID);
 }
 
 /**
- * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
- * @vsi: VSI pointer
- * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
+ * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
+ * @pf: board private structure
  *
- * On error: returns error code (negative)
- * On success: returns 0
+ * This sets up the handler for MSIX 0, which is used to manage the
+ * non-queue interrupts, e.g. AdminQ and errors.  This is not used
+ * when in MSI or Legacy interrupt mode.
  */
-static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
+static int ice_req_irq_msix_misc(struct ice_pf *pf)
 {
-       struct ice_pf *pf = vsi->back;
+       struct ice_hw *hw = &pf->hw;
+       int oicr_idx, err = 0;
+       u8 itr_gran;
+       u32 val;
 
-       /* allocate memory for both Tx and Rx ring pointers */
-       vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
-                                    sizeof(struct ice_ring *), GFP_KERNEL);
-       if (!vsi->tx_rings)
-               goto err_txrings;
-
-       vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
-                                    sizeof(struct ice_ring *), GFP_KERNEL);
-       if (!vsi->rx_rings)
-               goto err_rxrings;
-
-       if (alloc_qvectors) {
-               /* allocate memory for q_vector pointers */
-               vsi->q_vectors = devm_kcalloc(&pf->pdev->dev,
-                                             vsi->num_q_vectors,
-                                             sizeof(struct ice_q_vector *),
-                                             GFP_KERNEL);
-               if (!vsi->q_vectors)
-                       goto err_vectors;
-       }
+       if (!pf->int_name[0])
+               snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
+                        dev_driver_string(&pf->pdev->dev),
+                        dev_name(&pf->pdev->dev));
 
-       return 0;
+       /* Do not request the IRQ, but do enable the OICR interrupt, since
+        * its settings are lost during reset; this happens only on the
+        * rebuild path, while a reset is still in progress.
+        */
+       if (ice_is_reset_in_progress(pf->state))
+               goto skip_req_irq;
 
-err_vectors:
-       devm_kfree(&pf->pdev->dev, vsi->rx_rings);
-err_rxrings:
-       devm_kfree(&pf->pdev->dev, vsi->tx_rings);
-err_txrings:
-       return -ENOMEM;
-}
+       /* reserve one vector in sw_irq_tracker for misc interrupts */
+       oicr_idx = ice_get_res(pf, pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+       if (oicr_idx < 0)
+               return oicr_idx;
 
-/**
- * ice_msix_clean_rings - MSIX mode Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a q_vector
- */
-static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
-{
-       struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
+       pf->num_avail_sw_msix -= 1;
+       pf->sw_oicr_idx = oicr_idx;
 
-       if (!q_vector->tx.ring && !q_vector->rx.ring)
-               return IRQ_HANDLED;
-
-       napi_schedule(&q_vector->napi);
-
-       return IRQ_HANDLED;
-}
-
-/**
- * ice_vsi_alloc - Allocates the next available struct vsi in the PF
- * @pf: board private structure
- * @type: type of VSI
- *
- * returns a pointer to a VSI on success, NULL on failure.
- */
-static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
-{
-       struct ice_vsi *vsi = NULL;
-
-       /* Need to protect the allocation of the VSIs at the PF level */
-       mutex_lock(&pf->sw_mutex);
-
-       /* If we have already allocated our maximum number of VSIs,
-        * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
-        * is available to be populated
-        */
-       if (pf->next_vsi == ICE_NO_VSI) {
-               dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
-               goto unlock_pf;
-       }
-
-       vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
-       if (!vsi)
-               goto unlock_pf;
-
-       vsi->type = type;
-       vsi->back = pf;
-       set_bit(__ICE_DOWN, vsi->state);
-       vsi->idx = pf->next_vsi;
-       vsi->work_lmt = ICE_DFLT_IRQ_WORK;
-
-       ice_vsi_set_num_qs(vsi);
-
-       switch (vsi->type) {
-       case ICE_VSI_PF:
-               if (ice_vsi_alloc_arrays(vsi, true))
-                       goto err_rings;
-
-               /* Setup default MSIX irq handler for VSI */
-               vsi->irq_handler = ice_msix_clean_rings;
-               break;
-       default:
-               dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
-               goto unlock_pf;
-       }
-
-       /* fill VSI slot in the PF struct */
-       pf->vsi[pf->next_vsi] = vsi;
-
-       /* prepare pf->next_vsi for next use */
-       pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
-                                        pf->next_vsi);
-       goto unlock_pf;
-
-err_rings:
-       devm_kfree(&pf->pdev->dev, vsi);
-       vsi = NULL;
-unlock_pf:
-       mutex_unlock(&pf->sw_mutex);
-       return vsi;
-}
-
-/**
- * ice_free_irq_msix_misc - Unroll misc vector setup
- * @pf: board private structure
- */
-static void ice_free_irq_msix_misc(struct ice_pf *pf)
-{
-       /* disable OICR interrupt */
-       wr32(&pf->hw, PFINT_OICR_ENA, 0);
-       ice_flush(&pf->hw);
-
-       if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
-               synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
-               devm_free_irq(&pf->pdev->dev,
-                             pf->msix_entries[pf->oicr_idx].vector, pf);
-       }
-
-       ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
-}
-
-/**
- * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
- * @pf: board private structure
- *
- * This sets up the handler for MSIX 0, which is used to manage the
- * non-queue interrupts, e.g. AdminQ and errors.  This is not used
- * when in MSI or Legacy interrupt mode.
- */
-static int ice_req_irq_msix_misc(struct ice_pf *pf)
-{
-       struct ice_hw *hw = &pf->hw;
-       int oicr_idx, err = 0;
-       u8 itr_gran;
-       u32 val;
-
-       if (!pf->int_name[0])
-               snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
-                        dev_driver_string(&pf->pdev->dev),
-                        dev_name(&pf->pdev->dev));
-
-       /* Do not request IRQ but do enable OICR interrupt since settings are
-        * lost during reset. Note that this function is called only during
-        * rebuild path and not while reset is in progress.
-        */
-       if (ice_is_reset_recovery_pending(pf->state))
-               goto skip_req_irq;
-
-       /* reserve one vector in irq_tracker for misc interrupts */
-       oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
-       if (oicr_idx < 0)
-               return oicr_idx;
-
-       pf->oicr_idx = oicr_idx;
+       /* reserve one vector in hw_irq_tracker for misc interrupts */
+       oicr_idx = ice_get_res(pf, pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+       if (oicr_idx < 0) {
+               ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+               pf->num_avail_sw_msix += 1;
+               return oicr_idx;
+       }
+       pf->num_avail_hw_msix -= 1;
+       pf->hw_oicr_idx = oicr_idx;
 
        err = devm_request_irq(&pf->pdev->dev,
-                              pf->msix_entries[pf->oicr_idx].vector,
+                              pf->msix_entries[pf->sw_oicr_idx].vector,
                               ice_misc_intr, 0, pf->int_name, pf);
        if (err) {
                dev_err(&pf->pdev->dev,
                        "devm_request_irq for %s failed: %d\n",
                        pf->int_name, err);
-               ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+               ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+               pf->num_avail_sw_msix += 1;
+               ice_free_res(pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+               pf->num_avail_hw_msix += 1;
                return err;
        }
 
 skip_req_irq:
        ice_ena_misc_vector(pf);
 
-       val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
+       val = ((pf->hw_oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
               PFINT_OICR_CTL_CAUSE_ENA_M);
        wr32(hw, PFINT_OICR_CTL, val);
 
        /* This enables Admin queue Interrupt causes */
-       val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
+       val = ((pf->hw_oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
               PFINT_FW_CTL_CAUSE_ENA_M);
        wr32(hw, PFINT_FW_CTL, val);
 
-       itr_gran = hw->itr_gran_200;
+       /* This enables Mailbox queue Interrupt causes */
+       val = ((pf->hw_oicr_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
+              PFINT_MBX_CTL_CAUSE_ENA_M);
+       wr32(hw, PFINT_MBX_CTL, val);
+
+       itr_gran = hw->itr_gran;
 
-       wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
+       wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx),
             ITR_TO_REG(ICE_ITR_8K, itr_gran));
 
        ice_flush(hw);
@@ -2386,209 +1518,43 @@ skip_req_irq:
 }
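
The three CTL writes above share one shape: the OICR vector index masked
into the low bits, OR'd with a cause-enable flag. A small user-space sketch
of that composition; the mask values are placeholders, not the PFINT_*
definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define MSIX_INDX_M 0x7ffu     /* placeholder for PFINT_*_MSIX_INDX_M */
    #define CAUSE_ENA_M (1u << 30) /* placeholder for PFINT_*_CAUSE_ENA_M */

    /* Same shape as the PFINT_OICR_CTL/PFINT_FW_CTL/PFINT_MBX_CTL values */
    static uint32_t build_ctl(uint16_t oicr_idx)
    {
        return (oicr_idx & MSIX_INDX_M) | CAUSE_ENA_M;
    }

    int main(void)
    {
        printf("0x%08x\n", build_ctl(3)); /* prints 0x40000003 */
        return 0;
    }
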
 
 /**
- * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
- * @vsi: the VSI getting queues
- *
- * Return 0 on success and a negative value on error
- */
-static int ice_vsi_get_qs_contig(struct ice_vsi *vsi)
-{
-       struct ice_pf *pf = vsi->back;
-       int offset, ret = 0;
-
-       mutex_lock(&pf->avail_q_mutex);
-       /* look for contiguous block of queues for tx */
-       offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS,
-                                           0, vsi->alloc_txq, 0);
-       if (offset < ICE_MAX_TXQS) {
-               int i;
-
-               bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq);
-               for (i = 0; i < vsi->alloc_txq; i++)
-                       vsi->txq_map[i] = i + offset;
-       } else {
-               ret = -ENOMEM;
-               vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER;
-       }
-
-       /* look for contiguous block of queues for rx */
-       offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS,
-                                           0, vsi->alloc_rxq, 0);
-       if (offset < ICE_MAX_RXQS) {
-               int i;
-
-               bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq);
-               for (i = 0; i < vsi->alloc_rxq; i++)
-                       vsi->rxq_map[i] = i + offset;
-       } else {
-               ret = -ENOMEM;
-               vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER;
-       }
-       mutex_unlock(&pf->avail_q_mutex);
-
-       return ret;
-}
-
-/**
- * ice_vsi_get_qs_scatter - Assign a scattered queues to VSI
- * @vsi: the VSI getting queues
- *
- * Return 0 on success and a negative value on error
+ * ice_napi_del - Remove NAPI handler for the VSI
+ * @vsi: VSI for which NAPI handler is to be removed
  */
-static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi)
+static void ice_napi_del(struct ice_vsi *vsi)
 {
-       struct ice_pf *pf = vsi->back;
-       int i, index = 0;
-
-       mutex_lock(&pf->avail_q_mutex);
-
-       if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) {
-               for (i = 0; i < vsi->alloc_txq; i++) {
-                       index = find_next_zero_bit(pf->avail_txqs,
-                                                  ICE_MAX_TXQS, index);
-                       if (index < ICE_MAX_TXQS) {
-                               set_bit(index, pf->avail_txqs);
-                               vsi->txq_map[i] = index;
-                       } else {
-                               goto err_scatter_tx;
-                       }
-               }
-       }
-
-       if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) {
-               for (i = 0; i < vsi->alloc_rxq; i++) {
-                       index = find_next_zero_bit(pf->avail_rxqs,
-                                                  ICE_MAX_RXQS, index);
-                       if (index < ICE_MAX_RXQS) {
-                               set_bit(index, pf->avail_rxqs);
-                               vsi->rxq_map[i] = index;
-                       } else {
-                               goto err_scatter_rx;
-                       }
-               }
-       }
-
-       mutex_unlock(&pf->avail_q_mutex);
-       return 0;
+       int v_idx;
 
-err_scatter_rx:
-       /* unflag any queues we have grabbed (i is failed position) */
-       for (index = 0; index < i; index++) {
-               clear_bit(vsi->rxq_map[index], pf->avail_rxqs);
-               vsi->rxq_map[index] = 0;
-       }
-       i = vsi->alloc_txq;
-err_scatter_tx:
-       /* i is either position of failed attempt or vsi->alloc_txq */
-       for (index = 0; index < i; index++) {
-               clear_bit(vsi->txq_map[index], pf->avail_txqs);
-               vsi->txq_map[index] = 0;
-       }
+       if (!vsi->netdev)
+               return;
 
-       mutex_unlock(&pf->avail_q_mutex);
-       return -ENOMEM;
+       for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+               netif_napi_del(&vsi->q_vectors[v_idx]->napi);
 }
 
 /**
- * ice_vsi_get_qs - Assign queues from PF to VSI
- * @vsi: the VSI to assign queues to
+ * ice_napi_add - register NAPI handler for the VSI
+ * @vsi: VSI for which NAPI handler is to be registered
  *
- * Returns 0 on success and a negative value on error
- */
-static int ice_vsi_get_qs(struct ice_vsi *vsi)
-{
-       int ret = 0;
-
-       vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
-       vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
-
-       /* NOTE: ice_vsi_get_qs_contig() will set the rx/tx mapping
-        * modes individually to scatter if assigning contiguous queues
-        * to rx or tx fails
-        */
-       ret = ice_vsi_get_qs_contig(vsi);
-       if (ret < 0) {
-               if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER)
-                       vsi->alloc_txq = max_t(u16, vsi->alloc_txq,
-                                              ICE_MAX_SCATTER_TXQS);
-               if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER)
-                       vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq,
-                                              ICE_MAX_SCATTER_RXQS);
-               ret = ice_vsi_get_qs_scatter(vsi);
-       }
-
-       return ret;
-}
-
-/**
- * ice_vsi_put_qs - Release queues from VSI to PF
- * @vsi: the VSI thats going to release queues
- */
-static void ice_vsi_put_qs(struct ice_vsi *vsi)
-{
-       struct ice_pf *pf = vsi->back;
-       int i;
-
-       mutex_lock(&pf->avail_q_mutex);
-
-       for (i = 0; i < vsi->alloc_txq; i++) {
-               clear_bit(vsi->txq_map[i], pf->avail_txqs);
-               vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
-       }
-
-       for (i = 0; i < vsi->alloc_rxq; i++) {
-               clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
-               vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
-       }
-
-       mutex_unlock(&pf->avail_q_mutex);
-}
-
-/**
- * ice_free_q_vector - Free memory allocated for a specific interrupt vector
- * @vsi: VSI having the memory freed
- * @v_idx: index of the vector to be freed
+ * This function is only called in the driver's load path. Registering the NAPI
+ * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
+ * reset/rebuild, etc.)
  */
-static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
+static void ice_napi_add(struct ice_vsi *vsi)
 {
-       struct ice_q_vector *q_vector;
-       struct ice_ring *ring;
+       int v_idx;
 
-       if (!vsi->q_vectors[v_idx]) {
-               dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n",
-                       v_idx);
+       if (!vsi->netdev)
                return;
-       }
-       q_vector = vsi->q_vectors[v_idx];
-
-       ice_for_each_ring(ring, q_vector->tx)
-               ring->q_vector = NULL;
-       ice_for_each_ring(ring, q_vector->rx)
-               ring->q_vector = NULL;
-
-       /* only VSI with an associated netdev is set up with NAPI */
-       if (vsi->netdev)
-               netif_napi_del(&q_vector->napi);
-
-       devm_kfree(&vsi->back->pdev->dev, q_vector);
-       vsi->q_vectors[v_idx] = NULL;
-}
-
-/**
- * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
- * @vsi: the VSI having memory freed
- */
-static void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
-{
-       int v_idx;
 
        for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
-               ice_free_q_vector(vsi, v_idx);
+               netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
+                              ice_napi_poll, NAPI_POLL_WEIGHT);
 }
 
 /**
- * ice_cfg_netdev - Setup the netdev flags
- * @vsi: the VSI being configured
+ * ice_cfg_netdev - Allocate, configure and register a netdev
+ * @vsi: the VSI associated with the new netdev
  *
  * Returns 0 on success, negative value on failure
  */
@@ -2601,6 +1567,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
        struct ice_netdev_priv *np;
        struct net_device *netdev;
        u8 mac_addr[ETH_ALEN];
+       int err;
 
        netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv),
                                    vsi->alloc_txq, vsi->alloc_rxq);
@@ -2658,1979 +1625,999 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
        netdev->min_mtu = ETH_MIN_MTU;
        netdev->max_mtu = ICE_MAX_MTU;
 
+       err = register_netdev(vsi->netdev);
+       if (err)
+               return err;
+
+       netif_carrier_off(vsi->netdev);
+
+       /* make sure transmit queues start off as stopped */
+       netif_tx_stop_all_queues(vsi->netdev);
+
        return 0;
 }
 
 /**
- * ice_vsi_free_arrays - clean up vsi resources
- * @vsi: pointer to VSI being cleared
- * @free_qvectors: bool to specify if q_vectors should be deallocated
+ * ice_fill_rss_lut - Fill the RSS lookup table with default values
+ * @lut: Lookup table
+ * @rss_table_size: Lookup table size
+ * @rss_size: Range of queue number for hashing
  */
-static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors)
+void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
 {
-       struct ice_pf *pf = vsi->back;
+       u16 i;
 
-       /* free the ring and vector containers */
-       if (free_qvectors && vsi->q_vectors) {
-               devm_kfree(&pf->pdev->dev, vsi->q_vectors);
-               vsi->q_vectors = NULL;
-       }
-       if (vsi->tx_rings) {
-               devm_kfree(&pf->pdev->dev, vsi->tx_rings);
-               vsi->tx_rings = NULL;
-       }
-       if (vsi->rx_rings) {
-               devm_kfree(&pf->pdev->dev, vsi->rx_rings);
-               vsi->rx_rings = NULL;
-       }
+       for (i = 0; i < rss_table_size; i++)
+               lut[i] = i % rss_size;
 }
 
 /**
- * ice_vsi_clear - clean up and deallocate the provided vsi
- * @vsi: pointer to VSI being cleared
- *
- * This deallocates the vsi's queue resources, removes it from the PF's
- * VSI array if necessary, and deallocates the VSI
+ * ice_pf_vsi_setup - Set up a PF VSI
+ * @pf: board private structure
+ * @pi: pointer to the port_info instance
  *
- * Returns 0 on success, negative on failure
+ * Returns a pointer to the newly allocated VSI sw struct on success,
+ * NULL on failure.
  */
-static int ice_vsi_clear(struct ice_vsi *vsi)
+static struct ice_vsi *
+ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
 {
-       struct ice_pf *pf = NULL;
+       return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
+}
 
-       if (!vsi)
-               return 0;
+/**
+ * ice_vlan_rx_add_vid - Add a vlan id filter to HW offload
+ * @netdev: network interface to be adjusted
+ * @proto: unused protocol
+ * @vid: vlan id to be added
+ *
+ * net_device_ops implementation for adding vlan ids
+ */
+static int ice_vlan_rx_add_vid(struct net_device *netdev,
+                              __always_unused __be16 proto, u16 vid)
+{
+       struct ice_netdev_priv *np = netdev_priv(netdev);
+       struct ice_vsi *vsi = np->vsi;
+       int ret;
 
-       if (!vsi->back)
+       if (vid >= VLAN_N_VID) {
+               netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
+                          vid, VLAN_N_VID);
                return -EINVAL;
+       }
 
-       pf = vsi->back;
-
-       if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
-               dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
-                       vsi->idx);
+       if (vsi->info.pvid)
                return -EINVAL;
-       }
 
-       mutex_lock(&pf->sw_mutex);
-       /* updates the PF for this cleared vsi */
+       /* Enable VLAN pruning when VLAN 0 is added */
+       if (unlikely(!vid)) {
+               ret = ice_cfg_vlan_pruning(vsi, true);
+               if (ret)
+                       return ret;
+       }
 
-       pf->vsi[vsi->idx] = NULL;
-       if (vsi->idx < pf->next_vsi)
-               pf->next_vsi = vsi->idx;
+       /* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is
+        * needed to continue allowing all untagged packets since VLAN prune
+        * list is applied to all packets by the switch
+        */
+       ret = ice_vsi_add_vlan(vsi, vid);
 
-       ice_vsi_free_arrays(vsi, true);
-       mutex_unlock(&pf->sw_mutex);
-       devm_kfree(&pf->pdev->dev, vsi);
+       if (!ret)
+               set_bit(vid, vsi->active_vlans);
 
-       return 0;
+       return ret;
 }
 
 /**
- * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
- * @vsi: the VSI being configured
- * @v_idx: index of the vector in the vsi struct
+ * ice_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
+ * @netdev: network interface to be adjusted
+ * @proto: unused protocol
+ * @vid: vlan id to be removed
  *
- * We allocate one q_vector.  If allocation fails we return -ENOMEM.
+ * net_device_ops implementation for removing vlan ids
  */
-static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
+static int ice_vlan_rx_kill_vid(struct net_device *netdev,
+                               __always_unused __be16 proto, u16 vid)
 {
-       struct ice_pf *pf = vsi->back;
-       struct ice_q_vector *q_vector;
+       struct ice_netdev_priv *np = netdev_priv(netdev);
+       struct ice_vsi *vsi = np->vsi;
+       int status;
 
-       /* allocate q_vector */
-       q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
-       if (!q_vector)
-               return -ENOMEM;
+       if (vsi->info.pvid)
+               return -EINVAL;
+
+       /* Make sure ice_vsi_kill_vlan is successful before updating VLAN
+        * information
+        */
+       status = ice_vsi_kill_vlan(vsi, vid);
+       if (status)
+               return status;
 
-       q_vector->vsi = vsi;
-       q_vector->v_idx = v_idx;
-       /* only set affinity_mask if the CPU is online */
-       if (cpu_online(v_idx))
-               cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+       clear_bit(vid, vsi->active_vlans);
 
-       if (vsi->netdev)
-               netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
-                              NAPI_POLL_WEIGHT);
-       /* tie q_vector and vsi together */
-       vsi->q_vectors[v_idx] = q_vector;
+       /* Disable VLAN pruning when VLAN 0 is removed */
+       if (unlikely(!vid))
+               status = ice_cfg_vlan_pruning(vsi, false);
 
-       return 0;
+       return status;
 }
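
Taken together, the add/kill handlers keep VLAN pruning tied to VLAN 0 and
track ids in vsi->active_vlans. A toy model of that pairing; the bitmap and
flag below merely stand in for the driver state:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for vsi->active_vlans and the pruning state. */
    static unsigned long active_vlans;
    static bool pruning;

    static void add_vid(unsigned int vid)
    {
        if (vid == 0)
            pruning = true;          /* pruning enabled with VLAN 0 */
        active_vlans |= 1UL << vid;
    }

    static void kill_vid(unsigned int vid)
    {
        active_vlans &= ~(1UL << vid);
        if (vid == 0)
            pruning = false;         /* pruning disabled with VLAN 0 */
    }

    int main(void)
    {
        add_vid(0);
        add_vid(3);
        kill_vid(0);
        printf("vlans=0x%lx pruning=%d\n", active_vlans, pruning);
        return 0;                    /* vlans=0x8 pruning=0 */
    }
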
 
 /**
- * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
- * @vsi: the VSI being configured
+ * ice_setup_pf_sw - Setup the HW switch on startup or after reset
+ * @pf: board private structure
  *
- * We allocate one q_vector per queue interrupt.  If allocation fails we
- * return -ENOMEM.
+ * Returns 0 on success, negative value on failure
  */
-static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
+static int ice_setup_pf_sw(struct ice_pf *pf)
 {
-       struct ice_pf *pf = vsi->back;
-       int v_idx = 0, num_q_vectors;
-       int err;
+       LIST_HEAD(tmp_add_list);
+       u8 broadcast[ETH_ALEN];
+       struct ice_vsi *vsi;
+       int status = 0;
 
-       if (vsi->q_vectors[0]) {
-               dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
-                       vsi->vsi_num);
-               return -EEXIST;
+       if (ice_is_reset_in_progress(pf->state))
+               return -EBUSY;
+
+       vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
+       if (!vsi) {
+               status = -ENOMEM;
+               goto unroll_vsi_setup;
        }
 
-       if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
-               num_q_vectors = vsi->num_q_vectors;
-       } else {
-               err = -EINVAL;
-               goto err_out;
+       status = ice_cfg_netdev(vsi);
+       if (status) {
+               status = -ENODEV;
+               goto unroll_vsi_setup;
        }
 
-       for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
-               err = ice_vsi_alloc_q_vector(vsi, v_idx);
-               if (err)
-                       goto err_out;
+       /* registering the NAPI handler requires both the queues and
+        * netdev to be created, which are done in ice_pf_vsi_setup()
+        * and ice_cfg_netdev() respectively
+        */
+       ice_napi_add(vsi);
+
+       /* To add a MAC filter, first add the MAC to a list and then
+        * pass the list to ice_add_mac.
+        */
+
+       /* Add a unicast MAC filter so the VSI can get its packets */
+       status = ice_add_mac_to_list(vsi, &tmp_add_list,
+                                    vsi->port_info->mac.perm_addr);
+       if (status)
+               goto unroll_napi_add;
+
+       /* VSI needs to receive broadcast traffic, so add the broadcast
+        * MAC address to the list as well.
+        */
+       eth_broadcast_addr(broadcast);
+       status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
+       if (status)
+               goto free_mac_list;
+
+       /* program MAC filters for entries in tmp_add_list */
+       status = ice_add_mac(&pf->hw, &tmp_add_list);
+       if (status) {
+               dev_err(&pf->pdev->dev, "Could not add MAC filters\n");
+               status = -ENOMEM;
+               goto free_mac_list;
        }
 
-       return 0;
+       ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+       return status;
 
-err_out:
-       while (v_idx--)
-               ice_free_q_vector(vsi, v_idx);
+free_mac_list:
+       ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
 
-       dev_err(&pf->pdev->dev,
-               "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
-               vsi->num_q_vectors, vsi->vsi_num, err);
-       vsi->num_q_vectors = 0;
-       return err;
+unroll_napi_add:
+       if (vsi) {
+               ice_napi_del(vsi);
+               if (vsi->netdev) {
+                       if (vsi->netdev->reg_state == NETREG_REGISTERED)
+                               unregister_netdev(vsi->netdev);
+                       free_netdev(vsi->netdev);
+                       vsi->netdev = NULL;
+               }
+       }
+
+unroll_vsi_setup:
+       if (vsi) {
+               ice_vsi_free_q_vectors(vsi);
+               ice_vsi_delete(vsi);
+               ice_vsi_put_qs(vsi);
+               pf->q_left_tx += vsi->alloc_txq;
+               pf->q_left_rx += vsi->alloc_rxq;
+               ice_vsi_clear(vsi);
+       }
+       return status;
 }
 
 /**
- * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
- * @vsi: ptr to the VSI
- *
- * This should only be called after ice_vsi_alloc() which allocates the
- * corresponding SW VSI structure and initializes num_queue_pairs for the
- * newly allocated VSI.
+ * ice_determine_q_usage - Calculate queue distribution
+ * @pf: board private structure
  *
- * Returns 0 on success or negative on failure
+ * The remaining queue capacity is recorded in pf->q_left_tx and pf->q_left_rx
  */
-static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
+static void ice_determine_q_usage(struct ice_pf *pf)
 {
-       struct ice_pf *pf = vsi->back;
-       int num_q_vectors = 0;
-
-       if (vsi->base_vector) {
-               dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
-                       vsi->vsi_num, vsi->base_vector);
-               return -EEXIST;
-       }
-
-       if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
-               return -ENOENT;
+       u16 q_left_tx, q_left_rx;
 
-       switch (vsi->type) {
-       case ICE_VSI_PF:
-               num_q_vectors = vsi->num_q_vectors;
-               break;
-       default:
-               dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
-                        vsi->type);
-               break;
-       }
+       q_left_tx = pf->hw.func_caps.common_cap.num_txq;
+       q_left_rx = pf->hw.func_caps.common_cap.num_rxq;
 
-       if (num_q_vectors)
-               vsi->base_vector = ice_get_res(pf, pf->irq_tracker,
-                                              num_q_vectors, vsi->idx);
+       pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());
 
-       if (vsi->base_vector < 0) {
-               dev_err(&pf->pdev->dev,
-                       "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
-                       num_q_vectors, vsi->vsi_num, vsi->base_vector);
-               return -ENOENT;
-       }
+       /* only 1 rx queue unless RSS is enabled */
+       if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+               pf->num_lan_rx = 1;
+       else
+               pf->num_lan_rx = min_t(int, q_left_rx, num_online_cpus());
 
-       return 0;
+       pf->q_left_tx = q_left_tx - pf->num_lan_tx;
+       pf->q_left_rx = q_left_rx - pf->num_lan_rx;
 }
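
The split above caps each direction at the online CPU count and collapses Rx
to a single queue when RSS is off. The same computation as a stand-alone
sketch; the capability and CPU numbers are invented inputs:

    #include <stdio.h>

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned int q_left_tx = 64, q_left_rx = 64; /* hypothetical caps */
        unsigned int ncpus = 8;                      /* hypothetical CPUs */
        int rss_ena = 1;

        unsigned int num_lan_tx = min_u(q_left_tx, ncpus);
        unsigned int num_lan_rx = rss_ena ? min_u(q_left_rx, ncpus) : 1;

        printf("tx=%u rx=%u left_tx=%u left_rx=%u\n",
               num_lan_tx, num_lan_rx,
               q_left_tx - num_lan_tx, q_left_rx - num_lan_rx);
        return 0; /* tx=8 rx=8 left_tx=56 left_rx=56 */
    }
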
 
 /**
- * ice_fill_rss_lut - Fill the RSS lookup table with default values
- * @lut: Lookup table
- * @rss_table_size: Lookup table size
- * @rss_size: Range of queue number for hashing
+ * ice_deinit_pf - Unrolls initializations done by ice_init_pf
+ * @pf: board private structure to initialize
  */
-void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
+static void ice_deinit_pf(struct ice_pf *pf)
 {
-       u16 i;
-
-       for (i = 0; i < rss_table_size; i++)
-               lut[i] = i % rss_size;
+       ice_service_task_stop(pf);
+       mutex_destroy(&pf->sw_mutex);
+       mutex_destroy(&pf->avail_q_mutex);
 }
 
 /**
- * ice_vsi_cfg_rss - Configure RSS params for a VSI
- * @vsi: VSI to be configured
+ * ice_init_pf - Initialize general software structures (struct ice_pf)
+ * @pf: board private structure to initialize
  */
-static int ice_vsi_cfg_rss(struct ice_vsi *vsi)
+static void ice_init_pf(struct ice_pf *pf)
 {
-       u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
-       struct ice_aqc_get_set_rss_keys *key;
-       struct ice_pf *pf = vsi->back;
-       enum ice_status status;
-       int err = 0;
-       u8 *lut;
-
-       vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);
-
-       lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
-       if (!lut)
-               return -ENOMEM;
-
-       if (vsi->rss_lut_user)
-               memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
-       else
-               ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
-
-       status = ice_aq_set_rss_lut(&pf->hw, vsi->vsi_num, vsi->rss_lut_type,
-                                   lut, vsi->rss_table_size);
-
-       if (status) {
-               dev_err(&vsi->back->pdev->dev,
-                       "set_rss_lut failed, error %d\n", status);
-               err = -EIO;
-               goto ice_vsi_cfg_rss_exit;
-       }
+       bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
+       set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
+#ifdef CONFIG_PCI_IOV
+       if (pf->hw.func_caps.common_cap.sr_iov_1_1) {
+               struct ice_hw *hw = &pf->hw;
 
-       key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL);
-       if (!key) {
-               err = -ENOMEM;
-               goto ice_vsi_cfg_rss_exit;
+               set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
+               pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs,
+                                             ICE_MAX_VF_COUNT);
        }
+#endif /* CONFIG_PCI_IOV */
 
-       if (vsi->rss_hkey_user)
-               memcpy(seed, vsi->rss_hkey_user,
-                      ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
-       else
-               netdev_rss_key_fill((void *)seed,
-                                   ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
-       memcpy(&key->standard_rss_key, seed,
-              ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+       mutex_init(&pf->sw_mutex);
+       mutex_init(&pf->avail_q_mutex);
 
-       status = ice_aq_set_rss_key(&pf->hw, vsi->vsi_num, key);
+       /* Clear avail_[t|r]x_qs bitmaps (set all to avail) */
+       mutex_lock(&pf->avail_q_mutex);
+       bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS);
+       bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS);
+       mutex_unlock(&pf->avail_q_mutex);
 
-       if (status) {
-               dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n",
-                       status);
-               err = -EIO;
-       }
+       if (pf->hw.func_caps.common_cap.rss_table_size)
+               set_bit(ICE_FLAG_RSS_ENA, pf->flags);
 
-       devm_kfree(&pf->pdev->dev, key);
-ice_vsi_cfg_rss_exit:
-       devm_kfree(&pf->pdev->dev, lut);
-       return err;
+       /* setup service timer and periodic service task */
+       timer_setup(&pf->serv_tmr, ice_service_timer, 0);
+       pf->serv_tmr_period = HZ;
+       INIT_WORK(&pf->serv_task, ice_service_task);
+       clear_bit(__ICE_SERVICE_SCHED, pf->state);
 }
 
 /**
- * ice_vsi_rebuild - Rebuild VSI after reset
- * @vsi: vsi to be rebuild
+ * ice_ena_msix_range - Request a range of MSIX vectors from the OS
+ * @pf: board private structure
  *
- * Returns 0 on success and negative value on failure
+ * Compute the number of MSIX vectors required (v_budget) and request them
+ * from the OS. Return the number of vectors reserved or negative on failure
  */
-static int ice_vsi_rebuild(struct ice_vsi *vsi)
+static int ice_ena_msix_range(struct ice_pf *pf)
 {
-       u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
-       int ret, i;
+       int v_left, v_actual, v_budget = 0;
+       int needed, err, i;
 
-       if (!vsi)
-               return -EINVAL;
+       v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
 
-       ice_vsi_free_q_vectors(vsi);
-       ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
-       vsi->base_vector = 0;
-       ice_vsi_clear_rings(vsi);
-       ice_vsi_free_arrays(vsi, false);
-       ice_vsi_set_num_qs(vsi);
-
-       /* Initialize VSI struct elements and create VSI in FW */
-       ret = ice_vsi_init(vsi);
-       if (ret < 0)
-               goto err_vsi;
-
-       ret = ice_vsi_alloc_arrays(vsi, false);
-       if (ret < 0)
-               goto err_vsi;
-
-       switch (vsi->type) {
-       case ICE_VSI_PF:
-               /* fall through */
-               ret = ice_vsi_alloc_q_vectors(vsi);
-               if (ret)
-                       goto err_rings;
+       /* reserve one vector for miscellaneous handler */
+       needed = 1;
+       v_budget += needed;
+       v_left -= needed;
 
-               ret = ice_vsi_setup_vector_base(vsi);
-               if (ret)
-                       goto err_vectors;
+       /* reserve vectors for LAN traffic */
+       pf->num_lan_msix = min_t(int, num_online_cpus(), v_left);
+       v_budget += pf->num_lan_msix;
+       v_left -= pf->num_lan_msix;
 
-               ret = ice_vsi_alloc_rings(vsi);
-               if (ret)
-                       goto err_vectors;
+       pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
+                                       sizeof(struct msix_entry), GFP_KERNEL);
 
-               ice_vsi_map_rings_to_vectors(vsi);
-               break;
-       default:
-               break;
+       if (!pf->msix_entries) {
+               err = -ENOMEM;
+               goto exit_err;
        }
 
-       ice_vsi_set_tc_cfg(vsi);
+       for (i = 0; i < v_budget; i++)
+               pf->msix_entries[i].entry = i;
 
-       /* configure VSI nodes based on number of queues and TC's */
-       for (i = 0; i < vsi->tc_cfg.numtc; i++)
-               max_txqs[i] = vsi->num_txq;
+       /* actually reserve the vectors */
+       v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
+                                        ICE_MIN_MSIX, v_budget);
 
-       ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num,
-                             vsi->tc_cfg.ena_tc, max_txqs);
-       if (ret) {
-               dev_info(&vsi->back->pdev->dev,
-                        "Failed VSI lan queue config\n");
-               goto err_vectors;
+       if (v_actual < 0) {
+               dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n");
+               err = v_actual;
+               goto msix_err;
        }
-       return 0;
 
-err_vectors:
-       ice_vsi_free_q_vectors(vsi);
-err_rings:
-       if (vsi->netdev) {
-               vsi->current_netdev_flags = 0;
-               unregister_netdev(vsi->netdev);
-               free_netdev(vsi->netdev);
-               vsi->netdev = NULL;
+       if (v_actual < v_budget) {
+               dev_warn(&pf->pdev->dev,
+                        "not enough vectors. requested = %d, obtained = %d\n",
+                        v_budget, v_actual);
+               if (v_actual >= (pf->num_lan_msix + 1)) {
+                       pf->num_avail_sw_msix = v_actual -
+                                               (pf->num_lan_msix + 1);
+               } else if (v_actual >= 2) {
+                       pf->num_lan_msix = 1;
+                       pf->num_avail_sw_msix = v_actual - 2;
+               } else {
+                       pci_disable_msix(pf->pdev);
+                       err = -ERANGE;
+                       goto msix_err;
+               }
        }
-err_vsi:
-       ice_vsi_clear(vsi);
-       set_bit(__ICE_RESET_FAILED, vsi->back->state);
-       return ret;
+
+       return v_actual;
+
+msix_err:
+       devm_kfree(&pf->pdev->dev, pf->msix_entries);
+       goto exit_err;
+
+exit_err:
+       pf->num_lan_msix = 0;
+       clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
+       return err;
 }
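
When fewer vectors are granted than requested, the code above keeps the full
LAN budget if possible, falls back to a single LAN vector, or gives up below
two vectors. A stand-alone sketch of that decision, with invented counts:

    #include <stdio.h>

    /* Mirrors the v_actual < v_budget fallback in ice_ena_msix_range();
     * one vector is always reserved for the misc/OICR handler. */
    static int fit_budget(int v_actual, int num_lan_msix, int *avail_sw)
    {
        if (v_actual >= num_lan_msix + 1) {
            *avail_sw = v_actual - (num_lan_msix + 1);
            return num_lan_msix;
        }
        if (v_actual >= 2) {         /* misc + one LAN vector */
            *avail_sw = v_actual - 2;
            return 1;
        }
        return -1;                   /* the driver returns -ERANGE here */
    }

    int main(void)
    {
        int avail = 0;
        int lan = fit_budget(3, 8, &avail); /* asked for 8 LAN, got 3 total */

        printf("lan=%d avail_sw=%d\n", lan, avail); /* lan=1 avail_sw=1 */
        return 0;
    }
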
 
 /**
- * ice_vsi_setup - Set up a VSI by a given type
+ * ice_dis_msix - Disable MSI-X interrupt setup in OS
  * @pf: board private structure
- * @pi: pointer to the port_info instance
- * @type: VSI type
- * @vf_id: defines VF id to which this VSI connects. This field is meant to be
- *         used only for ICE_VSI_VF VSI type. For other VSI types, should
- *         fill-in ICE_INVAL_VFID as input.
- *
- * This allocates the sw VSI structure and its queue resources.
- *
- * Returns pointer to the successfully allocated and configured VSI sw struct on
- * success, NULL on failure.
  */
-static struct ice_vsi *
-ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
-             enum ice_vsi_type type, u16 __always_unused vf_id)
+static void ice_dis_msix(struct ice_pf *pf)
 {
-       u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
-       struct device *dev = &pf->pdev->dev;
-       struct ice_vsi *vsi;
-       int ret, i;
+       pci_disable_msix(pf->pdev);
+       devm_kfree(&pf->pdev->dev, pf->msix_entries);
+       pf->msix_entries = NULL;
+       clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
+}
 
-       vsi = ice_vsi_alloc(pf, type);
-       if (!vsi) {
-               dev_err(dev, "could not allocate VSI\n");
-               return NULL;
-       }
+/**
+ * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
+ * @pf: board private structure
+ */
+static void ice_clear_interrupt_scheme(struct ice_pf *pf)
+{
+       if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+               ice_dis_msix(pf);
 
-       vsi->port_info = pi;
-       vsi->vsw = pf->first_sw;
+       if (pf->sw_irq_tracker) {
+               devm_kfree(&pf->pdev->dev, pf->sw_irq_tracker);
+               pf->sw_irq_tracker = NULL;
+       }
 
-       if (ice_vsi_get_qs(vsi)) {
-               dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
-                       vsi->idx);
-               goto err_get_qs;
+       if (pf->hw_irq_tracker) {
+               devm_kfree(&pf->pdev->dev, pf->hw_irq_tracker);
+               pf->hw_irq_tracker = NULL;
        }
+}
 
-       /* set RSS capabilities */
-       ice_vsi_set_rss_params(vsi);
+/**
+ * ice_init_interrupt_scheme - Determine proper interrupt scheme
+ * @pf: board private structure to initialize
+ */
+static int ice_init_interrupt_scheme(struct ice_pf *pf)
+{
+       int vectors = 0, hw_vectors = 0;
+       ssize_t size;
 
-       /* create the VSI */
-       ret = ice_vsi_init(vsi);
-       if (ret)
-               goto err_vsi;
+       if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+               vectors = ice_ena_msix_range(pf);
+       else
+               return -ENODEV;
 
-       switch (vsi->type) {
-       case ICE_VSI_PF:
-               ret = ice_cfg_netdev(vsi);
-               if (ret)
-                       goto err_cfg_netdev;
+       if (vectors < 0)
+               return vectors;
 
-               ret = register_netdev(vsi->netdev);
-               if (ret)
-                       goto err_register_netdev;
-
-               netif_carrier_off(vsi->netdev);
-
-               /* make sure transmit queues start off as stopped */
-               netif_tx_stop_all_queues(vsi->netdev);
-               ret = ice_vsi_alloc_q_vectors(vsi);
-               if (ret)
-                       goto err_msix;
-
-               ret = ice_vsi_setup_vector_base(vsi);
-               if (ret)
-                       goto err_rings;
-
-               ret = ice_vsi_alloc_rings(vsi);
-               if (ret)
-                       goto err_rings;
-
-               ice_vsi_map_rings_to_vectors(vsi);
+       /* set up vector assignment tracking */
+       size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors);
 
-               /* Do not exit if configuring RSS had an issue, at least
-                * receive traffic on first queue. Hence no need to capture
-                * return value
-                */
-               if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
-                       ice_vsi_cfg_rss(vsi);
-               break;
-       default:
-               /* if vsi type is not recognized, clean up the resources and
-                * exit
-                */
-               goto err_rings;
+       pf->sw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
+       if (!pf->sw_irq_tracker) {
+               ice_dis_msix(pf);
+               return -ENOMEM;
        }
 
-       ice_vsi_set_tc_cfg(vsi);
+       /* populate SW interrupts pool with number of OS granted IRQs. */
+       pf->num_avail_sw_msix = vectors;
+       pf->sw_irq_tracker->num_entries = vectors;
 
-       /* configure VSI nodes based on number of queues and TC's */
-       for (i = 0; i < vsi->tc_cfg.numtc; i++)
-               max_txqs[i] = vsi->num_txq;
+       /* set up HW vector assignment tracking */
+       hw_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
+       size = sizeof(struct ice_res_tracker) + (sizeof(u16) * hw_vectors);
 
-       ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num,
-                             vsi->tc_cfg.ena_tc, max_txqs);
-       if (ret) {
-               dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n");
-               goto err_rings;
+       pf->hw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
+       if (!pf->hw_irq_tracker) {
+               ice_clear_interrupt_scheme(pf);
+               return -ENOMEM;
        }
 
-       return vsi;
+       /* populate HW interrupts pool with number of HW supported IRQs. */
+       pf->num_avail_hw_msix = hw_vectors;
+       pf->hw_irq_tracker->num_entries = hw_vectors;
 
-err_rings:
-       ice_vsi_free_q_vectors(vsi);
-err_msix:
-       if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)
-               unregister_netdev(vsi->netdev);
-err_register_netdev:
-       if (vsi->netdev) {
-               free_netdev(vsi->netdev);
-               vsi->netdev = NULL;
-       }
-err_cfg_netdev:
-       ice_vsi_delete(vsi);
-err_vsi:
-       ice_vsi_put_qs(vsi);
-err_get_qs:
-       pf->q_left_tx += vsi->alloc_txq;
-       pf->q_left_rx += vsi->alloc_rxq;
-       ice_vsi_clear(vsi);
-
-       return NULL;
+       return 0;
 }
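
Both trackers are sized with the usual flexible-array idiom: the struct
header plus one u16 slot per vector. A small user-space sketch of the same
layout; the struct below is a simplified assumption, not the driver's
ice_res_tracker:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-in for the driver's ice_res_tracker. */
    struct res_tracker {
        uint16_t num_entries;
        uint16_t list[];        /* flexible array member */
    };

    int main(void)
    {
        uint16_t vectors = 8;   /* hypothetical vector count */
        struct res_tracker *t;

        /* header plus one u16 per tracked vector, zeroed like kzalloc */
        t = calloc(1, sizeof(*t) + sizeof(uint16_t) * vectors);
        if (!t)
            return 1;

        t->num_entries = vectors;
        printf("tracking %u entries\n", t->num_entries);
        free(t);
        return 0;
    }
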
 
 /**
- * ice_pf_vsi_setup - Set up a PF VSI
- * @pf: board private structure
- * @pi: pointer to the port_info instance
+ * ice_probe - Device initialization routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ice_pci_tbl
  *
- * Returns pointer to the successfully allocated VSI sw struct on success,
- * otherwise returns NULL on failure.
+ * Returns 0 on success, negative on failure
  */
-static struct ice_vsi *
-ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
+static int ice_probe(struct pci_dev *pdev,
+                    const struct pci_device_id __always_unused *ent)
 {
-       return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
-}
+       struct ice_pf *pf;
+       struct ice_hw *hw;
+       int err;
 
-/**
- * ice_vsi_add_vlan - Add vsi membership for given vlan
- * @vsi: the vsi being configured
- * @vid: vlan id to be added
- */
-static int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
-{
-       struct ice_fltr_list_entry *tmp;
-       struct ice_pf *pf = vsi->back;
-       LIST_HEAD(tmp_add_list);
-       enum ice_status status;
-       int err = 0;
+       /* this driver uses devres, see Documentation/driver-model/devres.txt */
+       err = pcim_enable_device(pdev);
+       if (err)
+               return err;
+
+       err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
+       if (err) {
+               dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err);
+               return err;
+       }
 
-       tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL);
-       if (!tmp)
+       pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL);
+       if (!pf)
                return -ENOMEM;
 
-       tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
-       tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
-       tmp->fltr_info.flag = ICE_FLTR_TX;
-       tmp->fltr_info.src = vsi->vsi_num;
-       tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
-       tmp->fltr_info.l_data.vlan.vlan_id = vid;
+       /* set up for high or low dma */
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (err)
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (err) {
+               dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+               return err;
+       }
+
+       pci_enable_pcie_error_reporting(pdev);
+       pci_set_master(pdev);
 
-       INIT_LIST_HEAD(&tmp->list_entry);
-       list_add(&tmp->list_entry, &tmp_add_list);
+       pf->pdev = pdev;
+       pci_set_drvdata(pdev, pf);
+       set_bit(__ICE_DOWN, pf->state);
+       /* Disable service task until DOWN bit is cleared */
+       set_bit(__ICE_SERVICE_DIS, pf->state);
 
-       status = ice_add_vlan(&pf->hw, &tmp_add_list);
-       if (status) {
-               err = -ENODEV;
-               dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n",
-                       vid, vsi->vsi_num);
+       hw = &pf->hw;
+       hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
+       hw->back = pf;
+       hw->vendor_id = pdev->vendor;
+       hw->device_id = pdev->device;
+       pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+       hw->subsystem_vendor_id = pdev->subsystem_vendor;
+       hw->subsystem_device_id = pdev->subsystem_device;
+       hw->bus.device = PCI_SLOT(pdev->devfn);
+       hw->bus.func = PCI_FUNC(pdev->devfn);
+       ice_set_ctrlq_len(hw);
+
+       pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
+
+#ifndef CONFIG_DYNAMIC_DEBUG
+       if (debug < -1)
+               hw->debug_mask = debug;
+#endif
+
+       err = ice_init_hw(hw);
+       if (err) {
+               dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err);
+               err = -EIO;
+               goto err_exit_unroll;
        }
 
-       ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
-       return err;
-}
+       dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n",
+                hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
+                hw->api_maj_ver, hw->api_min_ver);
 
-/**
- * ice_vlan_rx_add_vid - Add a vlan id filter to HW offload
- * @netdev: network interface to be adjusted
- * @proto: unused protocol
- * @vid: vlan id to be added
- *
- * net_device_ops implementation for adding vlan ids
- */
-static int ice_vlan_rx_add_vid(struct net_device *netdev,
-                              __always_unused __be16 proto, u16 vid)
-{
-       struct ice_netdev_priv *np = netdev_priv(netdev);
-       struct ice_vsi *vsi = np->vsi;
-       int ret;
+       ice_init_pf(pf);
 
-       if (vid >= VLAN_N_VID) {
-               netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
-                          vid, VLAN_N_VID);
-               return -EINVAL;
+       ice_determine_q_usage(pf);
+
+       pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC,
+                                 hw->func_caps.guaranteed_num_vsi);
+       if (!pf->num_alloc_vsi) {
+               err = -EIO;
+               goto err_init_pf_unroll;
        }
 
-       if (vsi->info.pvid)
-               return -EINVAL;
+       pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi,
+                              sizeof(struct ice_vsi *), GFP_KERNEL);
+       if (!pf->vsi) {
+               err = -ENOMEM;
+               goto err_init_pf_unroll;
+       }
 
-       /* Enable VLAN pruning when VLAN 0 is added */
-       if (unlikely(!vid)) {
-               ret = ice_cfg_vlan_pruning(vsi, true);
-               if (ret)
-                       return ret;
+       err = ice_init_interrupt_scheme(pf);
+       if (err) {
+               dev_err(&pdev->dev,
+                       "ice_init_interrupt_scheme failed: %d\n", err);
+               err = -EIO;
+               goto err_init_interrupt_unroll;
        }
 
-       /* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is
-        * needed to continue allowing all untagged packets since VLAN prune
-        * list is applied to all packets by the switch
+       /* Driver is mostly up */
+       clear_bit(__ICE_DOWN, pf->state);
+
+       /* In case of MSIX we are going to set up the misc vector right here
+        * to handle admin queue events etc. In case of legacy and MSI
+        * the misc functionality and queue processing are combined in
+        * the same vector and that gets set up at open.
         */
-       ret = ice_vsi_add_vlan(vsi, vid);
+       if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+               err = ice_req_irq_msix_misc(pf);
+               if (err) {
+                       dev_err(&pdev->dev,
+                               "setup of misc vector failed: %d\n", err);
+                       goto err_init_interrupt_unroll;
+               }
+       }
 
-       if (!ret)
-               set_bit(vid, vsi->active_vlans);
+       /* create switch struct for the switch element created by FW on boot */
+       pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(struct ice_sw),
+                                   GFP_KERNEL);
+       if (!pf->first_sw) {
+               err = -ENOMEM;
+               goto err_msix_misc_unroll;
+       }
 
-       return ret;
-}
+       if (hw->evb_veb)
+               pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
+       else
+               pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
 
-/**
- * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
- * @vsi: the VSI being configured
- * @vid: VLAN id to be removed
- *
- * Returns 0 on success and negative on failure
- */
-static int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
-{
-       struct ice_fltr_list_entry *list;
-       struct ice_pf *pf = vsi->back;
-       LIST_HEAD(tmp_add_list);
-       int status = 0;
+       pf->first_sw->pf = pf;
 
-       list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
-       if (!list)
-               return -ENOMEM;
+       /* record the sw_id available for later use */
+       pf->first_sw->sw_id = hw->port_info->sw_id;
+
+       err = ice_setup_pf_sw(pf);
+       if (err) {
+               dev_err(&pdev->dev,
+                       "probe failed due to setup pf switch: %d\n", err);
+               goto err_alloc_sw_unroll;
+       }
 
-       list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
-       list->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
-       list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
-       list->fltr_info.l_data.vlan.vlan_id = vid;
-       list->fltr_info.flag = ICE_FLTR_TX;
-       list->fltr_info.src = vsi->vsi_num;
+       clear_bit(__ICE_SERVICE_DIS, pf->state);
 
-       INIT_LIST_HEAD(&list->list_entry);
-       list_add(&list->list_entry, &tmp_add_list);
+       /* since everything is good, start the service timer */
+       mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
 
-       if (ice_remove_vlan(&pf->hw, &tmp_add_list)) {
-               dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n",
-                       vid, vsi->vsi_num);
-               status = -EIO;
+       err = ice_init_link_events(pf->hw.port_info);
+       if (err) {
+               dev_err(&pdev->dev, "ice_init_link_events failed: %d\n", err);
+               goto err_alloc_sw_unroll;
        }
 
-       ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
-       return status;
+       return 0;
+
+err_alloc_sw_unroll:
+       set_bit(__ICE_SERVICE_DIS, pf->state);
+       set_bit(__ICE_DOWN, pf->state);
+       devm_kfree(&pf->pdev->dev, pf->first_sw);
+err_msix_misc_unroll:
+       ice_free_irq_msix_misc(pf);
+err_init_interrupt_unroll:
+       ice_clear_interrupt_scheme(pf);
+       devm_kfree(&pdev->dev, pf->vsi);
+err_init_pf_unroll:
+       ice_deinit_pf(pf);
+       ice_deinit_hw(hw);
+err_exit_unroll:
+       pci_disable_pcie_error_reporting(pdev);
+       return err;
 }
 
 /**
- * ice_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
- * @netdev: network interface to be adjusted
- * @proto: unused protocol
- * @vid: vlan id to be removed
- *
- * net_device_ops implementation for removing vlan ids
+ * ice_remove - Device removal routine
+ * @pdev: PCI device information struct
  */
-static int ice_vlan_rx_kill_vid(struct net_device *netdev,
-                               __always_unused __be16 proto, u16 vid)
+static void ice_remove(struct pci_dev *pdev)
 {
-       struct ice_netdev_priv *np = netdev_priv(netdev);
-       struct ice_vsi *vsi = np->vsi;
-       int status;
+       struct ice_pf *pf = pci_get_drvdata(pdev);
+       int i;
 
-       if (vsi->info.pvid)
-               return -EINVAL;
+       if (!pf)
+               return;
 
-       /* Make sure ice_vsi_kill_vlan is successful before updating VLAN
-        * information
-        */
-       status = ice_vsi_kill_vlan(vsi, vid);
-       if (status)
-               return status;
-
-       clear_bit(vid, vsi->active_vlans);
-
-       /* Disable VLAN pruning when VLAN 0 is removed */
-       if (unlikely(!vid))
-               status = ice_cfg_vlan_pruning(vsi, false);
-
-       return status;
-}
-
-/**
- * ice_setup_pf_sw - Setup the HW switch on startup or after reset
- * @pf: board private structure
- *
- * Returns 0 on success, negative value on failure
- */
-static int ice_setup_pf_sw(struct ice_pf *pf)
-{
-       LIST_HEAD(tmp_add_list);
-       u8 broadcast[ETH_ALEN];
-       struct ice_vsi *vsi;
-       int status = 0;
-
-       if (ice_is_reset_recovery_pending(pf->state))
-               return -EBUSY;
-
-       vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
-       if (!vsi) {
-               status = -ENOMEM;
-               goto unroll_vsi_setup;
-       }
-
-       /* To add a MAC filter, first add the MAC to a list and then
-        * pass the list to ice_add_mac.
-        */
-
-       /* Add a unicast MAC filter so the VSI can get its packets */
-       status = ice_add_mac_to_list(vsi, &tmp_add_list,
-                                    vsi->port_info->mac.perm_addr);
-       if (status)
-               goto unroll_vsi_setup;
-
-       /* VSI needs to receive broadcast traffic, so add the broadcast
-        * MAC address to the list as well.
-        */
-       eth_broadcast_addr(broadcast);
-       status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
-       if (status)
-               goto free_mac_list;
-
-       /* program MAC filters for entries in tmp_add_list */
-       status = ice_add_mac(&pf->hw, &tmp_add_list);
-       if (status) {
-               dev_err(&pf->pdev->dev, "Could not add MAC filters\n");
-               status = -ENOMEM;
-               goto free_mac_list;
-       }
-
-       ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
-       return status;
-
-free_mac_list:
-       ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
-
-unroll_vsi_setup:
-       if (vsi) {
-               ice_vsi_free_q_vectors(vsi);
-               if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)
-                       unregister_netdev(vsi->netdev);
-               if (vsi->netdev) {
-                       free_netdev(vsi->netdev);
-                       vsi->netdev = NULL;
-               }
+       set_bit(__ICE_DOWN, pf->state);
+       ice_service_task_stop(pf);
 
-               ice_vsi_delete(vsi);
-               ice_vsi_put_qs(vsi);
-               pf->q_left_tx += vsi->alloc_txq;
-               pf->q_left_rx += vsi->alloc_rxq;
-               ice_vsi_clear(vsi);
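+       /* tear down in roughly the reverse order of probe: VFs first, then
+        * VSIs, the misc vector and the interrupt scheme
+        */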
+       if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
+               ice_free_vfs(pf);
+       ice_vsi_release_all(pf);
+       ice_free_irq_msix_misc(pf);
+       ice_for_each_vsi(pf, i) {
+               if (!pf->vsi[i])
+                       continue;
+               ice_vsi_free_q_vectors(pf->vsi[i]);
        }
-       return status;
+       ice_clear_interrupt_scheme(pf);
+       ice_deinit_pf(pf);
+       ice_deinit_hw(&pf->hw);
+       pci_disable_pcie_error_reporting(pdev);
 }
 
-/**
- * ice_determine_q_usage - Calculate queue distribution
- * @pf: board private structure
+/* ice_pci_tbl - PCI Device ID Table
  *
- * Determine queue usage from the number of online CPUs and available queues
- */
-static void ice_determine_q_usage(struct ice_pf *pf)
-{
-       u16 q_left_tx, q_left_rx;
-
-       q_left_tx = pf->hw.func_caps.common_cap.num_txq;
-       q_left_rx = pf->hw.func_caps.common_cap.num_rxq;
-
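-       /* one LAN Tx queue per online CPU, capped by what the device offers */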
-       pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());
-
-       /* only 1 rx queue unless RSS is enabled */
-       if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
-               pf->num_lan_rx = 1;
-       else
-               pf->num_lan_rx = min_t(int, q_left_rx, num_online_cpus());
-
-       pf->q_left_tx = q_left_tx - pf->num_lan_tx;
-       pf->q_left_rx = q_left_rx - pf->num_lan_rx;
-}
-
-/**
- * ice_deinit_pf - Unrolls initializations done by ice_init_pf
- * @pf: board private structure to initialize
- */
-static void ice_deinit_pf(struct ice_pf *pf)
-{
-       ice_service_task_stop(pf);
-       mutex_destroy(&pf->sw_mutex);
-       mutex_destroy(&pf->avail_q_mutex);
-}
-
-/**
- * ice_init_pf - Initialize general software structures (struct ice_pf)
- * @pf: board private structure to initialize
- */
-static void ice_init_pf(struct ice_pf *pf)
-{
-       bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
-       set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
-
-       mutex_init(&pf->sw_mutex);
-       mutex_init(&pf->avail_q_mutex);
-
-       /* Clear avail_[t|r]x_qs bitmaps (set all to avail) */
-       mutex_lock(&pf->avail_q_mutex);
-       bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS);
-       bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS);
-       mutex_unlock(&pf->avail_q_mutex);
-
-       if (pf->hw.func_caps.common_cap.rss_table_size)
-               set_bit(ICE_FLAG_RSS_ENA, pf->flags);
-
-       /* setup service timer and periodic service task */
-       timer_setup(&pf->serv_tmr, ice_service_timer, 0);
-       pf->serv_tmr_period = HZ;
-       INIT_WORK(&pf->serv_task, ice_service_task);
-       clear_bit(__ICE_SERVICE_SCHED, pf->state);
-}
-
-/**
- * ice_ena_msix_range - Request a range of MSIX vectors from the OS
- * @pf: board private structure
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
  *
- * Compute the number of MSI-X vectors required (v_budget) and request them
- * from the OS. Returns the number of vectors reserved, or negative on failure.
- */
-static int ice_ena_msix_range(struct ice_pf *pf)
-{
-       int v_left, v_actual, v_budget = 0;
-       int needed, err, i;
-
-       v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
-
-       /* reserve one vector for miscellaneous handler */
-       needed = 1;
-       v_budget += needed;
-       v_left -= needed;
-
-       /* reserve vectors for LAN traffic */
-       pf->num_lan_msix = min_t(int, num_online_cpus(), v_left);
-       v_budget += pf->num_lan_msix;
-
-       pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
-                                       sizeof(struct msix_entry), GFP_KERNEL);
-
-       if (!pf->msix_entries) {
-               err = -ENOMEM;
-               goto exit_err;
-       }
-
-       for (i = 0; i < v_budget; i++)
-               pf->msix_entries[i].entry = i;
-
-       /* actually reserve the vectors */
-       v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
-                                        ICE_MIN_MSIX, v_budget);
-
-       if (v_actual < 0) {
-               dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n");
-               err = v_actual;
-               goto msix_err;
-       }
-
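-       /* If fewer vectors were granted than requested, degrade gracefully:
-        * keep the misc vector and shrink the LAN set; with only two vectors
-        * run a single LAN queue; below that, give up on MSI-X entirely.
-        */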
-       if (v_actual < v_budget) {
-               dev_warn(&pf->pdev->dev,
-                        "not enough vectors. requested = %d, obtained = %d\n",
-                        v_budget, v_actual);
-               if (v_actual >= (pf->num_lan_msix + 1)) {
-                       pf->num_avail_msix = v_actual - (pf->num_lan_msix + 1);
-               } else if (v_actual >= 2) {
-                       pf->num_lan_msix = 1;
-                       pf->num_avail_msix = v_actual - 2;
-               } else {
-                       pci_disable_msix(pf->pdev);
-                       err = -ERANGE;
-                       goto msix_err;
-               }
-       }
-
-       return v_actual;
-
-msix_err:
-       devm_kfree(&pf->pdev->dev, pf->msix_entries);
-       goto exit_err;
-
-exit_err:
-       pf->num_lan_msix = 0;
-       clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
-       return err;
-}
-
-/**
- * ice_dis_msix - Disable MSI-X interrupt setup in OS
- * @pf: board private structure
- */
-static void ice_dis_msix(struct ice_pf *pf)
-{
-       pci_disable_msix(pf->pdev);
-       devm_kfree(&pf->pdev->dev, pf->msix_entries);
-       pf->msix_entries = NULL;
-       clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
-}
-
-/**
- * ice_init_interrupt_scheme - Determine proper interrupt scheme
- * @pf: board private structure to initialize
- */
-static int ice_init_interrupt_scheme(struct ice_pf *pf)
-{
-       int vectors = 0;
-       ssize_t size;
-
-       if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
-               vectors = ice_ena_msix_range(pf);
-       else
-               return -ENODEV;
-
-       if (vectors < 0)
-               return vectors;
-
-       /* set up vector assignment tracking */
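-       /* the allocation is sized for one u16 tracking entry per vector */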
-       size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors);
-
-       pf->irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
-       if (!pf->irq_tracker) {
-               ice_dis_msix(pf);
-               return -ENOMEM;
-       }
-
-       pf->irq_tracker->num_entries = vectors;
-
-       return 0;
-}
-
-/**
- * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
- * @pf: board private structure
- */
-static void ice_clear_interrupt_scheme(struct ice_pf *pf)
-{
-       if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
-               ice_dis_msix(pf);
-
-       if (pf->irq_tracker) {
-               devm_kfree(&pf->pdev->dev, pf->irq_tracker);
-               pf->irq_tracker = NULL;
-       }
-}
-
-/**
- * ice_probe - Device initialization routine
- * @pdev: PCI device information struct
- * @ent: entry in ice_pci_tbl
- *
- * Returns 0 on success, negative on failure
- */
-static int ice_probe(struct pci_dev *pdev,
-                    const struct pci_device_id __always_unused *ent)
-{
-       struct ice_pf *pf;
-       struct ice_hw *hw;
-       int err;
-
-       /* this driver uses devres, see Documentation/driver-model/devres.txt */
-       err = pcim_enable_device(pdev);
-       if (err)
-               return err;
-
-       err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
-       if (err) {
-               dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err);
-               return err;
-       }
-
-       pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL);
-       if (!pf)
-               return -ENOMEM;
-
-       /* set up for high or low dma */
-       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-       if (err)
-               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-       if (err) {
-               dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
-               return err;
-       }
-
-       pci_enable_pcie_error_reporting(pdev);
-       pci_set_master(pdev);
-
-       pf->pdev = pdev;
-       pci_set_drvdata(pdev, pf);
-       set_bit(__ICE_DOWN, pf->state);
-       /* Disable service task until DOWN bit is cleared */
-       set_bit(__ICE_SERVICE_DIS, pf->state);
-
-       hw = &pf->hw;
-       hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
-       hw->back = pf;
-       hw->vendor_id = pdev->vendor;
-       hw->device_id = pdev->device;
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
-       hw->subsystem_vendor_id = pdev->subsystem_vendor;
-       hw->subsystem_device_id = pdev->subsystem_device;
-       hw->bus.device = PCI_SLOT(pdev->devfn);
-       hw->bus.func = PCI_FUNC(pdev->devfn);
-       ice_set_ctrlq_len(hw);
-
-       pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
-
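-       /* without dynamic debug support, a "debug" module parameter below
-        * -1 is also used as the raw hw->debug_mask bitmask
-        */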
-#ifndef CONFIG_DYNAMIC_DEBUG
-       if (debug < -1)
-               hw->debug_mask = debug;
-#endif
-
-       err = ice_init_hw(hw);
-       if (err) {
-               dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err);
-               err = -EIO;
-               goto err_exit_unroll;
-       }
-
-       dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n",
-                hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
-                hw->api_maj_ver, hw->api_min_ver);
-
-       ice_init_pf(pf);
-
-       ice_determine_q_usage(pf);
-
-       pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC,
-                                 hw->func_caps.guaranteed_num_vsi);
-       if (!pf->num_alloc_vsi) {
-               err = -EIO;
-               goto err_init_pf_unroll;
-       }
-
-       pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi,
-                              sizeof(struct ice_vsi *), GFP_KERNEL);
-       if (!pf->vsi) {
-               err = -ENOMEM;
-               goto err_init_pf_unroll;
-       }
-
-       err = ice_init_interrupt_scheme(pf);
-       if (err) {
-               dev_err(&pdev->dev,
-                       "ice_init_interrupt_scheme failed: %d\n", err);
-               err = -EIO;
-               goto err_init_interrupt_unroll;
-       }
-
-       /* Driver is mostly up */
-       clear_bit(__ICE_DOWN, pf->state);
-
-       /* In case of MSIX we are going to set up the misc vector right here
-        * to handle admin queue events etc. In case of legacy and MSI
-        * the misc functionality and queue processing are combined in
-        * the same vector, which gets set up at open.
-        */
-       if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
-               err = ice_req_irq_msix_misc(pf);
-               if (err) {
-                       dev_err(&pdev->dev,
-                               "setup of misc vector failed: %d\n", err);
-                       goto err_init_interrupt_unroll;
-               }
-       }
-
-       /* create switch struct for the switch element created by FW on boot */
-       pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(struct ice_sw),
-                                   GFP_KERNEL);
-       if (!pf->first_sw) {
-               err = -ENOMEM;
-               goto err_msix_misc_unroll;
-       }
-
-       if (hw->evb_veb)
-               pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
-       else
-               pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
-
-       pf->first_sw->pf = pf;
-
-       /* record the sw_id available for later use */
-       pf->first_sw->sw_id = hw->port_info->sw_id;
-
-       err = ice_setup_pf_sw(pf);
-       if (err) {
-               dev_err(&pdev->dev,
-                       "probe failed due to setup pf switch:%d\n", err);
-               goto err_alloc_sw_unroll;
-       }
-
-       clear_bit(__ICE_SERVICE_DIS, pf->state);
-
-       /* since everything is good, start the service timer */
-       mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
-
-       err = ice_init_link_events(pf->hw.port_info);
-       if (err) {
-               dev_err(&pdev->dev, "ice_init_link_events failed: %d\n", err);
-               goto err_alloc_sw_unroll;
-       }
-
-       return 0;
-
-err_alloc_sw_unroll:
-       set_bit(__ICE_SERVICE_DIS, pf->state);
-       set_bit(__ICE_DOWN, pf->state);
-       devm_kfree(&pf->pdev->dev, pf->first_sw);
-err_msix_misc_unroll:
-       ice_free_irq_msix_misc(pf);
-err_init_interrupt_unroll:
-       ice_clear_interrupt_scheme(pf);
-       devm_kfree(&pdev->dev, pf->vsi);
-err_init_pf_unroll:
-       ice_deinit_pf(pf);
-       ice_deinit_hw(hw);
-err_exit_unroll:
-       pci_disable_pcie_error_reporting(pdev);
-       return err;
-}
-
-/**
- * ice_remove - Device removal routine
- * @pdev: PCI device information struct
- */
-static void ice_remove(struct pci_dev *pdev)
-{
-       struct ice_pf *pf = pci_get_drvdata(pdev);
-
-       if (!pf)
-               return;
-
-       set_bit(__ICE_DOWN, pf->state);
-       ice_service_task_stop(pf);
-
-       ice_vsi_release_all(pf);
-       ice_free_irq_msix_misc(pf);
-       ice_clear_interrupt_scheme(pf);
-       ice_deinit_pf(pf);
-       ice_deinit_hw(&pf->hw);
-       pci_disable_pcie_error_reporting(pdev);
-}
-
-/* ice_pci_tbl - PCI Device ID Table
- *
- * Wildcard entries (PCI_ANY_ID) should come last
- * Last entry must be all 0s
- *
- * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
- *   Class, Class Mask, private data (not used) }
- */
-static const struct pci_device_id ice_pci_tbl[] = {
-       { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_BACKPLANE), 0 },
-       { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_QSFP), 0 },
-       { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SFP), 0 },
-       { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_10G_BASE_T), 0 },
-       { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SGMII), 0 },
-       /* required last entry */
-       { 0, }
-};
-MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
-
-static struct pci_driver ice_driver = {
-       .name = KBUILD_MODNAME,
-       .id_table = ice_pci_tbl,
-       .probe = ice_probe,
-       .remove = ice_remove,
-};
-
-/**
- * ice_module_init - Driver registration routine
- *
- * ice_module_init is the first routine called when the driver is
- * loaded. It creates the driver workqueue and registers with the
- * PCI subsystem.
- */
-static int __init ice_module_init(void)
-{
-       int status;
-
-       pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
-       pr_info("%s\n", ice_copyright);
-
-       ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
-       if (!ice_wq) {
-               pr_err("Failed to create workqueue\n");
-               return -ENOMEM;
-       }
-
-       status = pci_register_driver(&ice_driver);
-       if (status) {
-               pr_err("failed to register pci driver, err %d\n", status);
-               destroy_workqueue(ice_wq);
-       }
-
-       return status;
-}
-module_init(ice_module_init);
-
-/**
- * ice_module_exit - Driver exit cleanup routine
- *
- * ice_module_exit is called just before the driver is removed
- * from memory.
- */
-static void __exit ice_module_exit(void)
-{
-       pci_unregister_driver(&ice_driver);
-       destroy_workqueue(ice_wq);
-       pr_info("module unloaded\n");
-}
-module_exit(ice_module_exit);
-
-/**
- * ice_set_mac_address - NDO callback to set mac address
- * @netdev: network interface device structure
- * @pi: pointer to an address structure
- *
- * Returns 0 on success, negative on failure
- */
-static int ice_set_mac_address(struct net_device *netdev, void *pi)
-{
-       struct ice_netdev_priv *np = netdev_priv(netdev);
-       struct ice_vsi *vsi = np->vsi;
-       struct ice_pf *pf = vsi->back;
-       struct ice_hw *hw = &pf->hw;
-       struct sockaddr *addr = pi;
-       enum ice_status status;
-       LIST_HEAD(a_mac_list);
-       LIST_HEAD(r_mac_list);
-       u8 flags = 0;
-       int err;
-       u8 *mac;
-
-       mac = (u8 *)addr->sa_data;
-
-       if (!is_valid_ether_addr(mac))
-               return -EADDRNOTAVAIL;
-
-       if (ether_addr_equal(netdev->dev_addr, mac)) {
-               netdev_warn(netdev, "already using mac %pM\n", mac);
-               return 0;
-       }
-
-       if (test_bit(__ICE_DOWN, pf->state) ||
-           ice_is_reset_recovery_pending(pf->state)) {
-               netdev_err(netdev, "can't set mac %pM. device not ready\n",
-                          mac);
-               return -EBUSY;
-       }
-
-       /* When we change the mac address we also have to change the mac address
-        * based filter rules that were created previously for the old mac
-        * address. So first, we remove the old filter rule using ice_remove_mac
-        * and then create a new filter rule using ice_add_mac. Note that for
-        * both these operations, we first need to form a "list" of mac
-        * addresses (even though in this case, we have only 1 mac address to be
-        * added/removed) and this is done using ice_add_mac_to_list. Depending on
-        * the ensuing operation this "list" of mac addresses is either to be
-        * added or removed from the filter.
-        */
-       err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr);
-       if (err) {
-               err = -EADDRNOTAVAIL;
-               goto free_lists;
-       }
-
-       status = ice_remove_mac(hw, &r_mac_list);
-       if (status) {
-               err = -EADDRNOTAVAIL;
-               goto free_lists;
-       }
-
-       err = ice_add_mac_to_list(vsi, &a_mac_list, mac);
-       if (err) {
-               err = -EADDRNOTAVAIL;
-               goto free_lists;
-       }
-
-       status = ice_add_mac(hw, &a_mac_list);
-       if (status) {
-               err = -EADDRNOTAVAIL;
-               goto free_lists;
-       }
-
-free_lists:
-       /* free list entries */
-       ice_free_fltr_list(&pf->pdev->dev, &r_mac_list);
-       ice_free_fltr_list(&pf->pdev->dev, &a_mac_list);
-
-       if (err) {
-               netdev_err(netdev, "can't set mac %pM. filter update failed\n",
-                          mac);
-               return err;
-       }
-
-       /* change the netdev's mac address */
-       memcpy(netdev->dev_addr, mac, netdev->addr_len);
-       netdev_dbg(vsi->netdev, "updated mac address to %pM\n",
-                  netdev->dev_addr);
-
-       /* write new mac address to the firmware */
-       flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
-       status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
-       if (status) {
-               netdev_err(netdev, "can't set mac %pM. write to firmware failed.\n",
-                          mac);
-       }
-       return 0;
-}
-
-/**
- * ice_set_rx_mode - NDO callback to set the netdev filters
- * @netdev: network interface device structure
- */
-static void ice_set_rx_mode(struct net_device *netdev)
-{
-       struct ice_netdev_priv *np = netdev_priv(netdev);
-       struct ice_vsi *vsi = np->vsi;
-
-       if (!vsi)
-               return;
-
-       /* Set the flags to synchronize filters
-        * ndo_set_rx_mode may be triggered even without a change in netdev
-        * flags
-        */
-       set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
-       set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
-       set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
-
-       /* schedule our worker thread which will take care of
-        * applying the new filter changes
-        */
-       ice_service_task_schedule(vsi->back);
-}
-
-/**
- * ice_fdb_add - add an entry to the hardware database
- * @ndm: the input from the stack
- * @tb: pointer to array of nladdr (unused)
- * @dev: the net device pointer
- * @addr: the MAC address entry being added
- * @vid: VLAN id
- * @flags: instructions from stack about fdb operation
- */
-static int ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
-                      struct net_device *dev, const unsigned char *addr,
-                      u16 vid, u16 flags)
-{
-       int err;
-
-       if (vid) {
-               netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
-               return -EINVAL;
-       }
-       if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
-               netdev_err(dev, "FDB only supports static addresses\n");
-               return -EINVAL;
-       }
-
-       if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
-               err = dev_uc_add_excl(dev, addr);
-       else if (is_multicast_ether_addr(addr))
-               err = dev_mc_add_excl(dev, addr);
-       else
-               err = -EINVAL;
-
-       /* Only return duplicate errors if NLM_F_EXCL is set */
-       if (err == -EEXIST && !(flags & NLM_F_EXCL))
-               err = 0;
-
-       return err;
-}
-
-/**
- * ice_fdb_del - delete an entry from the hardware database
- * @ndm: the input from the stack
- * @tb: pointer to array of nladdr (unused)
- * @dev: the net device pointer
- * @addr: the MAC address entry being added
- * @vid: VLAN id
- */
-static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
-                      struct net_device *dev, const unsigned char *addr,
-                      __always_unused u16 vid)
-{
-       int err;
-
-       if (ndm->ndm_state & NUD_PERMANENT) {
-               netdev_err(dev, "FDB only supports static addresses\n");
-               return -EINVAL;
-       }
-
-       if (is_unicast_ether_addr(addr))
-               err = dev_uc_del(dev, addr);
-       else if (is_multicast_ether_addr(addr))
-               err = dev_mc_del(dev, addr);
-       else
-               err = -EINVAL;
-
-       return err;
-}
-
-/**
- * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
- * @vsi: the vsi being changed
- */
-static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
-{
-       struct device *dev = &vsi->back->pdev->dev;
-       struct ice_hw *hw = &vsi->back->hw;
-       struct ice_vsi_ctx ctxt = { 0 };
-       enum ice_status status;
-
-       /* Here we are configuring the VSI to let the driver add VLAN tags by
-        * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
-        * insertion happens in the Tx hot path, in ice_tx_map.
-        */
-       ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
-
-       ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
-       ctxt.vsi_num = vsi->vsi_num;
-
-       status = ice_aq_update_vsi(hw, &ctxt, NULL);
-       if (status) {
-               dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
-                       status, hw->adminq.sq_last_status);
-               return -EIO;
-       }
-
-       vsi->info.vlan_flags = ctxt.info.vlan_flags;
-       return 0;
-}
-
-/**
- * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
- * @vsi: the vsi being changed
- * @ena: boolean value indicating if this is an enable or disable request
- */
-static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
-{
-       struct device *dev = &vsi->back->pdev->dev;
-       struct ice_hw *hw = &vsi->back->hw;
-       struct ice_vsi_ctx ctxt = { 0 };
-       enum ice_status status;
-
-       /* Here we are configuring what the VSI should do with the VLAN tag in
-        * the Rx packet. We can either leave the tag in the packet or put it in
-        * the Rx descriptor.
-        */
-       if (ena) {
-               /* Strip VLAN tag from Rx packet and put it in the desc */
-               ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
-       } else {
-               /* Disable stripping. Leave tag in packet */
-               ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
-       }
-
-       /* Allow all packets untagged/tagged */
-       ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
-
-       ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
-       ctxt.vsi_num = vsi->vsi_num;
-
-       status = ice_aq_update_vsi(hw, &ctxt, NULL);
-       if (status) {
-               dev_err(dev, "update VSI for VALN strip failed, ena = %d err %d aq_err %d\n",
-                       ena, status, hw->adminq.sq_last_status);
-               return -EIO;
-       }
-
-       vsi->info.vlan_flags = ctxt.info.vlan_flags;
-       return 0;
-}
-
-/**
- * ice_set_features - set the netdev feature flags
- * @netdev: ptr to the netdev being adjusted
- * @features: the feature set that the stack is suggesting
- */
-static int ice_set_features(struct net_device *netdev,
-                           netdev_features_t features)
-{
-       struct ice_netdev_priv *np = netdev_priv(netdev);
-       struct ice_vsi *vsi = np->vsi;
-       int ret = 0;
-
-       if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
-           !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
-               ret = ice_vsi_manage_vlan_stripping(vsi, true);
-       else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
-                (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
-               ret = ice_vsi_manage_vlan_stripping(vsi, false);
-       else if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
-                !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
-               ret = ice_vsi_manage_vlan_insertion(vsi);
-       else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
-                (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
-               ret = ice_vsi_manage_vlan_insertion(vsi);
-
-       return ret;
-}
-
-/**
- * ice_vsi_vlan_setup - Setup vlan offload properties on a VSI
- * @vsi: VSI to setup vlan properties for
- */
-static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
-{
-       int ret = 0;
-
-       if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
-               ret = ice_vsi_manage_vlan_stripping(vsi, true);
-       if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
-               ret = ice_vsi_manage_vlan_insertion(vsi);
-
-       return ret;
-}
-
-/**
- * ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up
- * @vsi: the VSI being brought back up
- */
-static int ice_restore_vlan(struct ice_vsi *vsi)
-{
-       int err;
-       u16 vid;
-
-       if (!vsi->netdev)
-               return -EINVAL;
-
-       err = ice_vsi_vlan_setup(vsi);
-       if (err)
-               return err;
-
-       for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) {
-               err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid);
-               if (err)
-                       break;
-       }
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ *   Class, Class Mask, private data (not used) }
+ */
+static const struct pci_device_id ice_pci_tbl[] = {
+       { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_BACKPLANE), 0 },
+       { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_QSFP), 0 },
+       { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SFP), 0 },
+       /* required last entry */
+       { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
 
-       return err;
-}
+static struct pci_driver ice_driver = {
+       .name = KBUILD_MODNAME,
+       .id_table = ice_pci_tbl,
+       .probe = ice_probe,
+       .remove = ice_remove,
+       .sriov_configure = ice_sriov_configure,
+};
 
 /**
- * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
- * @ring: The Tx ring to configure
- * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
- * @pf_q: queue index in the PF space
+ * ice_module_init - Driver registration routine
  *
- * Configure the Tx descriptor ring in TLAN context.
+ * ice_module_init is the first routine called when the driver is
+ * loaded. It creates the driver workqueue and registers with the
+ * PCI subsystem.
  */
-static void
-ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+static int __init ice_module_init(void)
 {
-       struct ice_vsi *vsi = ring->vsi;
-       struct ice_hw *hw = &vsi->back->hw;
-
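-       /* the hardware takes the ring's DMA base address in units defined
-        * by the ICE_TLAN_CTX_BASE_S shift
-        */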
-       tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
-
-       tlan_ctx->port_num = vsi->port_info->lport;
-
-       /* Transmit Queue Length */
-       tlan_ctx->qlen = ring->count;
+       int status;
 
-       /* PF number */
-       tlan_ctx->pf_num = hw->pf_id;
+       pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
+       pr_info("%s\n", ice_copyright);
 
-       /* queue belongs to a specific VSI type. The VF/VM index should be
-        * programmed per the vmvf_type setting:
-        * for vmvf_type = VF, it is VF number between 0-256
-        * for vmvf_type = VM, it is VM number between 0-767
-        * for PF or EMP this field should be set to zero
-        */
-       switch (vsi->type) {
-       case ICE_VSI_PF:
-               tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
-               break;
-       default:
-               return;
+       ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
+       if (!ice_wq) {
+               pr_err("Failed to create workqueue\n");
+               return -ENOMEM;
        }
 
-       /* make sure the context is associated with the right VSI */
-       tlan_ctx->src_vsi = vsi->vsi_num;
-
-       tlan_ctx->tso_ena = ICE_TX_LEGACY;
-       tlan_ctx->tso_qnum = pf_q;
+       status = pci_register_driver(&ice_driver);
+       if (status) {
+               pr_err("failed to register pci driver, err %d\n", status);
+               destroy_workqueue(ice_wq);
+       }
 
-       /* Legacy or Advanced Host Interface:
-        * 0: Advanced Host Interface
-        * 1: Legacy Host Interface
-        */
-       tlan_ctx->legacy_int = ICE_TX_LEGACY;
+       return status;
 }
+module_init(ice_module_init);
 
 /**
- * ice_vsi_cfg_txqs - Configure the VSI for Tx
- * @vsi: the VSI being configured
+ * ice_module_exit - Driver exit cleanup routine
  *
- * Return 0 on success and a negative value on error
- * Configure the Tx VSI for operation.
+ * ice_module_exit is called just before the driver is removed
+ * from memory.
  */
-static int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
+static void __exit ice_module_exit(void)
 {
-       struct ice_aqc_add_tx_qgrp *qg_buf;
-       struct ice_aqc_add_txqs_perq *txq;
-       struct ice_pf *pf = vsi->back;
-       enum ice_status status;
-       u16 buf_len, i, pf_q;
-       int err = 0, tc = 0;
-       u8 num_q_grps;
-
-       buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
-       qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
-       if (!qg_buf)
-               return -ENOMEM;
-
-       if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {
-               err = -EINVAL;
-               goto err_cfg_txqs;
-       }
-       qg_buf->num_txqs = 1;
-       num_q_grps = 1;
-
-       /* set up and configure the tx queues */
-       ice_for_each_txq(vsi, i) {
-               struct ice_tlan_ctx tlan_ctx = { 0 };
-
-               pf_q = vsi->txq_map[i];
-               ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
-               /* copy context contents into the qg_buf */
-               qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
-               ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
-                           ice_tlan_ctx_info);
-
-               /* init the queue-specific tail register; it is referred to
-                * as the transmit comm scheduler queue doorbell.
-                */
-               vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
-               status = ice_ena_vsi_txq(vsi->port_info, vsi->vsi_num, tc,
-                                        num_q_grps, qg_buf, buf_len, NULL);
-               if (status) {
-                       dev_err(&vsi->back->pdev->dev,
-                               "Failed to set LAN Tx queue context, error: %d\n",
-                               status);
-                       err = -ENODEV;
-                       goto err_cfg_txqs;
-               }
-
-               /* Add the Tx queue TEID into the VSI Tx ring from the
-                * response. This will complete configuring and enabling the
-                * queue.
-                */
-               txq = &qg_buf->txqs[0];
-               if (pf_q == le16_to_cpu(txq->txq_id))
-                       vsi->tx_rings[i]->txq_teid =
-                               le32_to_cpu(txq->q_teid);
-       }
-err_cfg_txqs:
-       devm_kfree(&pf->pdev->dev, qg_buf);
-       return err;
+       pci_unregister_driver(&ice_driver);
+       destroy_workqueue(ice_wq);
+       pr_info("module unloaded\n");
 }
+module_exit(ice_module_exit);
 
 /**
- * ice_setup_rx_ctx - Configure a receive ring context
- * @ring: The Rx ring to configure
+ * ice_set_mac_address - NDO callback to set mac address
+ * @netdev: network interface device structure
+ * @pi: pointer to an address structure
  *
- * Configure the Rx descriptor ring in RLAN context.
+ * Returns 0 on success, negative on failure
  */
-static int ice_setup_rx_ctx(struct ice_ring *ring)
+static int ice_set_mac_address(struct net_device *netdev, void *pi)
 {
-       struct ice_vsi *vsi = ring->vsi;
-       struct ice_hw *hw = &vsi->back->hw;
-       u32 rxdid = ICE_RXDID_FLEX_NIC;
-       struct ice_rlan_ctx rlan_ctx;
-       u32 regval;
-       u16 pf_q;
+       struct ice_netdev_priv *np = netdev_priv(netdev);
+       struct ice_vsi *vsi = np->vsi;
+       struct ice_pf *pf = vsi->back;
+       struct ice_hw *hw = &pf->hw;
+       struct sockaddr *addr = pi;
+       enum ice_status status;
+       LIST_HEAD(a_mac_list);
+       LIST_HEAD(r_mac_list);
+       u8 flags = 0;
        int err;
+       u8 *mac;
 
-       /* find the Rx queue number in the global space of 2K Rx queues */
-       pf_q = vsi->rxq_map[ring->q_index];
-
-       /* clear the context structure first */
-       memset(&rlan_ctx, 0, sizeof(rlan_ctx));
-
-       rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
+       mac = (u8 *)addr->sa_data;
 
-       rlan_ctx.qlen = ring->count;
+       if (!is_valid_ether_addr(mac))
+               return -EADDRNOTAVAIL;
 
-       /* Receive Packet Data Buffer Size.
-        * The Packet Data Buffer Size is defined in 128 byte units.
-        */
-       rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
+       if (ether_addr_equal(netdev->dev_addr, mac)) {
+               netdev_warn(netdev, "already using mac %pM\n", mac);
+               return 0;
+       }
 
-       /* use 32 byte descriptors */
-       rlan_ctx.dsize = 1;
+       if (test_bit(__ICE_DOWN, pf->state) ||
+           ice_is_reset_in_progress(pf->state)) {
+               netdev_err(netdev, "can't set mac %pM. device not ready\n",
+                          mac);
+               return -EBUSY;
+       }
 
-       /* Strip the Ethernet CRC bytes before the packet is posted to host
-        * memory.
+       /* When we change the mac address we also have to change the mac address
+        * based filter rules that were created previously for the old mac
+        * address. So first, we remove the old filter rule using ice_remove_mac
+        * and then create a new filter rule using ice_add_mac. Note that for
+        * both these operations, we first need to form a "list" of mac
+        * addresses (even though in this case, we have only 1 mac address to be
+        * added/removed) and this is done using ice_add_mac_to_list. Depending on
+        * the ensuing operation this "list" of mac addresses is either to be
+        * added or removed from the filter.
         */
-       rlan_ctx.crcstrip = 1;
-
-       /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
-       rlan_ctx.l2tsel = 1;
+       err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr);
+       if (err) {
+               err = -EADDRNOTAVAIL;
+               goto free_lists;
+       }
 
-       rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
-       rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
-       rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
+       status = ice_remove_mac(hw, &r_mac_list);
+       if (status) {
+               err = -EADDRNOTAVAIL;
+               goto free_lists;
+       }
 
-       /* This controls whether VLAN is stripped from inner headers.
-        * The VLAN in the inner L2 header is stripped to the receive
-        * descriptor if enabled by this flag.
-        */
-       rlan_ctx.showiv = 0;
+       err = ice_add_mac_to_list(vsi, &a_mac_list, mac);
+       if (err) {
+               err = -EADDRNOTAVAIL;
+               goto free_lists;
+       }
 
-       /* Max packet size for this queue - must not be set to a larger value
-        * than 5 x DBUF
-        */
-       rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
-                              ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);
-
-       /* Rx queue threshold in units of 64 */
-       rlan_ctx.lrxqthresh = 1;
-
-       /* Enable Flexible Descriptors in the queue context, which
-        * allows this driver to select a specific receive descriptor format
-        */
-       regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
-       regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
-               QRXFLXP_CNTXT_RXDID_IDX_M;
-
-       /* increasing context priority to pick up profile id;
-        * default is 0x01; setting to 0x03 to ensure the profile
-        * is programmed even if the previous context is of the same priority
-        */
-       regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
-               QRXFLXP_CNTXT_RXDID_PRIO_M;
+       status = ice_add_mac(hw, &a_mac_list);
+       if (status) {
+               err = -EADDRNOTAVAIL;
+               goto free_lists;
+       }
 
-       wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+free_lists:
+       /* free list entries */
+       ice_free_fltr_list(&pf->pdev->dev, &r_mac_list);
+       ice_free_fltr_list(&pf->pdev->dev, &a_mac_list);
 
-       /* the absolute queue number (out of 2K) must be passed */
-       err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
        if (err) {
-               dev_err(&vsi->back->pdev->dev,
-                       "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
-                       pf_q, err);
-               return -EIO;
+               netdev_err(netdev, "can't set mac %pM. filter update failed\n",
+                          mac);
+               return err;
        }
 
-       /* init queue specific tail register */
-       ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
-       writel(0, ring->tail);
-       ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
+       /* change the netdev's mac address */
+       memcpy(netdev->dev_addr, mac, netdev->addr_len);
+       netdev_dbg(vsi->netdev, "updated mac address to %pM\n",
+                  netdev->dev_addr);
 
+       /* write new mac address to the firmware */
+       flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
+       status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
+       if (status) {
+               netdev_err(netdev, "can't set mac %pM. write to firmware failed.\n",
+                          mac);
+       }
        return 0;
 }
 
 /**
- * ice_vsi_cfg_rxqs - Configure the VSI for Rx
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Rx VSI for operation.
+ * ice_set_rx_mode - NDO callback to set the netdev filters
+ * @netdev: network interface device structure
  */
-static int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
+static void ice_set_rx_mode(struct net_device *netdev)
 {
-       int err = 0;
-       u16 i;
+       struct ice_netdev_priv *np = netdev_priv(netdev);
+       struct ice_vsi *vsi = np->vsi;
 
-       if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
-               vsi->max_frame = vsi->netdev->mtu +
-                       ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-       else
-               vsi->max_frame = ICE_RXBUF_2048;
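-       /* the MTU-based max_frame above includes the Ethernet header, FCS
-        * and one VLAN tag of overhead
-        */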
+       if (!vsi)
+               return;
 
-       vsi->rx_buf_len = ICE_RXBUF_2048;
-       /* set up individual rings */
-       for (i = 0; i < vsi->num_rxq && !err; i++)
-               err = ice_setup_rx_ctx(vsi->rx_rings[i]);
+       /* Set the flags to synchronize filters
+        * ndo_set_rx_mode may be triggered even without a change in netdev
+        * flags
+        */
+       set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
+       set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
+       set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
 
-       if (err) {
-               dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n");
-               return -EIO;
-       }
-       return err;
+       /* schedule our worker thread which will take care of
+        * applying the new filter changes
+        */
+       ice_service_task_schedule(vsi->back);
 }
 
 /**
- * ice_vsi_cfg - Setup the VSI
- * @vsi: the VSI being configured
- *
- * Return 0 on success and negative value on error
+ * ice_fdb_add - add an entry to the hardware database
+ * @ndm: the input from the stack
+ * @tb: pointer to array of nladdr (unused)
+ * @dev: the net device pointer
+ * @addr: the MAC address entry being added
+ * @vid: VLAN id
+ * @flags: instructions from stack about fdb operation
  */
-static int ice_vsi_cfg(struct ice_vsi *vsi)
+static int ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
+                      struct net_device *dev, const unsigned char *addr,
+                      u16 vid, u16 flags)
 {
        int err;
 
-       if (vsi->netdev) {
-               ice_set_rx_mode(vsi->netdev);
-               err = ice_restore_vlan(vsi);
-               if (err)
-                       return err;
+       if (vid) {
+               netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
+               return -EINVAL;
+       }
+       if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+               netdev_err(dev, "FDB only supports static addresses\n");
+               return -EINVAL;
        }
 
-       err = ice_vsi_cfg_txqs(vsi);
-       if (!err)
-               err = ice_vsi_cfg_rxqs(vsi);
+       if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+               err = dev_uc_add_excl(dev, addr);
+       else if (is_multicast_ether_addr(addr))
+               err = dev_mc_add_excl(dev, addr);
+       else
+               err = -EINVAL;
+
+       /* Only return duplicate errors if NLM_F_EXCL is set */
+       if (err == -EEXIST && !(flags & NLM_F_EXCL))
+               err = 0;
 
        return err;
 }
 
 /**
- * ice_vsi_stop_tx_rings - Disable Tx rings
- * @vsi: the VSI being configured
+ * ice_fdb_del - delete an entry from the hardware database
+ * @ndm: the input from the stack
+ * @tb: pointer to array of nladdr (unused)
+ * @dev: the net device pointer
+ * @addr: the MAC address entry being added
+ * @vid: VLAN id
  */
-static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi)
+static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
+                      struct net_device *dev, const unsigned char *addr,
+                      __always_unused u16 vid)
 {
-       struct ice_pf *pf = vsi->back;
-       struct ice_hw *hw = &pf->hw;
-       enum ice_status status;
-       u32 *q_teids, val;
-       u16 *q_ids, i;
-       int err = 0;
-
-       if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
-               return -EINVAL;
-
-       q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids),
-                              GFP_KERNEL);
-       if (!q_teids)
-               return -ENOMEM;
-
-       q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids),
-                            GFP_KERNEL);
-       if (!q_ids) {
-               err = -ENOMEM;
-               goto err_alloc_q_ids;
-       }
-
-       /* set up the tx queue list to be disabled */
-       ice_for_each_txq(vsi, i) {
-               u16 v_idx;
-
-               if (!vsi->tx_rings || !vsi->tx_rings[i]) {
-                       err = -EINVAL;
-                       goto err_out;
-               }
-
-               q_ids[i] = vsi->txq_map[i];
-               q_teids[i] = vsi->tx_rings[i]->txq_teid;
-
-               /* clear cause_ena bit for disabled queues */
-               val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
-               val &= ~QINT_TQCTL_CAUSE_ENA_M;
-               wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
-
-               /* software is expected to wait for 100 ns */
-               ndelay(100);
+       int err;
 
-               /* trigger a software interrupt for the vector associated
-                * with the queue to schedule the napi handler
-                */
-               v_idx = vsi->tx_rings[i]->q_vector->v_idx;
-               wr32(hw, GLINT_DYN_CTL(vsi->base_vector + v_idx),
-                    GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
-       }
-       status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,
-                                NULL);
-       /* if the disable queue command was exercised during an active reset
-        * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as
-        * the reset operation disables queues at the hardware level anyway.
-        */
-       if (status == ICE_ERR_RESET_ONGOING) {
-               dev_dbg(&pf->pdev->dev,
-                       "Reset in progress. LAN Tx queues already disabled\n");
-       } else if (status) {
-               dev_err(&pf->pdev->dev,
-                       "Failed to disable LAN Tx queues, error: %d\n",
-                       status);
-               err = -ENODEV;
+       if (ndm->ndm_state & NUD_PERMANENT) {
+               netdev_err(dev, "FDB only supports static addresses\n");
+               return -EINVAL;
        }
 
-err_out:
-       devm_kfree(&pf->pdev->dev, q_ids);
-
-err_alloc_q_ids:
-       devm_kfree(&pf->pdev->dev, q_teids);
+       if (is_unicast_ether_addr(addr))
+               err = dev_uc_del(dev, addr);
+       else if (is_multicast_ether_addr(addr))
+               err = dev_mc_del(dev, addr);
+       else
+               err = -EINVAL;
 
        return err;
 }
 
 /**
- * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
- * @pf: the PF being configured
- * @pf_q: the PF queue
- * @ena: enable or disable state of the queue
- *
- * This routine waits for the given PF Rx queue to reach the requested
- * enabled or disabled state.
- * Returns -ETIMEDOUT if the state is not reached after multiple retries;
- * otherwise returns 0 on success.
+ * ice_set_features - set the netdev feature flags
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
  */
-static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
+static int ice_set_features(struct net_device *netdev,
+                           netdev_features_t features)
 {
-       int i;
-
-       for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
-               u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
+       struct ice_netdev_priv *np = netdev_priv(netdev);
+       struct ice_vsi *vsi = np->vsi;
+       int ret = 0;
 
-               if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
-                       break;
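+       /* NETIF_F_RXHASH toggles RSS hashing; ice_vsi_manage_rss_lut() is
+        * expected to restore the RSS lookup table when the flag is set and
+        * clear it when the flag is removed
+        */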
+       if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
+               ret = ice_vsi_manage_rss_lut(vsi, true);
+       else if (!(features & NETIF_F_RXHASH) &&
+                netdev->features & NETIF_F_RXHASH)
+               ret = ice_vsi_manage_rss_lut(vsi, false);
 
-               usleep_range(10, 20);
-       }
-       if (i >= ICE_Q_WAIT_RETRY_LIMIT)
-               return -ETIMEDOUT;
+       if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
+           !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
+               ret = ice_vsi_manage_vlan_stripping(vsi, true);
+       else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
+                (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
+               ret = ice_vsi_manage_vlan_stripping(vsi, false);
+       else if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
+                !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
+               ret = ice_vsi_manage_vlan_insertion(vsi);
+       else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
+                (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
+               ret = ice_vsi_manage_vlan_insertion(vsi);
 
-       return 0;
+       return ret;
 }
 
 /**
- * ice_vsi_ctrl_rx_rings - Start or stop a VSI's rx rings
- * @vsi: the VSI being configured
- * @ena: start or stop the rx rings
+ * ice_vsi_vlan_setup - Setup vlan offload properties on a VSI
+ * @vsi: VSI to setup vlan properties for
  */
-static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
+static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
 {
-       struct ice_pf *pf = vsi->back;
-       struct ice_hw *hw = &pf->hw;
-       int i, j, ret = 0;
-
-       for (i = 0; i < vsi->num_rxq; i++) {
-               int pf_q = vsi->rxq_map[i];
-               u32 rx_reg;
-
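-               /* wait for a prior enable/disable request to settle: the
-                * QENA_REQ and QENA_STAT bits match once the queue state is
-                * stable
-                */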
-               for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) {
-                       rx_reg = rd32(hw, QRX_CTRL(pf_q));
-                       if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) ==
-                           ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1))
-                               break;
-                       usleep_range(1000, 2000);
-               }
-
-               /* Skip if the queue is already in the requested state */
-               if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
-                       continue;
-
-               /* turn on/off the queue */
-               if (ena)
-                       rx_reg |= QRX_CTRL_QENA_REQ_M;
-               else
-                       rx_reg &= ~QRX_CTRL_QENA_REQ_M;
-               wr32(hw, QRX_CTRL(pf_q), rx_reg);
+       int ret = 0;
 
-               /* wait for the change to finish */
-               ret = ice_pf_rxq_wait(pf, pf_q, ena);
-               if (ret) {
-                       dev_err(&pf->pdev->dev,
-                               "VSI idx %d Rx ring %d %sable timeout\n",
-                               vsi->idx, pf_q, (ena ? "en" : "dis"));
-                       break;
-               }
-       }
+       if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+               ret = ice_vsi_manage_vlan_stripping(vsi, true);
+       if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
+               ret = ice_vsi_manage_vlan_insertion(vsi);
 
        return ret;
 }
 
 /**
- * ice_vsi_start_rx_rings - start VSI's rx rings
- * @vsi: the VSI whose rings are to be started
- *
- * Returns 0 on success and a negative value on error
+ * ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up
+ * @vsi: the VSI being brought back up
  */
-static int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
+static int ice_restore_vlan(struct ice_vsi *vsi)
 {
-       return ice_vsi_ctrl_rx_rings(vsi, true);
-}
+       int err;
+       u16 vid;
 
-/**
- * ice_vsi_stop_rx_rings - stop VSI's rx rings
- * @vsi: the VSI
- *
- * Returns 0 on success and a negative value on error
- */
-static int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
-{
-       return ice_vsi_ctrl_rx_rings(vsi, false);
+       if (!vsi->netdev)
+               return -EINVAL;
+
+       err = ice_vsi_vlan_setup(vsi);
+       if (err)
+               return err;
+
+       for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) {
+               err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid);
+               if (err)
+                       break;
+       }
+
+       return err;
 }
 
 /**
- * ice_vsi_stop_tx_rx_rings - stop VSI's tx and rx rings
- * @vsi: the VSI
- *
- * Returns 0 on success and a negative value on error
+ * ice_vsi_cfg - Setup the VSI
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and negative value on error
  */
-static int ice_vsi_stop_tx_rx_rings(struct ice_vsi *vsi)
+static int ice_vsi_cfg(struct ice_vsi *vsi)
 {
-       int err_tx, err_rx;
-
-       err_tx = ice_vsi_stop_tx_rings(vsi);
-       if (err_tx)
-               dev_dbg(&vsi->back->pdev->dev, "Failed to disable Tx rings\n");
+       int err;
 
-       err_rx = ice_vsi_stop_rx_rings(vsi);
-       if (err_rx)
-               dev_dbg(&vsi->back->pdev->dev, "Failed to disable Rx rings\n");
+       if (vsi->netdev) {
+               ice_set_rx_mode(vsi->netdev);
+               err = ice_restore_vlan(vsi);
+               if (err)
+                       return err;
+       }
 
-       if (err_tx || err_rx)
-               return -EIO;
+       err = ice_vsi_cfg_txqs(vsi);
+       if (!err)
+               err = ice_vsi_cfg_rxqs(vsi);
 
-       return 0;
+       return err;
 }
 
 /**
@@ -4729,122 +2716,6 @@ static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts,
        } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 }
 
-/**
- * ice_stat_update40 - read 40 bit stat from the chip and update stat values
- * @hw: ptr to the hardware info
- * @hireg: high 32 bit HW register to read from
- * @loreg: low 32 bit HW register to read from
- * @prev_stat_loaded: bool to specify if previous stats are loaded
- * @prev_stat: ptr to previous loaded stat value
- * @cur_stat: ptr to current stat value
- */
-static void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
-                             bool prev_stat_loaded, u64 *prev_stat,
-                             u64 *cur_stat)
-{
-       u64 new_data;
-
-       new_data = rd32(hw, loreg);
-       new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
-
-       /* device stats are not reset at PFR, they likely will not be zeroed
-        * when the driver starts. So save the first values read and use them as
-        * offsets to be subtracted from the raw values in order to report stats
-        * that count from zero.
-        */
-       if (!prev_stat_loaded)
-               *prev_stat = new_data;
-       if (likely(new_data >= *prev_stat))
-               *cur_stat = new_data - *prev_stat;
-       else
-               /* to manage the potential roll-over */
-               *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
-       *cur_stat &= 0xFFFFFFFFFFULL;
-}
-
-/**
- * ice_stat_update32 - read 32 bit stat from the chip and update stat values
- * @hw: ptr to the hardware info
- * @reg: HW register to read from
- * @prev_stat_loaded: bool to specify if previous stats are loaded
- * @prev_stat: ptr to previous loaded stat value
- * @cur_stat: ptr to current stat value
- */
-static void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
-                             u64 *prev_stat, u64 *cur_stat)
-{
-       u32 new_data;
-
-       new_data = rd32(hw, reg);
-
-       /* device stats are not reset at PFR, they likely will not be zeroed
-        * when the driver starts. So save the first values read and use them as
-        * offsets to be subtracted from the raw values in order to report stats
-        * that count from zero.
-        */
-       if (!prev_stat_loaded)
-               *prev_stat = new_data;
-       if (likely(new_data >= *prev_stat))
-               *cur_stat = new_data - *prev_stat;
-       else
-               /* to manage the potential roll-over */
-               *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
-}
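
The two stat helpers removed above share one idea: the first reading after driver load is latched as an offset, and later readings are reported relative to it, with a single wrap past the counter width folded back in. A standalone sketch of the 40-bit arithmetic (function and variable names are illustrative):

#include <stdint.h>

#define CTR_WIDTH 40
#define CTR_MASK ((UINT64_C(1) << CTR_WIDTH) - 1)

/* Report how far a free-running 40-bit counter has advanced past the
 * offset in *prev. On the very first call the reading itself becomes
 * the offset, so the reported stat counts from zero, matching the
 * comment above about device stats not being reset at PFR.
 */
static uint64_t ctr40_delta(uint64_t new_data, uint64_t *prev, int *loaded)
{
        uint64_t cur;

        if (!*loaded) {
                *prev = new_data;
                *loaded = 1;
        }
        if (new_data >= *prev)
                cur = new_data - *prev;
        else            /* counter wrapped past 2^40 between reads */
                cur = (new_data + (UINT64_C(1) << CTR_WIDTH)) - *prev;
        return cur & CTR_MASK;
}

int main(void)
{
        uint64_t prev = 0;
        int loaded = 0;

        ctr40_delta(5, &prev, &loaded);         /* offset latched at 5 */
        /* a smaller second reading means the counter wrapped */
        return ctr40_delta(3, &prev, &loaded) ==
               (UINT64_C(1) << CTR_WIDTH) - 2 ? 0 : 1;
}
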
-
-/**
- * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
- * @vsi: the VSI to be updated
- */
-static void ice_update_eth_stats(struct ice_vsi *vsi)
-{
-       struct ice_eth_stats *prev_es, *cur_es;
-       struct ice_hw *hw = &vsi->back->hw;
-       u16 vsi_num = vsi->vsi_num;    /* HW absolute index of a VSI */
-
-       prev_es = &vsi->eth_stats_prev;
-       cur_es = &vsi->eth_stats;
-
-       ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num),
-                         vsi->stat_offsets_loaded, &prev_es->rx_bytes,
-                         &cur_es->rx_bytes);
-
-       ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num),
-                         vsi->stat_offsets_loaded, &prev_es->rx_unicast,
-                         &cur_es->rx_unicast);
-
-       ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num),
-                         vsi->stat_offsets_loaded, &prev_es->rx_multicast,
-                         &cur_es->rx_multicast);
-
-       ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num),
-                         vsi->stat_offsets_loaded, &prev_es->rx_broadcast,
-                         &cur_es->rx_broadcast);
-
-       ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
-                         &prev_es->rx_discards, &cur_es->rx_discards);
-
-       ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num),
-                         vsi->stat_offsets_loaded, &prev_es->tx_bytes,
-                         &cur_es->tx_bytes);
-
-       ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num),
-                         vsi->stat_offsets_loaded, &prev_es->tx_unicast,
-                         &cur_es->tx_unicast);
-
-       ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num),
-                         vsi->stat_offsets_loaded, &prev_es->tx_multicast,
-                         &cur_es->tx_multicast);
-
-       ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num),
-                         vsi->stat_offsets_loaded, &prev_es->tx_broadcast,
-                         &cur_es->tx_broadcast);
-
-       ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
-                         &prev_es->tx_errors, &cur_es->tx_errors);
-
-       vsi->stat_offsets_loaded = true;
-}
-
 /**
  * ice_update_vsi_ring_stats - Update VSI stats counters
  * @vsi: the VSI to be updated
@@ -5138,7 +3009,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
  */
 int ice_down(struct ice_vsi *vsi)
 {
-       int i, err;
+       int i, tx_err, rx_err;
 
        /* Caller of this function is expected to set the
         * vsi->state __ICE_DOWN bit
@@ -5149,7 +3020,18 @@ int ice_down(struct ice_vsi *vsi)
        }
 
        ice_vsi_dis_irq(vsi);
-       err = ice_vsi_stop_tx_rx_rings(vsi);
+       tx_err = ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0);
+       if (tx_err)
+               netdev_err(vsi->netdev,
+                          "Failed to stop Tx rings, VSI %d error %d\n",
+                          vsi->vsi_num, tx_err);
+
+       rx_err = ice_vsi_stop_rx_rings(vsi);
+       if (rx_err)
+               netdev_err(vsi->netdev,
+                          "Failed to stop Rx rings, VSI %d error %d\n",
+                          vsi->vsi_num, rx_err);
+
        ice_napi_disable_all(vsi);
 
        ice_for_each_txq(vsi, i)
@@ -5158,10 +3040,14 @@ int ice_down(struct ice_vsi *vsi)
        ice_for_each_rxq(vsi, i)
                ice_clean_rx_ring(vsi->rx_rings[i]);
 
-       if (err)
-               netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
+       if (tx_err || rx_err) {
+               netdev_err(vsi->netdev,
+                          "Failed to close VSI 0x%04X on switch 0x%04X\n",
                           vsi->vsi_num, vsi->vsw->sw_id);
-       return err;
+               return -EIO;
+       }
+
+       return 0;
 }
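
The reworked ice_down() stops Tx and Rx independently, reports each failure on its own, still runs the remaining teardown, and only then collapses the two results into a single -EIO. The same pattern in a hedged, self-contained form (all helpers are stand-ins):

#include <errno.h>
#include <stdio.h>

static int stop_tx(void) { return 0; }  /* stand-ins for ring stops */
static int stop_rx(void) { return 0; }
static void drain_rings(void) { }       /* cleanup that must always run */

static int bring_down(void)
{
        int tx_err = stop_tx();
        int rx_err = stop_rx();         /* attempted even if Tx failed */

        if (tx_err)
                fprintf(stderr, "Tx stop failed: %d\n", tx_err);
        if (rx_err)
                fprintf(stderr, "Rx stop failed: %d\n", rx_err);

        drain_rings();                  /* teardown proceeds regardless */

        /* two independent results collapse into a single error code */
        return (tx_err || rx_err) ? -EIO : 0;
}

int main(void)
{
        return bring_down() ? 1 : 0;
}
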
 
 /**
@@ -5181,6 +3067,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
        }
 
        ice_for_each_txq(vsi, i) {
+               vsi->tx_rings[i]->netdev = vsi->netdev;
                err = ice_setup_tx_ring(vsi->tx_rings[i]);
                if (err)
                        break;
@@ -5206,6 +3093,7 @@ static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
        }
 
        ice_for_each_rxq(vsi, i) {
+               vsi->rx_rings[i]->netdev = vsi->netdev;
                err = ice_setup_rx_ring(vsi->rx_rings[i]);
                if (err)
                        break;
@@ -5232,38 +3120,6 @@ static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename)
        return err;
 }
 
-/**
- * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
- * @vsi: the VSI having resources freed
- */
-static void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
-{
-       int i;
-
-       if (!vsi->tx_rings)
-               return;
-
-       ice_for_each_txq(vsi, i)
-               if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
-                       ice_free_tx_ring(vsi->tx_rings[i]);
-}
-
-/**
- * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
- * @vsi: the VSI having resources freed
- */
-static void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
-{
-       int i;
-
-       if (!vsi->rx_rings)
-               return;
-
-       ice_for_each_rxq(vsi, i)
-               if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
-                       ice_free_rx_ring(vsi->rx_rings[i]);
-}
-
 /**
  * ice_vsi_open - Called when a network interface is made active
  * @vsi: the VSI to open
@@ -5324,92 +3180,6 @@ err_setup_tx:
        return err;
 }
 
-/**
- * ice_vsi_close - Shut down a VSI
- * @vsi: the VSI being shut down
- */
-static void ice_vsi_close(struct ice_vsi *vsi)
-{
-       if (!test_and_set_bit(__ICE_DOWN, vsi->state))
-               ice_down(vsi);
-
-       ice_vsi_free_irq(vsi);
-       ice_vsi_free_tx_rings(vsi);
-       ice_vsi_free_rx_rings(vsi);
-}
-
-/**
- * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
- * @vsi: the VSI being removed
- */
-static void ice_rss_clean(struct ice_vsi *vsi)
-{
-       struct ice_pf *pf;
-
-       pf = vsi->back;
-
-       if (vsi->rss_hkey_user)
-               devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
-       if (vsi->rss_lut_user)
-               devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
-}
-
-/**
- * ice_vsi_release - Delete a VSI and free its resources
- * @vsi: the VSI being removed
- *
- * Returns 0 on success or < 0 on error
- */
-static int ice_vsi_release(struct ice_vsi *vsi)
-{
-       struct ice_pf *pf;
-
-       if (!vsi->back)
-               return -ENODEV;
-       pf = vsi->back;
-       /* do not unregister and free netdevs while driver is in the reset
-        * recovery pending state. Since reset/rebuild happens through PF
-        * service task workqueue, its not a good idea to unregister netdev
-        * that is associated to the PF that is running the work queue items
-        * currently. This is done to avoid check_flush_dependency() warning
-        * on this wq
-        */
-       if (vsi->netdev && !ice_is_reset_recovery_pending(pf->state)) {
-               unregister_netdev(vsi->netdev);
-               free_netdev(vsi->netdev);
-               vsi->netdev = NULL;
-       }
-
-       if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
-               ice_rss_clean(vsi);
-
-       /* Disable VSI and free resources */
-       ice_vsi_dis_irq(vsi);
-       ice_vsi_close(vsi);
-
-       /* reclaim interrupt vectors back to PF */
-       ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
-       pf->num_avail_msix += vsi->num_q_vectors;
-
-       ice_remove_vsi_fltr(&pf->hw, vsi->vsi_num);
-       ice_vsi_delete(vsi);
-       ice_vsi_free_q_vectors(vsi);
-       ice_vsi_clear_rings(vsi);
-
-       ice_vsi_put_qs(vsi);
-       pf->q_left_tx += vsi->alloc_txq;
-       pf->q_left_rx += vsi->alloc_rxq;
-
-       /* retain SW VSI data structure since it is needed to unregister and
-        * free VSI netdev when PF is not in reset recovery pending state,\
-        * for ex: during rmmod.
-        */
-       if (!ice_is_reset_recovery_pending(pf->state))
-               ice_vsi_clear(vsi);
-
-       return 0;
-}
-
 /**
  * ice_vsi_release_all - Delete all VSIs
  * @pf: PF from which all VSIs are being removed
@@ -5444,13 +3214,14 @@ static void ice_dis_vsi(struct ice_vsi *vsi)
 
        set_bit(__ICE_NEEDS_RESTART, vsi->state);
 
-       if (vsi->netdev && netif_running(vsi->netdev) &&
-           vsi->type == ICE_VSI_PF) {
-               rtnl_lock();
-               vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
-               rtnl_unlock();
-       } else {
-               ice_vsi_close(vsi);
+       if (vsi->type == ICE_VSI_PF && vsi->netdev) {
+               if (netif_running(vsi->netdev)) {
+                       rtnl_lock();
+                       vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+                       rtnl_unlock();
+               } else {
+                       ice_vsi_close(vsi);
+               }
        }
 }
 
@@ -5462,12 +3233,16 @@ static int ice_ena_vsi(struct ice_vsi *vsi)
 {
        int err = 0;
 
-       if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state))
-               if (vsi->netdev && netif_running(vsi->netdev)) {
+       if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) &&
+           vsi->netdev) {
+               if (netif_running(vsi->netdev)) {
                        rtnl_lock();
                        err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
                        rtnl_unlock();
+               } else {
+                       err = ice_vsi_open(vsi);
                }
+       }
 
        return err;
 }
@@ -5516,6 +3291,10 @@ static int ice_vsi_rebuild_all(struct ice_pf *pf)
                if (!pf->vsi[i])
                        continue;
 
+               /* VF VSI rebuild isn't supported yet */
+               if (pf->vsi[i]->type == ICE_VSI_VF)
+                       continue;
+
                err = ice_vsi_rebuild(pf->vsi[i]);
                if (err) {
                        dev_err(&pf->pdev->dev,
@@ -5532,6 +3311,44 @@ static int ice_vsi_rebuild_all(struct ice_pf *pf)
        return 0;
 }
 
+/**
+ * ice_vsi_replay_all - replay all VSIs configuration in the PF
+ * @pf: the PF
+ */
+static int ice_vsi_replay_all(struct ice_pf *pf)
+{
+       struct ice_hw *hw = &pf->hw;
+       enum ice_status ret;
+       int i;
+
+       /* loop through pf->vsi array and replay the VSI if found */
+       for (i = 0; i < pf->num_alloc_vsi; i++) {
+               if (!pf->vsi[i])
+                       continue;
+
+               ret = ice_replay_vsi(hw, pf->vsi[i]->idx);
+               if (ret) {
+                       dev_err(&pf->pdev->dev,
+                               "VSI at index %d replay failed %d\n",
+                               pf->vsi[i]->idx, ret);
+                       return -EIO;
+               }
+
+               /* Re-map the HW VSI number, using the VSI handle that was
+                * previously validated by the ice_replay_vsi() call above
+                */
+               pf->vsi[i]->vsi_num = ice_get_hw_vsi_num(hw, pf->vsi[i]->idx);
+
+               dev_info(&pf->pdev->dev,
+                        "VSI at index %d filter replayed successfully - vsi_num %i\n",
+                        pf->vsi[i]->idx, pf->vsi[i]->vsi_num);
+       }
+
+       /* Clean up replay filter after successful re-configuration */
+       ice_replay_post(hw);
+       return 0;
+}
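
The replay loop above walks a sparse VSI array, replays each entry by its stable software handle, and then refreshes the cached hardware VSI number, which may legitimately change across a reset. A simplified sketch (struct layout and helpers are assumptions, not the driver's):

#define MAX_VSI 8

struct vsi {
        int idx;        /* stable software handle */
        int hw_num;     /* absolute number assigned by hardware */
};

static struct vsi *vsis[MAX_VSI];       /* sparse: NULL slots unused */

static int replay_one(int idx) { (void)idx; return 0; } /* FW stand-in */
static int query_hw_num(int idx) { return idx + 100; }  /* FW stand-in */

static int replay_all(void)
{
        for (int i = 0; i < MAX_VSI; i++) {
                if (!vsis[i])
                        continue;
                if (replay_one(vsis[i]->idx))
                        return -1;      /* abort on first failure */
                /* hardware may hand back a different absolute number
                 * after reset, so re-read it via the stable handle
                 */
                vsis[i]->hw_num = query_hw_num(vsis[i]->idx);
        }
        return 0;
}

int main(void)
{
        static struct vsi v = { .idx = 0 };

        vsis[0] = &v;
        return replay_all() ? 1 : 0;
}
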
+
 /**
  * ice_rebuild - rebuild after reset
  * @pf: pf to rebuild
@@ -5572,16 +3389,26 @@ static void ice_rebuild(struct ice_pf *pf)
        if (err)
                goto err_sched_init_port;
 
+       /* reset search_hint of irq_trackers to 0 since interrupts are
+        * reclaimed and could be allocated from beginning during VSI rebuild
+        */
+       pf->sw_irq_tracker->search_hint = 0;
+       pf->hw_irq_tracker->search_hint = 0;
+
        err = ice_vsi_rebuild_all(pf);
        if (err) {
                dev_err(dev, "ice_vsi_rebuild_all failed\n");
                goto err_vsi_rebuild;
        }
 
-       ret = ice_replay_all_fltr(&pf->hw);
-       if (ret) {
+       err = ice_update_link_info(hw->port_info);
+       if (err)
+               dev_err(&pf->pdev->dev, "Get link status error %d\n", err);
+
+       /* Replay all VSI configurations, including filters, after reset */
+       if (ice_vsi_replay_all(pf)) {
                dev_err(&pf->pdev->dev,
-                       "error replaying switch filter rules\n");
+                       "error replaying VSI configurations with switch filter rules\n");
                goto err_vsi_rebuild;
        }
 
@@ -5604,6 +3431,7 @@ static void ice_rebuild(struct ice_pf *pf)
                goto err_vsi_rebuild;
        }
 
+       ice_reset_all_vfs(pf, true);
        /* if we get here, reset flow is successful */
        clear_bit(__ICE_RESET_FAILED, pf->state);
        return;
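
Resetting search_hint to 0 in the rebuild path above matters because the IRQ trackers behave like first-fit allocators that remember where the previous search stopped; once a reset has reclaimed every vector, scanning should restart from slot 0. A small sketch of such a hint-based tracker (the layout here is illustrative, not the driver's):

#include <string.h>

#define NUM_ENTRIES 64

struct tracker {
        unsigned char used[NUM_ENTRIES];
        int search_hint;        /* slot to try first on the next alloc */
};

static int tracker_alloc(struct tracker *t)
{
        for (int n = 0; n < NUM_ENTRIES; n++) {
                int slot = (t->search_hint + n) % NUM_ENTRIES;

                if (!t->used[slot]) {
                        t->used[slot] = 1;
                        t->search_hint = (slot + 1) % NUM_ENTRIES;
                        return slot;
                }
        }
        return -1;              /* exhausted */
}

static void tracker_reset(struct tracker *t)
{
        memset(t->used, 0, sizeof(t->used));
        t->search_hint = 0;     /* mirror the rebuild path above */
}

int main(void)
{
        struct tracker t = { {0}, 0 };
        int a = tracker_alloc(&t);      /* slot 0 */
        int b = tracker_alloc(&t);      /* slot 1 */

        tracker_reset(&t);
        /* after the reset the search starts at slot 0 again */
        return (a == 0 && b == 1 && tracker_alloc(&t) == 0) ? 0 : 1;
}
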
@@ -5651,7 +3479,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
        }
        /* if a reset is in progress, wait for some time for it to complete */
        do {
-               if (ice_is_reset_recovery_pending(pf->state)) {
+               if (ice_is_reset_in_progress(pf->state)) {
                        count++;
                        usleep_range(1000, 2000);
                } else {
@@ -5707,7 +3535,7 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
                struct ice_aqc_get_set_rss_keys *buf =
                                  (struct ice_aqc_get_set_rss_keys *)seed;
 
-               status = ice_aq_set_rss_key(hw, vsi->vsi_num, buf);
+               status = ice_aq_set_rss_key(hw, vsi->idx, buf);
 
                if (status) {
                        dev_err(&pf->pdev->dev,
@@ -5718,8 +3546,8 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
        }
 
        if (lut) {
-               status = ice_aq_set_rss_lut(hw, vsi->vsi_num,
-                                           vsi->rss_lut_type, lut, lut_size);
+               status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
+                                           lut, lut_size);
                if (status) {
                        dev_err(&pf->pdev->dev,
                                "Cannot set RSS lut, err %d aq_err %d\n",
@@ -5750,7 +3578,7 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
                struct ice_aqc_get_set_rss_keys *buf =
                                  (struct ice_aqc_get_set_rss_keys *)seed;
 
-               status = ice_aq_get_rss_key(hw, vsi->vsi_num, buf);
+               status = ice_aq_get_rss_key(hw, vsi->idx, buf);
                if (status) {
                        dev_err(&pf->pdev->dev,
                                "Cannot get RSS key, err %d aq_err %d\n",
@@ -5760,8 +3588,8 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
        }
 
        if (lut) {
-               status = ice_aq_get_rss_lut(hw, vsi->vsi_num,
-                                           vsi->rss_lut_type, lut, lut_size);
+               status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
+                                           lut, lut_size);
                if (status) {
                        dev_err(&pf->pdev->dev,
                                "Cannot get RSS lut, err %d aq_err %d\n",
@@ -5823,9 +3651,9 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
        else
                /* change from VEB to VEPA mode */
                ctxt.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
-       ctxt.vsi_num = vsi->vsi_num;
        ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
-       status = ice_aq_update_vsi(hw, &ctxt, NULL);
+
+       status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
        if (status) {
                dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
                        bmode, status, hw->adminq.sq_last_status);
@@ -5965,7 +3793,7 @@ static void ice_tx_timeout(struct net_device *netdev)
                if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
                        val = rd32(&pf->hw,
                                   GLINT_DYN_CTL(tx_ring->q_vector->v_idx +
-                                               tx_ring->vsi->base_vector - 1));
+                                       tx_ring->vsi->hw_base_vector));
 
                netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
                            vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
@@ -6112,6 +3940,12 @@ static const struct net_device_ops ice_netdev_ops = {
        .ndo_validate_addr = eth_validate_addr,
        .ndo_change_mtu = ice_change_mtu,
        .ndo_get_stats64 = ice_get_stats64,
+       .ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
+       .ndo_set_vf_mac = ice_set_vf_mac,
+       .ndo_get_vf_config = ice_get_vf_cfg,
+       .ndo_set_vf_trust = ice_set_vf_trust,
+       .ndo_set_vf_vlan = ice_set_vf_port_vlan,
+       .ndo_set_vf_link_state = ice_set_vf_link_state,
        .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
        .ndo_set_features = ice_set_features,
index 9b7b505549522c02310f6e1d8a4f0cf11df5c950..7cc8aa18a22bb5c062d74d2178ee60b9d29ee72c 100644 (file)
@@ -84,6 +84,62 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
        return NULL;
 }
 
+/**
+ * ice_aq_query_sched_elems - query scheduler elements
+ * @hw: pointer to the hw struct
+ * @elems_req: number of elements to query
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @elems_ret: returns total number of elements returned
+ * @cd: pointer to command details structure or NULL
+ *
+ * Query scheduling elements (0x0404)
+ */
+static enum ice_status
+ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
+                        struct ice_aqc_get_elem *buf, u16 buf_size,
+                        u16 *elems_ret, struct ice_sq_cd *cd)
+{
+       struct ice_aqc_get_cfg_elem *cmd;
+       struct ice_aq_desc desc;
+       enum ice_status status;
+
+       cmd = &desc.params.get_update_elem;
+       ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sched_elems);
+       cmd->num_elem_req = cpu_to_le16(elems_req);
+       desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+       status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+       if (!status && elems_ret)
+               *elems_ret = le16_to_cpu(cmd->num_elem_resp);
+
+       return status;
+}
+
+/**
+ * ice_sched_query_elem - query element information from hw
+ * @hw: pointer to the hw struct
+ * @node_teid: node teid to be queried
+ * @buf: buffer to element information
+ *
+ * This function queries HW element information
+ */
+static enum ice_status
+ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
+                    struct ice_aqc_get_elem *buf)
+{
+       u16 buf_size, num_elem_ret = 0;
+       enum ice_status status;
+
+       buf_size = sizeof(*buf);
+       memset(buf, 0, buf_size);
+       buf->generic[0].node_teid = cpu_to_le32(node_teid);
+       status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
+                                         NULL);
+       if (status || num_elem_ret != 1)
+               ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
+       return status;
+}
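
ice_sched_query_elem() reduces the bulk query to the single-node case: zero the buffer, seed it with the TEID of interest, request exactly one element, and treat any error status or a count other than one as failure. A hedged sketch of that request-and-verify shape, with fw_query() standing in for the admin-queue call:

#include <stdio.h>
#include <string.h>

struct elem {
        unsigned int teid;      /* which scheduler node to query */
        unsigned int data;      /* filled in by "firmware" */
};

/* stand-in for the admin-queue call: reports via *ret how many
 * elements were actually returned
 */
static int fw_query(struct elem *buf, unsigned int req, unsigned int *ret)
{
        (void)req;
        buf->data = 42;
        *ret = 1;
        return 0;
}

static int query_one(unsigned int teid, struct elem *out)
{
        unsigned int num_ret = 0;
        int status;

        memset(out, 0, sizeof(*out));
        out->teid = teid;       /* seed the node we want */
        status = fw_query(out, 1, &num_ret);
        if (status || num_ret != 1) {   /* anything but exactly one fails */
                fprintf(stderr, "query element failed\n");
                return status ? status : -1;
        }
        return 0;
}

int main(void)
{
        struct elem e;

        return query_one(0x10, &e) ? 1 : 0;
}
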
+
 /**
  * ice_sched_add_node - Insert the Tx scheduler node in SW DB
  * @pi: port information structure
@@ -97,7 +153,9 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
                   struct ice_aqc_txsched_elem_data *info)
 {
        struct ice_sched_node *parent;
+       struct ice_aqc_get_elem elem;
        struct ice_sched_node *node;
+       enum ice_status status;
        struct ice_hw *hw;
 
        if (!pi)
@@ -115,6 +173,13 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
                return ICE_ERR_PARAM;
        }
 
+       /* query the current node information from FW before adding it
+        * to the SW DB
+        */
+       status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
+       if (status)
+               return status;
+
        node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
        if (!node)
                return ICE_ERR_NO_MEMORY;
@@ -133,7 +198,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
        node->parent = parent;
        node->tx_sched_layer = layer;
        parent->children[parent->num_children++] = node;
-       memcpy(&node->info, info, sizeof(*info));
+       memcpy(&node->info, &elem.generic[0], sizeof(node->info));
        return 0;
 }
 
@@ -534,9 +599,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
 static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
 {
        struct ice_sched_agg_info *agg_info;
-       struct ice_sched_vsi_info *vsi_elem;
        struct ice_sched_agg_info *atmp;
-       struct ice_sched_vsi_info *tmp;
        struct ice_hw *hw;
 
        if (!pi)
@@ -555,13 +618,6 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
                }
        }
 
-       /* remove the vsi list */
-       list_for_each_entry_safe(vsi_elem, tmp, &pi->vsi_info_list,
-                                list_entry) {
-               list_del(&vsi_elem->list_entry);
-               devm_kfree(ice_hw_to_dev(hw), vsi_elem);
-       }
-
        if (pi->root) {
                ice_free_sched_node(pi, pi->root);
                pi->root = NULL;
@@ -611,31 +667,6 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
        hw->max_cgds = 0;
 }
 
-/**
- * ice_sched_create_vsi_info_entry - create an empty new VSI entry
- * @pi: port information structure
- * @vsi_id: VSI Id
- *
- * This function creates a new VSI entry and adds it to list
- */
-static struct ice_sched_vsi_info *
-ice_sched_create_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id)
-{
-       struct ice_sched_vsi_info *vsi_elem;
-
-       if (!pi)
-               return NULL;
-
-       vsi_elem = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*vsi_elem),
-                               GFP_KERNEL);
-       if (!vsi_elem)
-               return NULL;
-
-       list_add(&vsi_elem->list_entry, &pi->vsi_info_list);
-       vsi_elem->vsi_id = vsi_id;
-       return vsi_elem;
-}
-
 /**
  * ice_sched_add_elems - add nodes to hw and SW DB
  * @pi: port information structure
@@ -1007,7 +1038,6 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
        pi->port_state = ICE_SCHED_PORT_STATE_READY;
        mutex_init(&pi->sched_lock);
        INIT_LIST_HEAD(&pi->agg_list);
-       INIT_LIST_HEAD(&pi->vsi_info_list);
 
 err_init_port:
        if (status && pi->root) {
@@ -1076,27 +1106,6 @@ sched_query_out:
        return status;
 }
 
-/**
- * ice_sched_get_vsi_info_entry - Get the vsi entry list for given vsi_id
- * @pi: port information structure
- * @vsi_id: vsi id
- *
- * This function retrieves the vsi list for the given vsi id
- */
-static struct ice_sched_vsi_info *
-ice_sched_get_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id)
-{
-       struct ice_sched_vsi_info *list_elem;
-
-       if (!pi)
-               return NULL;
-
-       list_for_each_entry(list_elem, &pi->vsi_info_list, list_entry)
-               if (list_elem->vsi_id == vsi_id)
-                       return list_elem;
-       return NULL;
-}
-
 /**
  * ice_sched_find_node_in_subtree - Find node in part of base node subtree
  * @hw: pointer to the hw struct
@@ -1133,30 +1142,28 @@ ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
 /**
  * ice_sched_get_free_qparent - Get a free lan or rdma q group node
  * @pi: port information structure
- * @vsi_id: vsi id
+ * @vsi_handle: software VSI handle
  * @tc: branch number
  * @owner: lan or rdma
  *
  * This function retrieves a free lan or rdma q group node
  */
 struct ice_sched_node *
-ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc,
+ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
                           u8 owner)
 {
        struct ice_sched_node *vsi_node, *qgrp_node = NULL;
-       struct ice_sched_vsi_info *list_elem;
+       struct ice_vsi_ctx *vsi_ctx;
        u16 max_children;
        u8 qgrp_layer;
 
        qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
        max_children = pi->hw->max_children[qgrp_layer];
 
-       list_elem = ice_sched_get_vsi_info_entry(pi, vsi_id);
-       if (!list_elem)
-               goto lan_q_exit;
-
-       vsi_node = list_elem->vsi_node[tc];
-
+       vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+       if (!vsi_ctx)
+               return NULL;
+       vsi_node = vsi_ctx->sched.vsi_node[tc];
        /* bail out if no VSI node exists for this TC */
        if (!vsi_node)
                goto lan_q_exit;
@@ -1180,14 +1187,14 @@ lan_q_exit:
  * ice_sched_get_vsi_node - Get a VSI node based on VSI id
  * @hw: pointer to the hw struct
  * @tc_node: pointer to the TC node
- * @vsi_id: VSI id
+ * @vsi_handle: software VSI handle
  *
  * This function retrieves a VSI node for a given VSI id from a given
  * TC branch
  */
 static struct ice_sched_node *
 ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
-                      u16 vsi_id)
+                      u16 vsi_handle)
 {
        struct ice_sched_node *node;
        u8 vsi_layer;
@@ -1197,7 +1204,7 @@ ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
 
        /* Check whether it already exists */
        while (node) {
-               if (node->vsi_id == vsi_id)
+               if (node->vsi_handle == vsi_handle)
                        return node;
                node = node->sibling;
        }
@@ -1236,7 +1243,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
 /**
  * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
  * @pi: port information structure
- * @vsi_id: VSI id
+ * @vsi_handle: software VSI handle
  * @tc_node: pointer to the TC node
  * @num_nodes: pointer to the num nodes that needs to be added per layer
  * @owner: node owner (lan or rdma)
@@ -1245,7 +1252,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
  * lan and rdma separately.
  */
 static enum ice_status
-ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id,
+ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
                              struct ice_sched_node *tc_node, u16 *num_nodes,
                              u8 owner)
 {
@@ -1258,7 +1265,7 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id,
 
        qgl = ice_sched_get_qgrp_layer(hw);
        vsil = ice_sched_get_vsi_layer(hw);
-       parent = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
+       parent = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
        for (i = vsil + 1; i <= qgl; i++) {
                if (!parent)
                        return ICE_ERR_CFG;
@@ -1371,7 +1378,7 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
 /**
  * ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree
  * @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
  * @tc_node: pointer to TC node
  * @num_nodes: pointer to num nodes array
  *
@@ -1379,7 +1386,7 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
  * VSI, its parent and intermediate nodes in below layers
  */
 static enum ice_status
-ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,
+ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
                                struct ice_sched_node *tc_node, u16 *num_nodes)
 {
        struct ice_sched_node *parent = tc_node;
@@ -1413,7 +1420,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,
                        return ICE_ERR_CFG;
 
                if (i == vsil)
-                       parent->vsi_id = vsi_id;
+                       parent->vsi_handle = vsi_handle;
        }
 
        return 0;
@@ -1422,13 +1429,13 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,
 /**
  * ice_sched_add_vsi_to_topo - add a new VSI into tree
  * @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
  * @tc: TC number
  *
  * This function adds a new VSI into scheduler tree
  */
 static enum ice_status
-ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc)
+ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
 {
        u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
        struct ice_sched_node *tc_node;
@@ -1442,13 +1449,14 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc)
        ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);
 
        /* add vsi supported nodes to tc subtree */
-       return ice_sched_add_vsi_support_nodes(pi, vsi_id, tc_node, num_nodes);
+       return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
+                                              num_nodes);
 }
 
 /**
  * ice_sched_update_vsi_child_nodes - update VSI child nodes
  * @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
  * @tc: TC number
  * @new_numqs: new number of max queues
  * @owner: owner of this subtree
@@ -1456,14 +1464,14 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc)
  * This function updates the VSI child nodes based on the number of queues
  */
 static enum ice_status
-ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
-                                u16 new_numqs, u8 owner)
+ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
+                                u8 tc, u16 new_numqs, u8 owner)
 {
        u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
        u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
        struct ice_sched_node *vsi_node;
        struct ice_sched_node *tc_node;
-       struct ice_sched_vsi_info *vsi;
+       struct ice_vsi_ctx *vsi_ctx;
        enum ice_status status = 0;
        struct ice_hw *hw = pi->hw;
        u16 prev_numqs;
@@ -1473,16 +1481,16 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
        if (!tc_node)
                return ICE_ERR_CFG;
 
-       vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
+       vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
        if (!vsi_node)
                return ICE_ERR_CFG;
 
-       vsi = ice_sched_get_vsi_info_entry(pi, vsi_id);
-       if (!vsi)
-               return ICE_ERR_CFG;
+       vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+       if (!vsi_ctx)
+               return ICE_ERR_PARAM;
 
        if (owner == ICE_SCHED_NODE_OWNER_LAN)
-               prev_numqs = vsi->max_lanq[tc];
+               prev_numqs = vsi_ctx->sched.max_lanq[tc];
        else
                return ICE_ERR_PARAM;
 
@@ -1507,13 +1515,13 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
                for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
                        new_num_nodes[i] -= prev_num_nodes[i];
 
-               status = ice_sched_add_vsi_child_nodes(pi, vsi_id, tc_node,
+               status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
                                                       new_num_nodes, owner);
                if (status)
                        return status;
        }
 
-       vsi->max_lanq[tc] = new_numqs;
+       vsi_ctx->sched.max_lanq[tc] = new_numqs;
 
        return status;
 }
@@ -1521,7 +1529,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
 /**
  * ice_sched_cfg_vsi - configure the new/existing VSI
  * @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
  * @tc: TC number
  * @maxqs: max number of queues
  * @owner: lan or rdma
@@ -1532,25 +1540,21 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
  * disabled then suspend the VSI if it is not already.
  */
 enum ice_status
-ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,
+ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
                  u8 owner, bool enable)
 {
        struct ice_sched_node *vsi_node, *tc_node;
-       struct ice_sched_vsi_info *vsi;
+       struct ice_vsi_ctx *vsi_ctx;
        enum ice_status status = 0;
        struct ice_hw *hw = pi->hw;
 
        tc_node = ice_sched_get_tc_node(pi, tc);
        if (!tc_node)
                return ICE_ERR_PARAM;
-
-       vsi = ice_sched_get_vsi_info_entry(pi, vsi_id);
-       if (!vsi)
-               vsi = ice_sched_create_vsi_info_entry(pi, vsi_id);
-       if (!vsi)
-               return ICE_ERR_NO_MEMORY;
-
-       vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
+       vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+       if (!vsi_ctx)
+               return ICE_ERR_PARAM;
+       vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
 
        /* suspend the VSI if tc is not enabled */
        if (!enable) {
@@ -1567,20 +1571,26 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,
 
        /* TC is enabled, if it is a new VSI then add it to the tree */
        if (!vsi_node) {
-               status = ice_sched_add_vsi_to_topo(pi, vsi_id, tc);
+               status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
                if (status)
                        return status;
 
-               vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
+               vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
                if (!vsi_node)
                        return ICE_ERR_CFG;
 
-               vsi->vsi_node[tc] = vsi_node;
+               vsi_ctx->sched.vsi_node[tc] = vsi_node;
                vsi_node->in_use = true;
+               /* invalidate the max queues whenever the VSI gets added to the
+                * scheduler tree for the first time (boot or after reset); the
+                * child nodes must be recreated in these cases.
+                */
+               vsi_ctx->sched.max_lanq[tc] = 0;
        }
 
        /* update the VSI child nodes */
-       status = ice_sched_update_vsi_child_nodes(pi, vsi_id, tc, maxqs, owner);
+       status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
+                                                 owner);
        if (status)
                return status;
 
index badadcc120d31e71a4f8740162d21d7b0be242ba..5dc9cfa04c589734f15e2bb446fe52c1b1dc4a84 100644 (file)
@@ -12,7 +12,6 @@
 struct ice_sched_agg_vsi_info {
        struct list_head list_entry;
        DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
-       u16 vsi_id;
 };
 
 struct ice_sched_agg_info {
@@ -35,9 +34,9 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
 void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node);
 struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);
 struct ice_sched_node *
-ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc,
+ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
                           u8 owner);
 enum ice_status
-ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,
+ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
                  u8 owner, bool enable);
 #endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
new file mode 100644 (file)
index 0000000..027eba4
--- /dev/null
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice_adminq_cmd.h"
+#include "ice_sriov.h"
+
+/**
+ * ice_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: VF ID to send msg
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cd: pointer to command details
+ *
+ * Send a message to the VF driver (0x0802) over the mailbox
+ * queue; the message is posted asynchronously via the
+ * ice_sq_send_cmd() function
+ */
+enum ice_status
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+                     u8 *msg, u16 msglen, struct ice_sq_cd *cd)
+{
+       struct ice_aqc_pf_vf_msg *cmd;
+       struct ice_aq_desc desc;
+
+       ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
+
+       cmd = &desc.params.virt;
+       cmd->id = cpu_to_le32(vfid);
+
+       desc.cookie_high = cpu_to_le32(v_opcode);
+       desc.cookie_low = cpu_to_le32(v_retval);
+
+       if (msglen)
+               desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+       return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
+}
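
The PF-to-VF message rides a generic control-queue descriptor: the virtchnl opcode and return value travel in the descriptor's cookie words, and the buffer flag is set only when a payload is attached. A schematic sketch; the struct layout and FLAG_RD value here are illustrative, not the real packed ice_aq_desc:

#include <stdint.h>

/* illustrative descriptor; real admin-queue descriptors are packed,
 * little-endian structures
 */
struct desc {
        uint16_t flags;
        uint32_t id;            /* target VF */
        uint32_t cookie_high;   /* carries the virtchnl opcode */
        uint32_t cookie_low;    /* carries the return value */
};

#define FLAG_RD 0x0400          /* "buffer holds command data" (assumed) */

static void fill_vf_msg(struct desc *d, uint16_t vfid, uint32_t opcode,
                        uint32_t retval, uint16_t msglen)
{
        d->id = vfid;
        d->cookie_high = opcode;
        d->cookie_low = retval;
        if (msglen)             /* flag a buffer only when one is attached */
                d->flags |= FLAG_RD;
}

int main(void)
{
        struct desc d = {0};

        fill_vf_msg(&d, 3, 0x1001, 0, 16);
        return d.flags == FLAG_RD ? 0 : 1;
}
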
+
+/**
+ * ice_conv_link_speed_to_virtchnl
+ * @adv_link_support: determines the format of the returned link speed
+ * @link_speed: variable containing the link_speed to be converted
+ *
+ * Convert a link speed supported by HW to a link speed supported by virtchnl.
+ * If adv_link_support is true, return the link speed in Mbps. Otherwise
+ * return the link speed as a VIRTCHNL_LINK_SPEED_* cast to a u32. Note that
+ * the caller must cast the value back to an enum virtchnl_link_speed when
+ * adv_link_support is false, but can expect the speed in Mbps when
+ * adv_link_support is true.
+ */
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
+{
+       u32 speed;
+
+       if (adv_link_support)
+               switch (link_speed) {
+               case ICE_AQ_LINK_SPEED_10MB:
+                       speed = ICE_LINK_SPEED_10MBPS;
+                       break;
+               case ICE_AQ_LINK_SPEED_100MB:
+                       speed = ICE_LINK_SPEED_100MBPS;
+                       break;
+               case ICE_AQ_LINK_SPEED_1000MB:
+                       speed = ICE_LINK_SPEED_1000MBPS;
+                       break;
+               case ICE_AQ_LINK_SPEED_2500MB:
+                       speed = ICE_LINK_SPEED_2500MBPS;
+                       break;
+               case ICE_AQ_LINK_SPEED_5GB:
+                       speed = ICE_LINK_SPEED_5000MBPS;
+                       break;
+               case ICE_AQ_LINK_SPEED_10GB:
+                       speed = ICE_LINK_SPEED_10000MBPS;
+                       break;
+               case ICE_AQ_LINK_SPEED_20GB:
+                       speed = ICE_LINK_SPEED_20000MBPS;
+                       break;
+               case ICE_AQ_LINK_SPEED_25GB:
+                       speed = ICE_LINK_SPEED_25000MBPS;
+                       break;
+               case ICE_AQ_LINK_SPEED_40GB:
+                       speed = ICE_LINK_SPEED_40000MBPS;
+                       break;
+               default:
+                       speed = ICE_LINK_SPEED_UNKNOWN;
+                       break;
+               }
+       else
+               /* Virtchnl speeds are not defined for every speed supported in
+                * the hardware. To maintain compatibility with older AVF
+                * drivers, the new speed values are resolved to the closest
+                * known virtchnl speeds when the speed is reported
+                */
+               switch (link_speed) {
+               case ICE_AQ_LINK_SPEED_10MB:
+               case ICE_AQ_LINK_SPEED_100MB:
+                       speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
+                       break;
+               case ICE_AQ_LINK_SPEED_1000MB:
+               case ICE_AQ_LINK_SPEED_2500MB:
+               case ICE_AQ_LINK_SPEED_5GB:
+                       speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
+                       break;
+               case ICE_AQ_LINK_SPEED_10GB:
+                       speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
+                       break;
+               case ICE_AQ_LINK_SPEED_20GB:
+                       speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
+                       break;
+               case ICE_AQ_LINK_SPEED_25GB:
+                       speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
+                       break;
+               case ICE_AQ_LINK_SPEED_40GB:
+                       speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
+                       break;
+               default:
+                       speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
+                       break;
+               }
+
+       return speed;
+}
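
The legacy branch above quantizes the hardware speed into the nearest known virtchnl bucket. The same rounding can be expressed table-driven; the thresholds below are chosen so 10 Mb rounds up into the 100 Mb bucket while 2.5 G and 5 G round down to 1 G, matching the switch (enum names are placeholders):

#include <stdint.h>

enum legacy_speed {
        LS_UNKNOWN, LS_100MB, LS_1GB, LS_10GB, LS_20GB, LS_25GB, LS_40GB
};

struct bucket {
        uint32_t min_mbps;
        enum legacy_speed legacy;
};

/* ascending thresholds: take the highest bucket the speed reaches */
static const struct bucket buckets[] = {
        { 10,    LS_100MB },    /* 10 Mb rounds up into the 100 Mb bucket */
        { 1000,  LS_1GB   },    /* 1 G, 2.5 G and 5 G all collapse here */
        { 10000, LS_10GB  },
        { 20000, LS_20GB  },
        { 25000, LS_25GB  },
        { 40000, LS_40GB  },
};

static enum legacy_speed to_legacy(uint32_t mbps)
{
        enum legacy_speed s = LS_UNKNOWN;

        for (unsigned int i = 0; i < sizeof(buckets) / sizeof(buckets[0]); i++)
                if (mbps >= buckets[i].min_mbps)
                        s = buckets[i].legacy;
        return s;
}

int main(void)
{
        /* 5 Gb has no legacy value of its own and resolves down to 1 Gb */
        return to_legacy(5000) == LS_1GB ? 0 : 1;
}
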
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
new file mode 100644 (file)
index 0000000..3d78a07
--- /dev/null
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_SRIOV_H_
+#define _ICE_SRIOV_H_
+
+#include "ice_common.h"
+
+#ifdef CONFIG_PCI_IOV
+enum ice_status
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+                     u8 *msg, u16 msglen, struct ice_sq_cd *cd);
+
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
+#else /* CONFIG_PCI_IOV */
+static inline enum ice_status
+ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
+                     u16 __always_unused vfid, u32 __always_unused v_opcode,
+                     u32 __always_unused v_retval, u8 __always_unused *msg,
+                     u16 __always_unused msglen,
+                     struct ice_sq_cd __always_unused *cd)
+{
+       return 0;
+}
+
+static inline u32
+ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support,
+                               u16 __always_unused link_speed)
+{
+       return 0;
+}
+
+#endif /* CONFIG_PCI_IOV */
+#endif /* _ICE_SRIOV_H_ */
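
The header keeps callers free of #ifdef clutter: with CONFIG_PCI_IOV the real prototypes are declared, and without it static inline no-ops compile away to nothing at the call sites. The same pattern in a self-contained header (the feature macro and function names are invented for illustration):

/* feature.h - compile-time stubbing, modeled on the header above */
#ifndef FEATURE_H_
#define FEATURE_H_

#ifdef ENABLE_FEATURE
int feature_do_work(int arg);   /* real version lives in a .c file */
#else
/* stub: callers compile unchanged and the call folds away entirely */
static inline int feature_do_work(int arg)
{
        (void)arg;
        return 0;
}
#endif /* ENABLE_FEATURE */

#endif /* FEATURE_H_ */

Callers simply invoke feature_do_work(); whether the build has the feature is decided once, in the header, instead of at every call site.
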
index d2dae913d81e0bac21f3b978b1cfdacf8785d0f1..f49f299ddf2c048f714190a2fb7d02e8d197e6ae 100644 (file)
@@ -6,6 +6,9 @@
 
 /* Error Codes */
 enum ice_status {
+       ICE_SUCCESS                             = 0,
+
+       /* Generic codes : Range -1..-49 */
        ICE_ERR_PARAM                           = -1,
        ICE_ERR_NOT_IMPL                        = -2,
        ICE_ERR_NOT_READY                       = -3,
index 65b4e1cca6bef412d15f1d1cf6985ce776c5aae6..33403f39f1b3f8680dcf5b63c37956a5df2d0fad 100644 (file)
@@ -106,6 +106,7 @@ ice_init_def_sw_recp(struct ice_hw *hw)
        for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
                recps[i].root_rid = i;
                INIT_LIST_HEAD(&recps[i].filt_rules);
+               INIT_LIST_HEAD(&recps[i].filt_replay_rules);
                mutex_init(&recps[i].filt_rule_lock);
        }
 
@@ -186,6 +187,7 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
        if (!vsi_ctx->alloc_from_pool)
                cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
                                           ICE_AQ_VSI_IS_VALID);
+       cmd->vf_id = vsi_ctx->vf_num;
 
        cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
 
@@ -247,7 +249,7 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
  *
  * Update VSI context in the hardware (0x0211)
  */
-enum ice_status
+static enum ice_status
 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
                  struct ice_sq_cd *cd)
 {
@@ -276,65 +278,6 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
        return status;
 }
 
-/**
- * ice_update_fltr_vsi_map - update given filter VSI map
- * @list_head: list for which filters needs to be updated
- * @list_lock: filter lock which needs to be updated
- * @old_vsi_num: old VSI HW id
- * @new_vsi_num: new VSI HW id
- *
- * update the VSI map for a given filter list
- */
-static void
-ice_update_fltr_vsi_map(struct list_head *list_head,
-                       struct mutex *list_lock, u16 old_vsi_num,
-                       u16 new_vsi_num)
-{
-       struct ice_fltr_mgmt_list_entry *itr;
-
-       mutex_lock(list_lock);
-       if (list_empty(list_head))
-               goto exit_update_map;
-
-       list_for_each_entry(itr, list_head, list_entry) {
-               if (itr->vsi_list_info &&
-                   test_bit(old_vsi_num, itr->vsi_list_info->vsi_map)) {
-                       clear_bit(old_vsi_num, itr->vsi_list_info->vsi_map);
-                       set_bit(new_vsi_num, itr->vsi_list_info->vsi_map);
-               } else if (itr->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
-                          itr->fltr_info.fwd_id.vsi_id == old_vsi_num) {
-                       itr->fltr_info.fwd_id.vsi_id = new_vsi_num;
-                       itr->fltr_info.src = new_vsi_num;
-               }
-       }
-exit_update_map:
-       mutex_unlock(list_lock);
-}
-
-/**
- * ice_update_all_fltr_vsi_map - update all filters VSI map
- * @hw: pointer to the hardware structure
- * @old_vsi_num: old VSI HW id
- * @new_vsi_num: new VSI HW id
- *
- * update all filters VSI map
- */
-static void
-ice_update_all_fltr_vsi_map(struct ice_hw *hw, u16 old_vsi_num, u16 new_vsi_num)
-{
-       struct ice_switch_info *sw = hw->switch_info;
-       u8 i;
-
-       for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
-               struct list_head *head = &sw->recp_list[i].filt_rules;
-               struct mutex *lock; /* Lock to protect filter rule list */
-
-               lock = &sw->recp_list[i].filt_rule_lock;
-               ice_update_fltr_vsi_map(head, lock, old_vsi_num,
-                                       new_vsi_num);
-       }
-}
-
 /**
  * ice_is_vsi_valid - check whether the VSI is valid or not
  * @hw: pointer to the hw struct
@@ -342,7 +285,7 @@ ice_update_all_fltr_vsi_map(struct ice_hw *hw, u16 old_vsi_num, u16 new_vsi_num)
  *
  * check whether the VSI is valid or not
  */
-static bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
+bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
 {
        return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
 }
@@ -355,7 +298,7 @@ static bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
  * return the hw VSI number
  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
  */
-static u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
+u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
 {
        return hw->vsi_ctx[vsi_handle]->vsi_num;
 }
@@ -367,7 +310,7 @@ static u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
  *
  * return the VSI context entry for a given VSI handle
  */
-static struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
+struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
 {
        return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
 }
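
Exporting these three helpers defines the indirection used throughout the series: a small software handle indexes a context table, validity means the slot is in range and populated, and anything hardware-facing translates through it. A minimal sketch of that table with simplified types:

#include <stdbool.h>
#include <stddef.h>

#define MAX_VSI 16

struct vsi_ctx {
        unsigned short vsi_num; /* absolute number used by hardware */
};

static struct vsi_ctx *ctx_tbl[MAX_VSI]; /* indexed by software handle */

static bool handle_valid(unsigned short handle)
{
        return handle < MAX_VSI && ctx_tbl[handle];
}

/* call only after handle_valid(), mirroring the caution note above */
static unsigned short handle_to_hw_num(unsigned short handle)
{
        return ctx_tbl[handle]->vsi_num;
}

static struct vsi_ctx *handle_to_ctx(unsigned short handle)
{
        return handle >= MAX_VSI ? NULL : ctx_tbl[handle];
}

int main(void)
{
        static struct vsi_ctx c = { .vsi_num = 7 };

        ctx_tbl[2] = &c;
        return (handle_valid(2) && handle_to_hw_num(2) == 7 &&
                !handle_to_ctx(5)) ? 0 : 1;
}
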
@@ -440,12 +383,8 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
                ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
        } else {
                /* update with new HW VSI num */
-               if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num) {
-                       /* update all filter lists with new HW VSI num */
-                       ice_update_all_fltr_vsi_map(hw, tmp_vsi_ctx->vsi_num,
-                                                   vsi_ctx->vsi_num);
+               if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
                        tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
-               }
        }
 
        return status;
@@ -476,6 +415,25 @@ ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
        return status;
 }
 
+/**
+ * ice_update_vsi
+ * @hw: pointer to the hw struct
+ * @vsi_handle: unique VSI handle
+ * @vsi_ctx: pointer to a VSI context struct
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update VSI context in the hardware
+ */
+enum ice_status
+ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+              struct ice_sq_cd *cd)
+{
+       if (!ice_is_vsi_valid(hw, vsi_handle))
+               return ICE_ERR_PARAM;
+       vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
+       return ice_aq_update_vsi(hw, vsi_ctx, cd);
+}
+
 /**
  * ice_aq_alloc_free_vsi_list
  * @hw: pointer to the hw struct
@@ -698,6 +656,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
        u8 *eth_hdr;
        u32 act = 0;
        __be16 *off;
+       u8 q_rgn;
 
        if (opc == ice_aqc_opc_remove_sw_rules) {
                s_rule->pdata.lkup_tx_rx.act = 0;
@@ -716,7 +675,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
 
        switch (f_info->fltr_act) {
        case ICE_FWD_TO_VSI:
-               act |= (f_info->fwd_id.vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
+               act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
                        ICE_SINGLE_ACT_VSI_ID_M;
                if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
                        act |= ICE_SINGLE_ACT_VSI_FORWARDING |
@@ -736,14 +695,19 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
                act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
                        ICE_SINGLE_ACT_Q_INDEX_M;
                break;
+       case ICE_DROP_PACKET:
+               act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
+                       ICE_SINGLE_ACT_VALID_BIT;
+               break;
        case ICE_FWD_TO_QGRP:
+               q_rgn = f_info->qgrp_size > 0 ?
+                       (u8)ilog2(f_info->qgrp_size) : 0;
                act |= ICE_SINGLE_ACT_TO_Q;
-               act |= (f_info->qgrp_size << ICE_SINGLE_ACT_Q_REGION_S) &
+               act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
+                       ICE_SINGLE_ACT_Q_INDEX_M;
+               act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
                        ICE_SINGLE_ACT_Q_REGION_M;
                break;
-       case ICE_DROP_PACKET:
-               act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP;
-               break;
        default:
                return;
        }
@@ -832,8 +796,8 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
        enum ice_status status;
        u16 lg_act_size;
        u16 rules_size;
-       u16 vsi_info;
        u32 act;
+       u16 id;
 
        if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
                return ICE_ERR_PARAM;
@@ -859,12 +823,11 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
        /* First action VSI forwarding or VSI list forwarding depending on how
         * many VSIs
         */
-       vsi_info = (m_ent->vsi_count > 1) ?
-               m_ent->fltr_info.fwd_id.vsi_list_id :
-               m_ent->fltr_info.fwd_id.vsi_id;
+       id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
+               m_ent->fltr_info.fwd_id.hw_vsi_id;
 
        act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
-       act |= (vsi_info << ICE_LG_ACT_VSI_LIST_ID_S) &
+       act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
                ICE_LG_ACT_VSI_LIST_ID_M;
        if (m_ent->vsi_count > 1)
                act |= ICE_LG_ACT_VSI_LIST;
@@ -917,15 +880,15 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
 /**
  * ice_create_vsi_list_map
  * @hw: pointer to the hardware structure
- * @vsi_array: array of VSIs to form a VSI list
- * @num_vsi: num VSI in the array
+ * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
+ * @num_vsi: number of VSI handles in the array
  * @vsi_list_id: VSI list id generated as part of allocate resource
  *
  * Helper function to create a new entry of VSI list id to VSI mapping
  * using the given VSI list id
  */
 static struct ice_vsi_list_map_info *
-ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
+ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
                        u16 vsi_list_id)
 {
        struct ice_switch_info *sw = hw->switch_info;
@@ -937,9 +900,9 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
                return NULL;
 
        v_map->vsi_list_id = vsi_list_id;
-
+       v_map->ref_cnt = 1;
        for (i = 0; i < num_vsi; i++)
-               set_bit(vsi_array[i], v_map->vsi_map);
+               set_bit(vsi_handle_arr[i], v_map->vsi_map);
 
        list_add(&v_map->list_entry, &sw->vsi_list_map_head);
        return v_map;
@@ -948,8 +911,8 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
 /**
  * ice_update_vsi_list_rule
  * @hw: pointer to the hardware structure
- * @vsi_array: array of VSIs to form a VSI list
- * @num_vsi: num VSI in the array
+ * @vsi_handle_arr: array of VSI handles to form a VSI list
+ * @num_vsi: number of VSI handles in the array
  * @vsi_list_id: VSI list id generated as part of allocate resource
  * @remove: Boolean value to indicate if this is a remove action
  * @opc: switch rules population command type - pass in the command opcode
@@ -959,7 +922,7 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
  * using the given VSI list id
  */
 static enum ice_status
-ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
+ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
                         u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
                         enum ice_sw_lkup_type lkup_type)
 {
@@ -990,9 +953,15 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
        s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
        if (!s_rule)
                return ICE_ERR_NO_MEMORY;
-
-       for (i = 0; i < num_vsi; i++)
-               s_rule->pdata.vsi_list.vsi[i] = cpu_to_le16(vsi_array[i]);
+       for (i = 0; i < num_vsi; i++) {
+               if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
+                       status = ICE_ERR_PARAM;
+                       goto exit;
+               }
+               /* AQ call requires hw_vsi_id(s) */
+               s_rule->pdata.vsi_list.vsi[i] =
+                       cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
+       }
 
        s_rule->type = cpu_to_le16(type);
        s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
@@ -1000,6 +969,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
 
        status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
 
+exit:
        devm_kfree(ice_hw_to_dev(hw), s_rule);
        return status;
 }
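
The loop added in this hunk is its heart: every caller-supplied VSI handle is validated with ice_is_vsi_valid() and translated to a hardware VSI number via ice_get_hw_vsi_num() before the admin-queue call, instead of trusting raw IDs. A stand-alone sketch of that validate-then-translate pattern; the lookup table, helper names, and error convention here are invented:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_VSI 16

/* hypothetical handle -> hardware VSI number table; 0xFFFF = unused slot */
static uint16_t hw_vsi_num[MAX_VSI] = { 12, 47, 0xFFFF, 9 };

static bool vsi_valid(uint16_t handle)
{
        return handle < MAX_VSI && hw_vsi_num[handle] != 0xFFFF;
}

static int fill_rule_vsis(const uint16_t *handles, int n, uint16_t *out)
{
        for (int i = 0; i < n; i++) {
                if (!vsi_valid(handles[i]))
                        return -1;      /* mirrors the ICE_ERR_PARAM exit */
                out[i] = hw_vsi_num[handles[i]];  /* AQ wants hw_vsi_id(s) */
        }
        return 0;
}

int main(void)
{
        uint16_t handles[] = { 0, 3 }, ids[2];

        if (fill_rule_vsis(handles, 2, ids) == 0)
                printf("hw ids: %u %u\n", ids[0], ids[1]);
        return 0;
}
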
@@ -1007,21 +977,16 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
 /**
  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
  * @hw: pointer to the hw struct
- * @vsi_array: array of VSIs to form a VSI list
- * @num_vsi: number of VSIs in the array
+ * @vsi_handle_arr: array of VSI handles to form a VSI list
+ * @num_vsi: number of VSI handles in the array
  * @vsi_list_id: stores the ID of the VSI list to be created
  * @lkup_type: switch rule filter's lookup type
  */
 static enum ice_status
-ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
+ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
                         u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
 {
        enum ice_status status;
-       int i;
-
-       for (i = 0; i < num_vsi; i++)
-               if (vsi_array[i] >= ICE_MAX_VSI)
-                       return ICE_ERR_OUT_OF_RANGE;
 
        status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
                                            ice_aqc_opc_alloc_res);
@@ -1029,9 +994,9 @@ ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
                return status;
 
        /* Update the newly created VSI list to include the specified VSIs */
-       return ice_update_vsi_list_rule(hw, vsi_array, num_vsi, *vsi_list_id,
-                                       false, ice_aqc_opc_add_sw_rules,
-                                       lkup_type);
+       return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
+                                       *vsi_list_id, false,
+                                       ice_aqc_opc_add_sw_rules, lkup_type);
 }
 
 /**
@@ -1217,15 +1182,15 @@ ice_add_update_vsi_list(struct ice_hw *hw,
                 * new VSIs.
                 */
                struct ice_fltr_info tmp_fltr;
-               u16 vsi_id_arr[2];
+               u16 vsi_handle_arr[2];
 
                /* A rule already exists with the new VSI being added */
-               if (cur_fltr->fwd_id.vsi_id == new_fltr->fwd_id.vsi_id)
+               if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
                        return ICE_ERR_ALREADY_EXISTS;
 
-               vsi_id_arr[0] = cur_fltr->fwd_id.vsi_id;
-               vsi_id_arr[1] = new_fltr->fwd_id.vsi_id;
-               status = ice_create_vsi_list_rule(hw, &vsi_id_arr[0], 2,
+               vsi_handle_arr[0] = cur_fltr->vsi_handle;
+               vsi_handle_arr[1] = new_fltr->vsi_handle;
+               status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
                                                  &vsi_list_id,
                                                  new_fltr->lkup_type);
                if (status)
@@ -1245,7 +1210,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
                cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
                cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
                m_entry->vsi_list_info =
-                       ice_create_vsi_list_map(hw, &vsi_id_arr[0], 2,
+                       ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
                                                vsi_list_id);
 
                /* If this entry was large action then the large action needs
@@ -1257,11 +1222,11 @@ ice_add_update_vsi_list(struct ice_hw *hw,
                                               m_entry->sw_marker_id,
                                               m_entry->lg_act_idx);
        } else {
-               u16 vsi_id = new_fltr->fwd_id.vsi_id;
+               u16 vsi_handle = new_fltr->vsi_handle;
                enum ice_adminq_opc opcode;
 
                /* A rule already exists with the new VSI being added */
-               if (test_bit(vsi_id, m_entry->vsi_list_info->vsi_map))
+               if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
                        return 0;
 
                /* Update the previously created VSI list set with
@@ -1270,12 +1235,12 @@ ice_add_update_vsi_list(struct ice_hw *hw,
                vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
                opcode = ice_aqc_opc_update_sw_rules;
 
-               status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id,
-                                                 false, opcode,
+               status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
+                                                 vsi_list_id, false, opcode,
                                                  new_fltr->lkup_type);
                /* update VSI list mapping info with new VSI id */
                if (!status)
-                       set_bit(vsi_id, m_entry->vsi_list_info->vsi_map);
+                       set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
        }
        if (!status)
                m_entry->vsi_count++;
@@ -1310,6 +1275,39 @@ ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
        return ret;
 }
 
+/**
+ * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
+ * @hw: pointer to the hardware structure
+ * @recp_id: lookup type for which VSI lists need to be searched
+ * @vsi_handle: VSI handle to be found in the VSI list
+ * @vsi_list_id: VSI list ID of the list found to contain vsi_handle
+ *
+ * Helper function to search for a VSI list with a single entry that contains
+ * the given VSI handle. This can be extended further to search VSI lists with
+ * more than 1 vsi_count. Returns a pointer to the VSI list entry if found.
+ */
+static struct ice_vsi_list_map_info *
+ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
+                       u16 *vsi_list_id)
+{
+       struct ice_vsi_list_map_info *map_info = NULL;
+       struct ice_switch_info *sw = hw->switch_info;
+       struct ice_fltr_mgmt_list_entry *list_itr;
+       struct list_head *list_head;
+
+       list_head = &sw->recp_list[recp_id].filt_rules;
+       list_for_each_entry(list_itr, list_head, list_entry) {
+               if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
+                       map_info = list_itr->vsi_list_info;
+                       if (test_bit(vsi_handle, map_info->vsi_map)) {
+                               *vsi_list_id = map_info->vsi_list_id;
+                               return map_info;
+                       }
+               }
+       }
+       return NULL;
+}
+
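A usage note: the VLAN path later in this patch calls this helper before allocating anything, so an existing one-VSI list that already contains the handle is reused rather than duplicated. A stand-alone sketch of the scan, using arrays in place of the kernel's linked lists:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct entry { int vsi_count; uint32_t vsi_bits; uint16_t vsi_list_id; };

/* find a single-member list containing `handle`; return its id via out */
static bool find_vsi_list(const struct entry *e, int n, int handle,
                          uint16_t *out_list_id)
{
        for (int i = 0; i < n; i++) {
                if (e[i].vsi_count == 1 && (e[i].vsi_bits & (1u << handle))) {
                        *out_list_id = e[i].vsi_list_id;
                        return true;
                }
        }
        return false;
}

int main(void)
{
        struct entry rules[] = {
                { 2, 0x06, 10 },        /* two members: not reusable */
                { 1, 0x08, 11 },        /* single member: handle 3 */
        };
        uint16_t id;

        if (find_vsi_list(rules, 2, 3, &id))
                printf("reuse list %u\n", id);
        return 0;
}
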
 /**
  * ice_add_rule_internal - add rule for a given lookup type
  * @hw: pointer to the hardware structure
@@ -1328,6 +1326,11 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
        struct mutex *rule_lock; /* Lock to protect filter rule list */
        enum ice_status status = 0;
 
+       if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
+               return ICE_ERR_PARAM;
+       f_entry->fltr_info.fwd_id.hw_vsi_id =
+               ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
+
        rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
 
        mutex_lock(rule_lock);
@@ -1335,7 +1338,7 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
        if (new_fltr->flag & ICE_FLTR_RX)
                new_fltr->src = hw->port_info->lport;
        else if (new_fltr->flag & ICE_FLTR_TX)
-               new_fltr->src = f_entry->fltr_info.fwd_id.vsi_id;
+               new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
 
        m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
        if (!m_entry) {
@@ -1388,12 +1391,12 @@ ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
 /**
  * ice_rem_update_vsi_list
  * @hw: pointer to the hardware structure
- * @vsi_id: ID of the VSI to remove
+ * @vsi_handle: VSI handle of the VSI to remove
  * @fm_list: filter management entry for which the VSI list management needs to
  *           be done
  */
 static enum ice_status
-ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_id,
+ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
                        struct ice_fltr_mgmt_list_entry *fm_list)
 {
        enum ice_sw_lkup_type lkup_type;
@@ -1405,47 +1408,67 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_id,
                return ICE_ERR_PARAM;
 
        /* A rule with the VSI being removed does not exist */
-       if (!test_bit(vsi_id, fm_list->vsi_list_info->vsi_map))
+       if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
                return ICE_ERR_DOES_NOT_EXIST;
 
        lkup_type = fm_list->fltr_info.lkup_type;
        vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
-
-       status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id, true,
+       status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
                                          ice_aqc_opc_update_sw_rules,
                                          lkup_type);
        if (status)
                return status;
 
        fm_list->vsi_count--;
-       clear_bit(vsi_id, fm_list->vsi_list_info->vsi_map);
+       clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
 
-       if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
-           (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
+       if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
+               struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
                struct ice_vsi_list_map_info *vsi_list_info =
                        fm_list->vsi_list_info;
-               u16 rem_vsi_id;
+               u16 rem_vsi_handle;
 
-               rem_vsi_id = find_first_bit(vsi_list_info->vsi_map,
-                                           ICE_MAX_VSI);
-               if (rem_vsi_id == ICE_MAX_VSI)
+               rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
+                                               ICE_MAX_VSI);
+               if (!ice_is_vsi_valid(hw, rem_vsi_handle))
                        return ICE_ERR_OUT_OF_RANGE;
 
-               status = ice_update_vsi_list_rule(hw, &rem_vsi_id, 1,
+               /* Make sure VSI list is empty before removing it below */
+               status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
                                                  vsi_list_id, true,
                                                  ice_aqc_opc_update_sw_rules,
                                                  lkup_type);
                if (status)
                        return status;
 
+               tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
+               tmp_fltr_info.fwd_id.hw_vsi_id =
+                       ice_get_hw_vsi_num(hw, rem_vsi_handle);
+               tmp_fltr_info.vsi_handle = rem_vsi_handle;
+               status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
+               if (status) {
+                       ice_debug(hw, ICE_DBG_SW,
+                                 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
+                                 tmp_fltr_info.fwd_id.hw_vsi_id, status);
+                       return status;
+               }
+
+               fm_list->fltr_info = tmp_fltr_info;
+       }
+
+       if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
+           (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
+               struct ice_vsi_list_map_info *vsi_list_info =
+                       fm_list->vsi_list_info;
+
                /* Remove the VSI list since it is no longer used */
                status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
-               if (status)
+               if (status) {
+                       ice_debug(hw, ICE_DBG_SW,
+                                 "Failed to remove VSI list %d, error %d\n",
+                                 vsi_list_id, status);
                        return status;
-
-               /* Change the list entry action from VSI_LIST to VSI */
-               fm_list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
-               fm_list->fltr_info.fwd_id.vsi_id = rem_vsi_id;
+               }
 
                list_del(&vsi_list_info->list_entry);
                devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
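
The reworked removal flow above now has two distinct stages: for non-VLAN lookups, once exactly one VSI remains the rule is rewritten via ice_update_pkt_fwd_rule() to a direct ICE_FWD_TO_VSI on the surviving VSI, and only then is the now-empty list torn down. A compact sketch of that state transition, with invented types:

#include <stdint.h>
#include <stdio.h>

enum act { FWD_TO_VSI, FWD_TO_VSI_LIST };

struct rule {
        enum act act;
        uint16_t fwd_id;        /* hw VSI number or VSI list id */
        int vsi_count;
};

/* Drop one member; demote to a direct forward when one VSI remains. */
static void remove_member(struct rule *r, uint16_t remaining_hw_vsi)
{
        r->vsi_count--;
        if (r->vsi_count == 1 && r->act == FWD_TO_VSI_LIST) {
                r->act = FWD_TO_VSI;            /* rewrite the switch rule */
                r->fwd_id = remaining_hw_vsi;   /* ...to the survivor */
                /* the empty VSI list would be freed after this point */
        }
}

int main(void)
{
        struct rule r = { FWD_TO_VSI_LIST, 5 /* list id */, 2 };

        remove_member(&r, 47);
        printf("act=%s fwd_id=%u\n",
               r.act == FWD_TO_VSI ? "FWD_TO_VSI" : "FWD_TO_VSI_LIST",
               (unsigned)r.fwd_id);
        return 0;
}
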
@@ -1470,7 +1493,12 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
        struct mutex *rule_lock; /* Lock to protect filter rule list */
        enum ice_status status = 0;
        bool remove_rule = false;
-       u16 vsi_id;
+       u16 vsi_handle;
+
+       if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
+               return ICE_ERR_PARAM;
+       f_entry->fltr_info.fwd_id.hw_vsi_id =
+               ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
 
        rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
        mutex_lock(rule_lock);
@@ -1482,9 +1510,14 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
 
        if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
                remove_rule = true;
+       } else if (!list_elem->vsi_list_info) {
+               status = ICE_ERR_DOES_NOT_EXIST;
+               goto exit;
        } else {
-               vsi_id = f_entry->fltr_info.fwd_id.vsi_id;
-               status = ice_rem_update_vsi_list(hw, vsi_id, list_elem);
+               if (list_elem->vsi_list_info->ref_cnt > 1)
+                       list_elem->vsi_list_info->ref_cnt--;
+               vsi_handle = f_entry->fltr_info.vsi_handle;
+               status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
                if (status)
                        goto exit;
                /* if vsi count goes to zero after updating the vsi list */
@@ -1556,8 +1589,19 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
        rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
        list_for_each_entry(m_list_itr, m_list, list_entry) {
                u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
+               u16 vsi_handle;
+               u16 hw_vsi_id;
 
                m_list_itr->fltr_info.flag = ICE_FLTR_TX;
+               vsi_handle = m_list_itr->fltr_info.vsi_handle;
+               if (!ice_is_vsi_valid(hw, vsi_handle))
+                       return ICE_ERR_PARAM;
+               hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+               m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
+               /* update the src with the hw VSI number; the source must be a VSI */
+               if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
+                       return ICE_ERR_PARAM;
+               m_list_itr->fltr_info.src = hw_vsi_id;
                if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
                    is_zero_ether_addr(add))
                        return ICE_ERR_PARAM;
@@ -1676,57 +1720,145 @@ static enum ice_status
 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
 {
        struct ice_switch_info *sw = hw->switch_info;
-       struct ice_fltr_info *new_fltr, *cur_fltr;
        struct ice_fltr_mgmt_list_entry *v_list_itr;
+       struct ice_fltr_info *new_fltr, *cur_fltr;
+       enum ice_sw_lkup_type lkup_type;
+       u16 vsi_list_id = 0, vsi_handle;
        struct mutex *rule_lock; /* Lock to protect filter rule list */
        enum ice_status status = 0;
 
+       if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
+               return ICE_ERR_PARAM;
+
+       f_entry->fltr_info.fwd_id.hw_vsi_id =
+               ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
        new_fltr = &f_entry->fltr_info;
+
        /* VLAN id should only be 12 bits */
        if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
                return ICE_ERR_PARAM;
 
+       if (new_fltr->src_id != ICE_SRC_ID_VSI)
+               return ICE_ERR_PARAM;
+
+       new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
+       lkup_type = new_fltr->lkup_type;
+       vsi_handle = new_fltr->vsi_handle;
        rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
        mutex_lock(rule_lock);
        v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
        if (!v_list_itr) {
-               u16 vsi_id = ICE_VSI_INVAL_ID;
-               u16 vsi_list_id = 0;
+               struct ice_vsi_list_map_info *map_info = NULL;
 
                if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
-                       enum ice_sw_lkup_type lkup_type = new_fltr->lkup_type;
-
-                       /* All VLAN pruning rules use a VSI list.
-                        * Convert the action to forwarding to a VSI list.
+                       /* All VLAN pruning rules use a VSI list. Check if
+                        * there is already a VSI list containing the VSI that
+                        * we want to add. If found, use the same vsi_list_id
+                        * for this new VLAN rule; otherwise create a new list.
+                        */
-                       vsi_id = new_fltr->fwd_id.vsi_id;
-                       status = ice_create_vsi_list_rule(hw, &vsi_id, 1,
-                                                         &vsi_list_id,
-                                                         lkup_type);
-                       if (status)
-                               goto exit;
+                       map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
+                                                          vsi_handle,
+                                                          &vsi_list_id);
+                       if (!map_info) {
+                               status = ice_create_vsi_list_rule(hw,
+                                                                 &vsi_handle,
+                                                                 1,
+                                                                 &vsi_list_id,
+                                                                 lkup_type);
+                               if (status)
+                                       goto exit;
+                       }
+                       /* Convert the action to forwarding to a VSI list. */
                        new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
                        new_fltr->fwd_id.vsi_list_id = vsi_list_id;
                }
 
                status = ice_create_pkt_fwd_rule(hw, f_entry);
-               if (!status && vsi_id != ICE_VSI_INVAL_ID) {
+               if (!status) {
                        v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
                                                         new_fltr);
                        if (!v_list_itr) {
                                status = ICE_ERR_DOES_NOT_EXIST;
                                goto exit;
                        }
-                       v_list_itr->vsi_list_info =
-                               ice_create_vsi_list_map(hw, &vsi_id, 1,
-                                                       vsi_list_id);
+                       /* reuse VSI list for new rule and increment ref_cnt */
+                       if (map_info) {
+                               v_list_itr->vsi_list_info = map_info;
+                               map_info->ref_cnt++;
+                       } else {
+                               v_list_itr->vsi_list_info =
+                                       ice_create_vsi_list_map(hw, &vsi_handle,
+                                                               1, vsi_list_id);
+                       }
                }
+       } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
+               /* Update the existing VSI list to add the new VSI id only if
+                * it is used by one VLAN rule.
+                */
+               cur_fltr = &v_list_itr->fltr_info;
+               status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
+                                                new_fltr);
+       } else {
+               /* If a VLAN rule exists and the VSI list used by this rule is
+                * referenced by more than one VLAN rule, then create a new
+                * VSI list containing the previous VSI plus the new VSI, and
+                * update the existing VLAN rule to point to the new VSI list
+                * id.
+                */
+               struct ice_fltr_info tmp_fltr;
+               u16 vsi_handle_arr[2];
+               u16 cur_handle;
 
-               goto exit;
-       }
+               /* The current implementation only supports reusing a VSI list
+                * with a VSI count of one. We should never hit the condition
+                * below.
+                */
+               if (v_list_itr->vsi_count > 1 &&
+                   v_list_itr->vsi_list_info->ref_cnt > 1) {
+                       ice_debug(hw, ICE_DBG_SW,
+                                 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
+                       status = ICE_ERR_CFG;
+                       goto exit;
+               }
+
+               cur_handle =
+                       find_first_bit(v_list_itr->vsi_list_info->vsi_map,
+                                      ICE_MAX_VSI);
+
+               /* A rule already exists with the new VSI being added */
+               if (cur_handle == vsi_handle) {
+                       status = ICE_ERR_ALREADY_EXISTS;
+                       goto exit;
+               }
+
+               vsi_handle_arr[0] = cur_handle;
+               vsi_handle_arr[1] = vsi_handle;
+               status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
+                                                 &vsi_list_id, lkup_type);
+               if (status)
+                       goto exit;
+
+               tmp_fltr = v_list_itr->fltr_info;
+               tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
+               tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
+               tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
+               /* Update the previous switch rule to a new VSI list which
+                * includes the current VSI that is requested
+                */
+               status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
+               if (status)
+                       goto exit;
+
+               /* Before overriding the VSI list map info, decrement the
+                * ref_cnt of the previous VSI list
+                */
+               v_list_itr->vsi_list_info->ref_cnt--;
 
-       cur_fltr = &v_list_itr->fltr_info;
-       status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr, new_fltr);
+               /* now update to newly created list */
+               v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
+               v_list_itr->vsi_list_info =
+                       ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
+                                               vsi_list_id);
+               v_list_itr->vsi_count++;
+       }
 
 exit:
        mutex_unlock(rule_lock);
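
The long VLAN branch above reduces to three cases keyed on whether a rule already exists and on ref_cnt: reuse (or create) a one-VSI list, extend a privately owned list in place, or clone to a fresh two-entry list and drop one reference on the shared list. A hedged classification sketch, detached from the driver types:

#include <stdio.h>

enum vlan_add_case { REUSE_OR_CREATE_LIST, EXTEND_IN_PLACE, CLONE_TO_NEW_LIST };

/* rule_exists: a VLAN rule for this VLAN id is already present
 * list_ref_cnt: how many rules share that rule's VSI list
 */
static enum vlan_add_case classify(int rule_exists, int list_ref_cnt)
{
        if (!rule_exists)
                return REUSE_OR_CREATE_LIST;  /* find a 1-VSI list or alloc */
        if (list_ref_cnt == 1)
                return EXTEND_IN_PLACE;       /* only this rule owns the list */
        return CLONE_TO_NEW_LIST;             /* shared: copy, then ref_cnt-- */
}

int main(void)
{
        printf("%d %d %d\n", classify(0, 0), classify(1, 1), classify(1, 3));
        return 0;
}
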
@@ -1779,7 +1911,7 @@ ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
 /**
  * ice_cfg_dflt_vsi - change state of VSI to set/clear default
  * @hw: pointer to the hardware structure
- * @vsi_id: number of VSI to set as default
+ * @vsi_handle: VSI handle to set as default
  * @set: true to add the above mentioned switch rule, false to remove it
  * @direction: ICE_FLTR_RX or ICE_FLTR_TX
  *
@@ -1787,13 +1919,18 @@ ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
  * (represented by swid)
  */
 enum ice_status
-ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction)
+ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
 {
        struct ice_aqc_sw_rules_elem *s_rule;
        struct ice_fltr_info f_info;
        enum ice_adminq_opc opcode;
        enum ice_status status;
        u16 s_rule_size;
+       u16 hw_vsi_id;
+
+       if (!ice_is_vsi_valid(hw, vsi_handle))
+               return ICE_ERR_PARAM;
+       hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
 
        s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
                            ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
@@ -1806,15 +1943,17 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction)
        f_info.lkup_type = ICE_SW_LKUP_DFLT;
        f_info.flag = direction;
        f_info.fltr_act = ICE_FWD_TO_VSI;
-       f_info.fwd_id.vsi_id = vsi_id;
+       f_info.fwd_id.hw_vsi_id = hw_vsi_id;
 
        if (f_info.flag & ICE_FLTR_RX) {
                f_info.src = hw->port_info->lport;
+               f_info.src_id = ICE_SRC_ID_LPORT;
                if (!set)
                        f_info.fltr_rule_id =
                                hw->port_info->dflt_rx_vsi_rule_id;
        } else if (f_info.flag & ICE_FLTR_TX) {
-               f_info.src = vsi_id;
+               f_info.src_id = ICE_SRC_ID_VSI;
+               f_info.src = hw_vsi_id;
                if (!set)
                        f_info.fltr_rule_id =
                                hw->port_info->dflt_tx_vsi_rule_id;
@@ -1834,10 +1973,10 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction)
                u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
 
                if (f_info.flag & ICE_FLTR_TX) {
-                       hw->port_info->dflt_tx_vsi_num = vsi_id;
+                       hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
                        hw->port_info->dflt_tx_vsi_rule_id = index;
                } else if (f_info.flag & ICE_FLTR_RX) {
-                       hw->port_info->dflt_rx_vsi_num = vsi_id;
+                       hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
                        hw->port_info->dflt_rx_vsi_rule_id = index;
                }
        } else {
@@ -1871,12 +2010,12 @@ out:
 enum ice_status
 ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
 {
-       struct ice_fltr_list_entry *list_itr;
+       struct ice_fltr_list_entry *list_itr, *tmp;
 
        if (!m_list)
                return ICE_ERR_PARAM;
 
-       list_for_each_entry(list_itr, m_list, list_entry) {
+       list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
                enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
 
                if (l_type != ICE_SW_LKUP_MAC)
@@ -1898,12 +2037,12 @@ ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
 enum ice_status
 ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
 {
-       struct ice_fltr_list_entry *v_list_itr;
+       struct ice_fltr_list_entry *v_list_itr, *tmp;
 
        if (!v_list || !hw)
                return ICE_ERR_PARAM;
 
-       list_for_each_entry(v_list_itr, v_list, list_entry) {
+       list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
                enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
 
                if (l_type != ICE_SW_LKUP_VLAN)
@@ -1920,21 +2059,21 @@ ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
 /**
  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
  * @fm_entry: filter entry to inspect
- * @vsi_id: ID of VSI to compare with filter info
+ * @vsi_handle: VSI handle to compare with filter info
  */
 static bool
-ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_id)
+ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
 {
        return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
-                fm_entry->fltr_info.fwd_id.vsi_id == vsi_id) ||
+                fm_entry->fltr_info.vsi_handle == vsi_handle) ||
                (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
-                (test_bit(vsi_id, fm_entry->vsi_list_info->vsi_map))));
+                (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
 }
 
 /**
  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
  * @hw: pointer to the hardware structure
- * @vsi_id: ID of VSI to remove filters from
+ * @vsi_handle: VSI handle to remove filters from
  * @vsi_list_head: pointer to the list to add entry to
  * @fi: pointer to fltr_info of filter entry to copy & add
  *
@@ -1945,7 +2084,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_id)
  * extract which VSI to remove the fltr from, and pass on that information.
  */
 static enum ice_status
-ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
+ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
                               struct list_head *vsi_list_head,
                               struct ice_fltr_info *fi)
 {
@@ -1966,7 +2105,8 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
         * values.
         */
        tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
-       tmp->fltr_info.fwd_id.vsi_id = vsi_id;
+       tmp->fltr_info.vsi_handle = vsi_handle;
+       tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
 
        list_add(&tmp->list_entry, vsi_list_head);
 
@@ -1976,9 +2116,9 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
 /**
  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
  * @hw: pointer to the hardware structure
- * @vsi_id: ID of VSI to remove filters from
+ * @vsi_handle: VSI handle to remove filters from
  * @lkup_list_head: pointer to the list that has certain lookup type filters
- * @vsi_list_head: pointer to the list pertaining to VSI with vsi_id
+ * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
  *
  * Locates all filters in lkup_list_head that are used by the given VSI,
  * and adds COPIES of those entries to vsi_list_head (intended to be used
@@ -1987,7 +2127,7 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
  * deallocated by the caller when done with list.
  */
 static enum ice_status
-ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
+ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
                         struct list_head *lkup_list_head,
                         struct list_head *vsi_list_head)
 {
@@ -1995,17 +2135,17 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
        enum ice_status status = 0;
 
        /* check to make sure VSI id is valid and within boundary */
-       if (vsi_id >= ICE_MAX_VSI)
+       if (!ice_is_vsi_valid(hw, vsi_handle))
                return ICE_ERR_PARAM;
 
        list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
                struct ice_fltr_info *fi;
 
                fi = &fm_entry->fltr_info;
-               if (!ice_vsi_uses_fltr(fm_entry, vsi_id))
+               if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
                        continue;
 
-               status = ice_add_entry_to_vsi_fltr_list(hw, vsi_id,
+               status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
                                                        vsi_list_head, fi);
                if (status)
                        return status;
@@ -2016,11 +2156,11 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
 /**
  * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
  * @hw: pointer to the hardware structure
- * @vsi_id: ID of VSI to remove filters from
+ * @vsi_handle: VSI handle to remove filters from
  * @lkup: switch rule filter lookup type
  */
 static void
-ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_id,
+ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
                         enum ice_sw_lkup_type lkup)
 {
        struct ice_switch_info *sw = hw->switch_info;
@@ -2035,7 +2175,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_id,
        rule_lock = &sw->recp_list[lkup].filt_rule_lock;
        rule_head = &sw->recp_list[lkup].filt_rules;
        mutex_lock(rule_lock);
-       status = ice_add_to_vsi_fltr_list(hw, vsi_id, rule_head,
+       status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
                                          &remove_list_head);
        mutex_unlock(rule_lock);
        if (status)
@@ -2069,102 +2209,121 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_id,
 /**
  * ice_remove_vsi_fltr - Remove all filters for a VSI
  * @hw: pointer to the hardware structure
- * @vsi_id: ID of VSI to remove filters from
+ * @vsi_handle: VSI handle to remove filters from
  */
-void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id)
+void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
 {
-       ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC);
-       ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC_VLAN);
-       ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC);
-       ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_VLAN);
-       ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_DFLT);
-       ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE);
-       ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE_MAC);
-       ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC_VLAN);
+       ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
+       ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
+       ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
+       ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
+       ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
+       ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
+       ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
+       ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
 }
 
 /**
- * ice_replay_fltr - Replay all the filters stored by a specific list head
+ * ice_replay_vsi_fltr - Replay filters for requested VSI
  * @hw: pointer to the hardware structure
- * @list_head: list for which filters needs to be replayed
+ * @vsi_handle: driver VSI handle
  * @recp_id: Recipe id for which rules need to be replayed
+ * @list_head: list for which filters need to be replayed
+ *
+ * Replays the filters of recipe recp_id for a VSI represented via vsi_handle.
+ * It is required to pass a valid VSI handle.
  */
 static enum ice_status
-ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct list_head *list_head)
+ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
+                   struct list_head *list_head)
 {
        struct ice_fltr_mgmt_list_entry *itr;
-       struct list_head l_head;
        enum ice_status status = 0;
+       u16 hw_vsi_id;
 
        if (list_empty(list_head))
                return status;
+       hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
 
-       /* Move entries from the given list_head to a temporary l_head so that
-        * they can be replayed. Otherwise when trying to re-add the same
-        * filter, the function will return already exists
-        */
-       list_replace_init(list_head, &l_head);
-
-       /* Mark the given list_head empty by reinitializing it so filters
-        * could be added again by *handler
-        */
-       list_for_each_entry(itr, &l_head, list_entry) {
+       list_for_each_entry(itr, list_head, list_entry) {
                struct ice_fltr_list_entry f_entry;
 
                f_entry.fltr_info = itr->fltr_info;
-               if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
+               if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
+                   itr->fltr_info.vsi_handle == vsi_handle) {
+                       /* update the src with the hw VSI number if the source is a VSI */
+                       if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
+                               f_entry.fltr_info.src = hw_vsi_id;
                        status = ice_add_rule_internal(hw, recp_id, &f_entry);
                        if (status)
                                goto end;
                        continue;
                }
-
-               /* Add a filter per vsi separately */
-               while (1) {
-                       u16 vsi;
-
-                       vsi = find_first_bit(itr->vsi_list_info->vsi_map,
-                                            ICE_MAX_VSI);
-                       if (vsi == ICE_MAX_VSI)
-                               break;
-
-                       clear_bit(vsi, itr->vsi_list_info->vsi_map);
-                       f_entry.fltr_info.fwd_id.vsi_id = vsi;
-                       f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
-                       if (recp_id == ICE_SW_LKUP_VLAN)
-                               status = ice_add_vlan_internal(hw, &f_entry);
-                       else
-                               status = ice_add_rule_internal(hw, recp_id,
-                                                              &f_entry);
-                       if (status)
-                               goto end;
-               }
+               if (!itr->vsi_list_info ||
+                   !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
+                       continue;
+               /* Clearing it so that the logic can add it back */
+               clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
+               f_entry.fltr_info.vsi_handle = vsi_handle;
+               f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
+               /* update the src with the hw VSI number if the source is a VSI */
+               if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
+                       f_entry.fltr_info.src = hw_vsi_id;
+               if (recp_id == ICE_SW_LKUP_VLAN)
+                       status = ice_add_vlan_internal(hw, &f_entry);
+               else
+                       status = ice_add_rule_internal(hw, recp_id, &f_entry);
+               if (status)
+                       goto end;
        }
 end:
-       /* Clear the filter management list */
-       ice_rem_sw_rule_info(hw, &l_head);
        return status;
 }
 
 /**
- * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
+ * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
  * @hw: pointer to the hardware structure
+ * @vsi_handle: driver VSI handle
  *
- * NOTE: This function does not clean up partially added filters on error.
- * It is up to caller of the function to issue a reset or fail early.
+ * Replays filters for requested VSI via vsi_handle.
  */
-enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
+enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
 {
        struct ice_switch_info *sw = hw->switch_info;
        enum ice_status status = 0;
        u8 i;
 
        for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
-               struct list_head *head = &sw->recp_list[i].filt_rules;
+               struct list_head *head;
 
-               status = ice_replay_fltr(hw, i, head);
+               head = &sw->recp_list[i].filt_replay_rules;
+               status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
                if (status)
                        return status;
        }
        return status;
 }
+
+/**
+ * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
+ * @hw: pointer to the hw struct
+ *
+ * Deletes the filter replay rules.
+ */
+void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
+{
+       struct ice_switch_info *sw = hw->switch_info;
+       u8 i;
+
+       if (!sw)
+               return;
+
+       for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+               if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
+                       struct list_head *l_head;
+
+                       l_head = &sw->recp_list[i].filt_replay_rules;
+                       ice_rem_sw_rule_info(hw, l_head);
+               }
+       }
+}
index 646389ca1238e85453b404fe5dbf96d29f8c898c..b88d96a1ef6935c2564e07e3443378b65f32b7ea 100644
@@ -17,7 +17,9 @@ struct ice_vsi_ctx {
        u16 vsis_unallocated;
        u16 flags;
        struct ice_aqc_vsi_props info;
+       struct ice_sched_vsi_info sched;
        u8 alloc_from_pool;
+       u8 vf_num;
 };
 
 enum ice_sw_fwd_act_type {
@@ -42,6 +44,14 @@ enum ice_sw_lkup_type {
        ICE_SW_LKUP_LAST
 };
 
+/* type of filter src id */
+enum ice_src_id {
+       ICE_SRC_ID_UNKNOWN = 0,
+       ICE_SRC_ID_VSI,
+       ICE_SRC_ID_QUEUE,
+       ICE_SRC_ID_LPORT,
+};
+
 struct ice_fltr_info {
        /* Look up information: how to look up packet */
        enum ice_sw_lkup_type lkup_type;
@@ -56,6 +66,7 @@ struct ice_fltr_info {
 
        /* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */
        u16 src;
+       enum ice_src_id src_id;
 
        union {
                struct {
@@ -77,7 +88,10 @@ struct ice_fltr_info {
                        u16 ethertype;
                        u8 mac_addr[ETH_ALEN]; /* optional */
                } ethertype_mac;
-       } l_data;
+       } l_data; /* Make sure to zero out the memory of l_data before using
+                  * it, or only set the data associated with the lookup
+                  * match; everything else should be zero
+                  */
 
        /* Depending on filter action */
        union {
@@ -85,12 +99,16 @@ struct ice_fltr_info {
                 * queue id in case of ICE_FWD_TO_QGRP.
                 */
                u16 q_id:11;
-               u16 vsi_id:10;
+               u16 hw_vsi_id:10;
                u16 vsi_list_id:10;
        } fwd_id;
 
+       /* Sw VSI handle */
+       u16 vsi_handle;
+
        /* Set to num_queues if action is ICE_FWD_TO_QGRP. This field
-        * determines the range of queues the packet needs to be forwarded to
+        * determines the range of queues the packet needs to be forwarded to.
+        * Note that qgrp_size must be set to a power of 2.
         */
        u8 qgrp_size;
 
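Putting the struct together: a caller zeroes the whole ice_fltr_info (per the l_data comment above), sets the lookup type, action, match data, and vsi_handle, and lets the add path resolve fwd_id.hw_vsi_id. A user-space sketch using a trimmed, hypothetical stand-in for the struct (fields and enums abbreviated, not the driver's definitions):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum lkup_type { LKUP_MAC };
enum fltr_act  { FWD_TO_VSI };

struct fltr_info {
        enum lkup_type lkup_type;
        enum fltr_act fltr_act;
        struct { uint8_t mac_addr[6]; } l_data;   /* must start zeroed */
        union { uint16_t hw_vsi_id; uint16_t vsi_list_id; } fwd_id;
        uint16_t vsi_handle;                      /* sw handle, not hw id */
};

int main(void)
{
        static const uint8_t mac[6] = { 0x02, 0, 0, 0, 0, 0x01 };
        struct fltr_info fi;

        memset(&fi, 0, sizeof(fi));   /* honor the l_data comment above */
        fi.lkup_type = LKUP_MAC;
        fi.fltr_act = FWD_TO_VSI;
        fi.vsi_handle = 0;
        memcpy(fi.l_data.mac_addr, mac, sizeof(mac));
        /* fwd_id.hw_vsi_id is left for the add path, which resolves it
         * with ice_get_hw_vsi_num() as the earlier hunks show
         */
        printf("lkup=%d act=%d handle=%u\n",
               (int)fi.lkup_type, (int)fi.fltr_act, (unsigned)fi.vsi_handle);
        return 0;
}
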
@@ -109,6 +127,7 @@ struct ice_sw_recipe {
 
        /* List of type ice_fltr_mgmt_list_entry */
        struct list_head filt_rules;
+       struct list_head filt_replay_rules;
 
        /* linked list of type recipe_list_entry */
        struct list_head rg_list;
@@ -129,6 +148,8 @@ struct ice_vsi_list_map_info {
        struct list_head list_entry;
        DECLARE_BITMAP(vsi_map, ICE_MAX_VSI);
        u16 vsi_list_id;
+       /* counter to track how many rules are reusing this VSI list */
+       u16 ref_cnt;
 };
 
 struct ice_fltr_list_entry {
@@ -140,7 +161,8 @@ struct ice_fltr_list_entry {
 /* This defines an entry in the list that maintains MAC or VLAN membership
  * to HW list mapping, since multiple VSIs can subscribe to the same MAC or
  * VLAN. As an optimization the VSI list should be created only when a
- * second VSI becomes a subscriber to the VLAN address.
+ * second VSI becomes a subscriber to the same MAC address. VSI lists are always
+ * used for VLAN membership.
  */
 struct ice_fltr_mgmt_list_entry {
        /* back pointer to VSI list id to VSI list mapping */
@@ -158,28 +180,33 @@ struct ice_fltr_mgmt_list_entry {
 
 /* VSI related commands */
 enum ice_status
-ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
-                 struct ice_sq_cd *cd);
-enum ice_status
 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
            struct ice_sq_cd *cd);
 enum ice_status
 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
             bool keep_vsi_alloc, struct ice_sq_cd *cd);
+enum ice_status
+ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+              struct ice_sq_cd *cd);
+bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
+struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle);
 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
 
 /* Switch/bridge related commands */
 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
 enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
 enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
-void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id);
+void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
 enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
 enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
 enum ice_status
-ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction);
-
-enum ice_status ice_replay_all_fltr(struct ice_hw *hw);
+ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction);
 
 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw);
+u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
+bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
+
+enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
+void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
 
 #endif /* _ICE_SWITCH_H_ */
index 839fd9ff604341f0d7e8cab9ec4408ab62cd1e88..1d0f58bd389bd35d9c5aad257e0d41c12c9ff1cd 100644
@@ -104,10 +104,17 @@ enum ice_rx_dtype {
 #define ICE_RX_ITR     ICE_IDX_ITR0
 #define ICE_TX_ITR     ICE_IDX_ITR1
 #define ICE_ITR_DYNAMIC        0x8000  /* use top bit as a flag */
-#define ICE_ITR_8K     0x003E
+#define ICE_ITR_8K     125
+#define ICE_ITR_20K    50
+#define ICE_DFLT_TX_ITR        ICE_ITR_20K
+#define ICE_DFLT_RX_ITR        ICE_ITR_20K
+/* Apply the ITR granularity translation to program the register. itr_gran is
+ * either 2 or 4 usecs, so divide it by 2 and shift the ITR value right by the
+ * result.
+ */
+#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> \
+                                  ((itr_gran) / 2))
 
-/* apply ITR HW granularity translation to program the HW registers */
-#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> (itr_gran))
+#define ICE_DFLT_INTRL 0
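
To make the new macro concrete with the constants above: the default 20K ITR is 50 usecs, so with an itr_gran of 2 the value is shifted right by 1 and the register gets 25, while with an itr_gran of 4 it is shifted by 2 and the register gets 12. A stand-alone check of that arithmetic (constants copied from this hunk):

#include <stdio.h>

#define ICE_ITR_DYNAMIC 0x8000
#define ICE_ITR_20K     50
#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> \
                                   ((itr_gran) / 2))

int main(void)
{
        /* 50 usecs at 2 usec granularity: 50 >> (2 / 2) = 25 */
        printf("gran 2: %d\n", ITR_TO_REG(ICE_ITR_20K, 2));
        /* 50 usecs at 4 usec granularity: 50 >> (4 / 2) = 12 */
        printf("gran 4: %d\n", ITR_TO_REG(ICE_ITR_20K, 4));
        /* the dynamic flag is masked off before the shift */
        printf("dynamic: %d\n", ITR_TO_REG(ICE_ITR_20K | ICE_ITR_DYNAMIC, 2));
        return 0;
}
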
 
 /* Legacy or Advanced Mode Queue */
 #define ICE_TX_ADVANCED        0
@@ -129,14 +136,6 @@ struct ice_ring {
        u16 q_index;                    /* Queue number of ring */
        u32 txq_teid;                   /* Added Tx queue TEID */
 
-       /* high bit set means dynamic, use accessor routines to read/write.
-        * hardware supports 2us/1us resolution for the ITR registers.
-        * these values always store the USER setting, and must be converted
-        * before programming to a register.
-        */
-       u16 rx_itr_setting;
-       u16 tx_itr_setting;
-
        u16 count;                      /* Number of descriptors */
        u16 reg_idx;                    /* HW register index of the ring */
 
@@ -173,6 +172,7 @@ struct ice_ring_container {
        unsigned int total_bytes;       /* total bytes processed this int */
        unsigned int total_pkts;        /* total packets processed this int */
        enum ice_latency_range latency_range;
+       int itr_idx;    /* index in the interrupt vector */
        u16 itr;
 };
 
index e681804be4d44cdfee210e4c7bc37dc4df039e65..12f9432abf11099f2c0eecb3bfe289ee801134f7 100644
@@ -18,6 +18,9 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc)
        return test_bit(tc, (unsigned long *)&bitmap);
 }
 
+/* Driver always calls main vsi_handle first */
+#define ICE_MAIN_VSI_HANDLE            0
+
 /* debug masks - set these bits in hw->debug_mask to control output */
 #define ICE_DBG_INIT           BIT_ULL(1)
 #define ICE_DBG_LINK           BIT_ULL(4)
@@ -81,6 +84,7 @@ enum ice_media_type {
 
 enum ice_vsi_type {
        ICE_VSI_PF = 0,
+       ICE_VSI_VF,
 };
 
 struct ice_link_status {
@@ -100,6 +104,15 @@ struct ice_link_status {
        u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
 };
 
+/* Different reset sources for which a disable queue AQ call has to be made in
+ * order to clean the TX scheduler as a part of the reset
+ */
+enum ice_disq_rst_src {
+       ICE_NO_RESET = 0,
+       ICE_VM_RESET,
+       ICE_VF_RESET,
+};
+
 /* PHY info such as phy_type, etc... */
 struct ice_phy_info {
        struct ice_link_status link_info;
@@ -124,6 +137,9 @@ struct ice_hw_common_caps {
        /* Max MTU for function or device */
        u16 max_mtu;
 
+       /* Virtualization support */
+       u8 sr_iov_1_1;                  /* SR-IOV enabled */
+
        /* RSS related capabilities */
        u16 rss_table_size;             /* 512 for PFs and 64 for VFs */
        u8 rss_table_entry_width;       /* RSS Entry width in bits */
@@ -132,12 +148,15 @@ struct ice_hw_common_caps {
 /* Function specific capabilities */
 struct ice_hw_func_caps {
        struct ice_hw_common_caps common_cap;
+       u32 num_allocd_vfs;             /* Number of allocated VFs */
+       u32 vf_base_id;                 /* Logical ID of the first VF */
        u32 guaranteed_num_vsi;
 };
 
 /* Device wide capabilities */
 struct ice_hw_dev_caps {
        struct ice_hw_common_caps common_cap;
+       u32 num_vfs_exposed;            /* Total number of VFs exposed */
        u32 num_vsi_allocd_to_host;     /* Excluding EMP VSI */
 };
 
@@ -147,12 +166,18 @@ struct ice_mac_info {
        u8 perm_addr[ETH_ALEN];
 };
 
-/* Various RESET request, These are not tied with HW reset types */
+/* Reset types used to determine which kind of reset was requested. These
+ * defines match the values of the RESET_TYPE field of the GLGEN_RSTAT
+ * register. ICE_RESET_PFR does not match any RESET_TYPE field in the
+ * GLGEN_RSTAT register because its reset source is different from the other
+ * types listed.
+ */
 enum ice_reset_req {
+       ICE_RESET_POR   = 0,
        ICE_RESET_INVAL = 0,
-       ICE_RESET_PFR   = 1,
-       ICE_RESET_CORER = 2,
-       ICE_RESET_GLOBR = 3,
+       ICE_RESET_CORER = 1,
+       ICE_RESET_GLOBR = 2,
+       ICE_RESET_EMPR  = 3,
+       ICE_RESET_PFR   = 4,
 };
 
 /* Bus parameters */
@@ -186,7 +211,7 @@ struct ice_sched_node {
        struct ice_sched_node **children;
        struct ice_aqc_txsched_elem_data info;
        u32 agg_id;                     /* aggregator group id */
-       u16 vsi_id;
+       u16 vsi_handle;
        u8 in_use;                      /* suspended or in use */
        u8 tx_sched_layer;              /* Logical Layer (1-9) */
        u8 num_children;
@@ -245,8 +270,6 @@ struct ice_port_info {
        struct ice_mac_info mac;
        struct ice_phy_info phy;
        struct mutex sched_lock;        /* protect access to TXSched tree */
-       struct ice_sched_tx_policy sched_policy;
-       struct list_head vsi_info_list;
        struct list_head agg_list;      /* lists all aggregator */
        u8 lport;
 #define ICE_LPORT_MASK         0xff
@@ -314,6 +337,7 @@ struct ice_hw {
 
        /* Control Queue info */
        struct ice_ctl_q_info adminq;
+       struct ice_ctl_q_info mailboxq;
 
        u8 api_branch;          /* API branch version */
        u8 api_maj_ver;         /* API major version */
@@ -326,16 +350,26 @@ struct ice_hw {
        u32 fw_build;           /* firmware build number */
 
        struct ice_fw_log_cfg fw_log;
-       /* minimum allowed value for different speeds */
-#define ICE_ITR_GRAN_MIN_200   1
-#define ICE_ITR_GRAN_MIN_100   1
-#define ICE_ITR_GRAN_MIN_50    2
-#define ICE_ITR_GRAN_MIN_25    4
+
+/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
+ * register. Used for determining the itr/intrl granularity during
+ * initialization.
+ */
+#define ICE_MAX_AGG_BW_200G    0x0
+#define ICE_MAX_AGG_BW_100G    0x1
+#define ICE_MAX_AGG_BW_50G     0x2
+#define ICE_MAX_AGG_BW_25G     0x3
+       /* ITR granularity for different speeds */
+#define ICE_ITR_GRAN_ABOVE_25  2
+#define ICE_ITR_GRAN_MAX_25    4
        /* ITR granularity in 1 us */
-       u8 itr_gran_200;
-       u8 itr_gran_100;
-       u8 itr_gran_50;
-       u8 itr_gran_25;
+       u8 itr_gran;
+       /* INTRL granularity for different speeds */
+#define ICE_INTRL_GRAN_ABOVE_25        4
+#define ICE_INTRL_GRAN_MAX_25  8
+       /* INTRL granularity in 1 us */
+       u8 intrl_gran;
+
        u8 ucast_shared;        /* true if VSIs can share unicast addr */
 
 };
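
The four new bandwidth codes replace the per-speed granularity fields removed above, which implies a selection step during init: read the device's max aggregate bandwidth (per the GL_PWR_MODE_CTL comment) and pick itr_gran/intrl_gran. That selection function is not part of this hunk; the following is a hedged sketch of how such a mapping could look, using the constants from this hunk:

#include <stdint.h>
#include <stdio.h>

#define ICE_MAX_AGG_BW_200G     0x0
#define ICE_MAX_AGG_BW_100G     0x1
#define ICE_MAX_AGG_BW_50G      0x2
#define ICE_MAX_AGG_BW_25G      0x3
#define ICE_ITR_GRAN_ABOVE_25   2
#define ICE_ITR_GRAN_MAX_25     4
#define ICE_INTRL_GRAN_ABOVE_25 4
#define ICE_INTRL_GRAN_MAX_25   8

/* pick granularities from the device's max aggregate bandwidth code */
static void pick_gran(uint8_t bw, uint8_t *itr, uint8_t *intrl)
{
        if (bw == ICE_MAX_AGG_BW_25G) {
                *itr = ICE_ITR_GRAN_MAX_25;
                *intrl = ICE_INTRL_GRAN_MAX_25;
        } else {
                *itr = ICE_ITR_GRAN_ABOVE_25;
                *intrl = ICE_INTRL_GRAN_ABOVE_25;
        }
}

int main(void)
{
        uint8_t itr, intrl;

        pick_gran(ICE_MAX_AGG_BW_25G, &itr, &intrl);
        printf("25G: itr=%u intrl=%u\n", itr, intrl);
        pick_gran(ICE_MAX_AGG_BW_100G, &itr, &intrl);
        printf("100G: itr=%u intrl=%u\n", itr, intrl);
        return 0;
}
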
@@ -409,4 +443,7 @@ struct ice_hw_port_stats {
 #define ICE_SR_SECTOR_SIZE_IN_WORDS    0x800
 #define ICE_SR_WORDS_IN_1KB            512
 
+/* Hash redirection LUT for VSI - maximum array size */
+#define ICE_VSIQF_HLUT_ARRAY_SIZE      ((VSIQF_HLUT_MAX_INDEX + 1) * 4)
+
 #endif /* _ICE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
new file mode 100644
index 0000000..c25e486
--- /dev/null
@@ -0,0 +1,2668 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+
+/**
+ * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
+ * @pf: pointer to the PF structure
+ * @v_opcode: operation code
+ * @v_retval: return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ */
+static void
+ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
+                   enum ice_status v_retval, u8 *msg, u16 msglen)
+{
+       struct ice_hw *hw = &pf->hw;
+       struct ice_vf *vf = pf->vf;
+       int i;
+
+       for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
+               /* Not all VFs are enabled, so skip the ones that are not */
+               if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+                   !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+                       continue;
+
+               /* Ignore return value on purpose - a given VF may fail, but
+                * we need to keep going and send to all of them
+                */
+               ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
+                                     msglen, NULL);
+       }
+}
+
+/**
+ * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
+ * @vf: pointer to the VF structure
+ * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
+ * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
+ * @link_up: whether or not to set the link up/down
+ */
+static void
+ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
+                int ice_link_speed, bool link_up)
+{
+       if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
+               pfe->event_data.link_event_adv.link_status = link_up;
+               /* Speed in Mbps */
+               pfe->event_data.link_event_adv.link_speed =
+                       ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
+       } else {
+               pfe->event_data.link_event.link_status = link_up;
+               /* Legacy method for virtchnl link speeds */
+               pfe->event_data.link_event.link_speed =
+                       (enum virtchnl_link_speed)
+                       ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
+       }
+}
+
+/**
+ * ice_set_pfe_link_forced - Force the virtchnl_pf_event link speed/status
+ * @vf: pointer to the VF structure
+ * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
+ * @link_up: whether or not to set the link up/down
+ */
+static void
+ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
+                       bool link_up)
+{
+       u16 link_speed;
+
+       if (link_up)
+               link_speed = ICE_AQ_LINK_SPEED_40GB;
+       else
+               link_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
+
+       ice_set_pfe_link(vf, pfe, link_speed, link_up);
+}
+
+/**
+ * ice_vc_notify_vf_link_state - Inform a VF of link status
+ * @vf: pointer to the VF structure
+ *
+ * send a link status message to a single VF
+ */
+static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
+{
+       struct virtchnl_pf_event pfe = { 0 };
+       struct ice_link_status *ls;
+       struct ice_pf *pf = vf->pf;
+       struct ice_hw *hw;
+
+       hw = &pf->hw;
+       ls = &hw->port_info->phy.link_info;
+
+       pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+       pfe.severity = PF_EVENT_SEVERITY_INFO;
+
+       if (vf->link_forced)
+               ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
+       else
+               ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info &
+                                ICE_AQ_LINK_UP);
+
+       ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
+                             sizeof(pfe), NULL);
+}
+
+/**
+ * ice_get_vf_vector - get VF interrupt vector register offset
+ * @vf_msix: number of MSI-X vectors per VF on a PF
+ * @vf_id: VF identifier
+ * @i: index of the MSI-X vector
+ */
+static u32 ice_get_vf_vector(int vf_msix, int vf_id, int i)
+{
+       return ((i == 0) ? VFINT_DYN_CTLN(vf_id) :
+                VFINT_DYN_CTLN(((vf_msix - 1) * (vf_id)) + (i - 1)));
+}
+
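The offset arithmetic above is easiest to verify with numbers: vector 0 of every VF lives at VFINT_DYN_CTLN(vf_id), and for vf_msix = 5, vf_id = 2, i = 3 the remaining-vector formula gives (5 - 1) * 2 + (3 - 1) = 10. A sketch with the register macro stubbed out to return its index:

#include <stdio.h>

/* stub: the real VFINT_DYN_CTLN() maps an index to a register address */
#define VFINT_DYN_CTLN(i) (i)

static int vf_vector_index(int vf_msix, int vf_id, int i)
{
        return (i == 0) ? VFINT_DYN_CTLN(vf_id)
                        : VFINT_DYN_CTLN((vf_msix - 1) * vf_id + (i - 1));
}

int main(void)
{
        int vf_msix = 5;        /* say, 5 MSI-X vectors per VF */

        for (int vf = 0; vf < 2; vf++)
                for (int i = 0; i < vf_msix; i++)
                        printf("vf %d vec %d -> slot %d\n",
                               vf, i, vf_vector_index(vf_msix, vf, i));
        return 0;
}
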
+/**
+ * ice_free_vf_res - Free a VF's resources
+ * @vf: pointer to the VF info
+ */
+static void ice_free_vf_res(struct ice_vf *vf)
+{
+       struct ice_pf *pf = vf->pf;
+       int i, pf_vf_msix;
+
+       /* First, disable VF's configuration API to prevent OS from
+        * accessing the VF's VSI after it's freed or invalidated.
+        */
+       clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+
+       /* free vsi & disconnect it from the parent uplink */
+       if (vf->lan_vsi_idx) {
+               ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
+               vf->lan_vsi_idx = 0;
+               vf->lan_vsi_num = 0;
+               vf->num_mac = 0;
+       }
+
+       pf_vf_msix = pf->num_vf_msix;
+       /* Disable interrupts so that VF starts in a known state */
+       for (i = 0; i < pf_vf_msix; i++) {
+               u32 reg_idx;
+
+               reg_idx = ice_get_vf_vector(pf_vf_msix, vf->vf_id, i);
+               wr32(&pf->hw, reg_idx, VFINT_DYN_CTLN_CLEARPBA_M);
+               ice_flush(&pf->hw);
+       }
+       /* reset some of the state variables keeping track of the resources */
+       clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+       clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+}
+
+/***********************enable_vf routines*****************************/
+
+/**
+ * ice_dis_vf_mappings
+ * @vf: pointer to the VF structure
+ */
+static void ice_dis_vf_mappings(struct ice_vf *vf)
+{
+       struct ice_pf *pf = vf->pf;
+       struct ice_vsi *vsi;
+       int first, last, v;
+       struct ice_hw *hw;
+
+       hw = &pf->hw;
+       vsi = pf->vsi[vf->lan_vsi_idx];
+
+       wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
+
+       first = vf->first_vector_idx;
+       last = first + pf->num_vf_msix - 1;
+       for (v = first; v <= last; v++) {
+               u32 reg;
+
+               reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
+                       GLINT_VECT2FUNC_IS_PF_M) |
+                      ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
+                       GLINT_VECT2FUNC_PF_NUM_M));
+               wr32(hw, GLINT_VECT2FUNC(v), reg);
+       }
+
+       if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
+               wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
+       else
+               dev_err(&pf->pdev->dev,
+                       "Scattered mode for VF Tx queues is not yet implemented\n");
+
+       if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
+               wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
+       else
+               dev_err(&pf->pdev->dev,
+                       "Scattered mode for VF Rx queues is not yet implemented\n");
+}
+
+/**
+ * ice_free_vfs - Free all VFs
+ * @pf: pointer to the PF structure
+ */
+void ice_free_vfs(struct ice_pf *pf)
+{
+       struct ice_hw *hw = &pf->hw;
+       int tmp, i;
+
+       if (!pf->vf)
+               return;
+
+       while (test_and_set_bit(__ICE_VF_DIS, pf->state))
+               usleep_range(1000, 2000);
+
+       /* Avoid wait time by stopping all VFs at the same time */
+       for (i = 0; i < pf->num_alloc_vfs; i++) {
+               if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
+                       continue;
+
+               /* stop rings without wait time */
+               ice_vsi_stop_tx_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
+                                     ICE_NO_RESET, i);
+               ice_vsi_stop_rx_rings(pf->vsi[pf->vf[i].lan_vsi_idx]);
+
+               clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
+       }
+
+       /* Disable IOV before freeing resources. This lets any VF drivers
+        * running in the host get themselves cleaned up before we yank
+        * the carpet out from underneath their feet.
+        */
+       if (!pci_vfs_assigned(pf->pdev))
+               pci_disable_sriov(pf->pdev);
+       else
+               dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+
+       tmp = pf->num_alloc_vfs;
+       pf->num_vf_qps = 0;
+       pf->num_alloc_vfs = 0;
+       for (i = 0; i < tmp; i++) {
+               if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
+                       /* disable VF qp mappings */
+                       ice_dis_vf_mappings(&pf->vf[i]);
+
+                       /* Set this state so that assigned VF vectors can be
+                        * reclaimed by PF for reuse in ice_vsi_release(). No
+                        * need to clear this bit since pf->vf array is being
+                        * freed anyways after this for loop
+                        */
+                       set_bit(ICE_VF_STATE_CFG_INTR, pf->vf[i].vf_states);
+                       ice_free_vf_res(&pf->vf[i]);
+               }
+       }
+
+       devm_kfree(&pf->pdev->dev, pf->vf);
+       pf->vf = NULL;
+
+       /* This check is for when the driver is unloaded while VFs are
+        * assigned. Setting the number of VFs to 0 through sysfs is caught
+        * before this function ever gets called.
+        */
+       if (!pci_vfs_assigned(pf->pdev)) {
+               int vf_id;
+
+               /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
+                * work correctly when SR-IOV gets re-enabled.
+                */
+               for (vf_id = 0; vf_id < tmp; vf_id++) {
+                       u32 reg_idx, bit_idx;
+
+                       reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+                       bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+                       wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+               }
+       }
+       clear_bit(__ICE_VF_DIS, pf->state);
+       clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
+}
+
+/**
+ * ice_trigger_vf_reset - Reset a VF on HW
+ * @vf: pointer to the VF structure
+ * @is_vflr: true if VFLR was issued, false if not
+ *
+ * Trigger hardware to start a reset for a particular VF. Expects the caller
+ * to wait the proper amount of time to allow hardware to reset the VF before
+ * it cleans up and restores VF functionality.
+ */
+static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr)
+{
+       struct ice_pf *pf = vf->pf;
+       u32 reg, reg_idx, bit_idx;
+       struct ice_hw *hw;
+       int vf_abs_id, i;
+
+       hw = &pf->hw;
+       vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+       /* Inform VF that it is no longer active, as a warning */
+       clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+
+       /* Disable VF's configuration API during reset. The flag is re-enabled
+        * in ice_alloc_vf_res(), when it's safe again to access VF's VSI.
+        * It's normally disabled in ice_free_vf_res(), but it's safer to do
+        * it earlier to give any VF config functions that may still be
+        * running at this point time to finish.
+        */
+       clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+
+       /* In the case of a VFLR, the HW has already reset the VF and we
+        * just need to clean up, so don't hit the VFRTRIG register.
+        */
+       if (!is_vflr) {
+               /* reset VF using VPGEN_VFRTRIG reg */
+               reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
+               reg |= VPGEN_VFRTRIG_VFSWR_M;
+               wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
+       }
+       /* clear the VFLR bit in GLGEN_VFLRSTAT */
+       reg_idx = (vf_abs_id) / 32;
+       bit_idx = (vf_abs_id) % 32;
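+       /* GLGEN_VFLRSTAT is an array of 32-bit registers, so e.g. an absolute
+        * VF id of 37 lands in register 1, bit 5
+        */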
+       wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+       ice_flush(hw);
+
+       wr32(hw, PF_PCI_CIAA,
+            VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
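+       /* Poll for up to ~100us (100 iterations of udelay(1)) and complain as
+        * long as the VF still has PCI transactions outstanding
+        */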
+       for (i = 0; i < 100; i++) {
+               reg = rd32(hw, PF_PCI_CIAD);
+               if ((reg & VF_TRANS_PENDING_M) != 0)
+                       dev_err(&pf->pdev->dev,
+                               "VF %d PCI transactions stuck\n", vf->vf_id);
+               udelay(1);
+       }
+}
+
+/**
+ * ice_vsi_set_pvid - Set port VLAN id for the VSI
+ * @vsi: the VSI being changed
+ * @vid: the VLAN id to set as a PVID
+ */
+static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
+{
+       struct device *dev = &vsi->back->pdev->dev;
+       struct ice_hw *hw = &vsi->back->hw;
+       struct ice_vsi_ctx ctxt = { 0 };
+       enum ice_status status;
+
+       ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_TAGGED |
+                              ICE_AQ_VSI_PVLAN_INSERT_PVID |
+                              ICE_AQ_VSI_VLAN_EMOD_STR;
+       ctxt.info.pvid = cpu_to_le16(vid);
+       ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+       status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+       if (status) {
+               dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
+                        status, hw->adminq.sq_last_status);
+               return -EIO;
+       }
+
+       vsi->info.pvid = ctxt.info.pvid;
+       vsi->info.vlan_flags = ctxt.info.vlan_flags;
+       return 0;
+}
+
+/**
+ * ice_vsi_kill_pvid - Remove port VLAN id from the VSI
+ * @vsi: the VSI being changed
+ */
+static int ice_vsi_kill_pvid(struct ice_vsi *vsi)
+{
+       struct ice_pf *pf = vsi->back;
+
+       if (ice_vsi_manage_vlan_stripping(vsi, false)) {
+               dev_err(&pf->pdev->dev, "Error removing Port VLAN on VSI %i\n",
+                       vsi->vsi_num);
+               return -ENODEV;
+       }
+
+       vsi->info.pvid = 0;
+       return 0;
+}
+
+/**
+ * ice_vf_vsi_setup - Set up a VF VSI
+ * @pf: board private structure
+ * @pi: pointer to the port_info instance
+ * @vf_id: defines VF id to which this VSI connects.
+ *
+ * Returns pointer to the successfully allocated VSI struct on success,
+ * otherwise returns NULL on failure.
+ */
+static struct ice_vsi *
+ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
+{
+       return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
+}
+
+/**
+ * ice_alloc_vsi_res - Setup VF VSI and its resources
+ * @vf: pointer to the VF structure
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int ice_alloc_vsi_res(struct ice_vf *vf)
+{
+       struct ice_pf *pf = vf->pf;
+       LIST_HEAD(tmp_add_list);
+       u8 broadcast[ETH_ALEN];
+       struct ice_vsi *vsi;
+       int status = 0;
+
+       vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
+
+       if (!vsi) {
+               dev_err(&pf->pdev->dev, "Failed to create VF VSI\n");
+               return -ENOMEM;
+       }
+
+       vf->lan_vsi_idx = vsi->idx;
+       vf->lan_vsi_num = vsi->vsi_num;
+
+       /* first vector index is the VF's OICR index */
+       vf->first_vector_idx = vsi->hw_base_vector;
+       /* Since hw_base_vector holds the vector where data queue interrupts
+        * start, increment by 1 because the vectors allocated to a VF include
+        * the OICR interrupt as well.
+        */
+       vsi->hw_base_vector += 1;
+
+       /* Check if a port VLAN existed before, and restore it accordingly */
+       if (vf->port_vlan_id)
+               ice_vsi_set_pvid(vsi, vf->port_vlan_id);
+
+       eth_broadcast_addr(broadcast);
+
+       status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
+       if (status)
+               goto ice_alloc_vsi_res_exit;
+
+       if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
+               status = ice_add_mac_to_list(vsi, &tmp_add_list,
+                                            vf->dflt_lan_addr.addr);
+               if (status)
+                       goto ice_alloc_vsi_res_exit;
+       }
+
+       status = ice_add_mac(&pf->hw, &tmp_add_list);
+       if (status)
+               dev_err(&pf->pdev->dev, "could not add mac filters\n");
+
+       /* Clear this bit after VF initialization since we shouldn't reclaim
+        * and reassign interrupts for synchronous or asynchronous VFR events.
+        * We don't want to reconfigure interrupts since AVF driver doesn't
+        * expect vector assignment to be changed unless there is a request for
+        * more vectors.
+        */
+       clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states);
+ice_alloc_vsi_res_exit:
+       ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+       return status;
+}
+
+/**
+ * ice_alloc_vf_res - Allocate VF resources
+ * @vf: pointer to the VF structure
+ */
+static int ice_alloc_vf_res(struct ice_vf *vf)
+{
+       int status;
+
+       /* setup VF VSI and necessary resources */
+       status = ice_alloc_vsi_res(vf);
+       if (status)
+               goto ice_alloc_vf_res_exit;
+
+       if (vf->trusted)
+               set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+       else
+               clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+
+       /* VF is now completely initialized */
+       set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+
+       return status;
+
+ice_alloc_vf_res_exit:
+       ice_free_vf_res(vf);
+       return status;
+}
+
+/**
+ * ice_ena_vf_mappings
+ * @vf: pointer to the VF structure
+ *
+ * Enable VF vectors and queues allocation by writing the details into
+ * respective registers.
+ */
+static void ice_ena_vf_mappings(struct ice_vf *vf)
+{
+       struct ice_pf *pf = vf->pf;
+       struct ice_vsi *vsi;
+       int first, last, v;
+       struct ice_hw *hw;
+       int abs_vf_id;
+       u32 reg;
+
+       hw = &pf->hw;
+       vsi = pf->vsi[vf->lan_vsi_idx];
+       first = vf->first_vector_idx;
+       last = (first + pf->num_vf_msix) - 1;
+       abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
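+       /* e.g. (illustrative numbers) first_vector_idx = 10 and
+        * num_vf_msix = 5 give this VF HW vectors 10..14 inclusive
+        */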
+
+       /* VF Vector allocation */
+       reg = (((first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
+              ((last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
+              VPINT_ALLOC_VALID_M);
+       wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
+
+       /* map the interrupts to its functions */
+       for (v = first; v <= last; v++) {
+               reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
+                       GLINT_VECT2FUNC_VF_NUM_M) |
+                      ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
+                       GLINT_VECT2FUNC_PF_NUM_M));
+               wr32(hw, GLINT_VECT2FUNC(v), reg);
+       }
+
+       /* VF Tx queues allocation */
+       if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
+               wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id),
+                    VPLAN_TXQ_MAPENA_TX_ENA_M);
+               /* set the VF PF Tx queue range
+                * VFNUMQ value should be set to (number of queues - 1). A value
+                * of 0 means 1 queue and a value of 255 means 256 queues
+                */
+               reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
+                       VPLAN_TX_QBASE_VFFIRSTQ_M) |
+                      (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
+                       VPLAN_TX_QBASE_VFNUMQ_M));
+               wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
+       } else {
+               dev_err(&pf->pdev->dev,
+                       "Scattered mode for VF Tx queues is not yet implemented\n");
+       }
+
+       /* VF Rx queues allocation */
+       if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
+               wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id),
+                    VPLAN_RXQ_MAPENA_RX_ENA_M);
+               /* set the VF PF Rx queue range
+                * VFNUMQ value should be set to (number of queues - 1). A value
+                * of 0 means 1 queue and a value of 255 means 256 queues
+                */
+               reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
+                       VPLAN_RX_QBASE_VFFIRSTQ_M) |
+                      (((vsi->alloc_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
+                       VPLAN_RX_QBASE_VFNUMQ_M));
+               wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
+       } else {
+               dev_err(&pf->pdev->dev,
+                       "Scattered mode for VF Rx queues is not yet implemented\n");
+       }
+}
+
+/**
+ * ice_determine_res
+ * @pf: pointer to the PF structure
+ * @avail_res: available resources in the PF structure
+ * @max_res: maximum resources that can be given per VF
+ * @min_res: minimum resources that can be given per VF
+ *
+ * Returns a non-zero value if resources (queues/vectors) are available,
+ * or zero if the PF cannot accommodate all num_alloc_vfs.
+ */
+static int
+ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
+{
+       bool checked_min_res = false;
+       int res;
+
+       /* Start by checking if the PF can assign the max number of resources
+        * for all num_alloc_vfs.
+        * If yes, return the number per VF.
+        * If no, divide by 2 and round up, then check again.
+        * Repeat the loop until we reach a point where even the minimum
+        * resources are not available; in that case return 0.
+        */
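+       /* For example (illustrative numbers): with max_res = 65, min_res = 5,
+        * 8 VFs and avail_res = 256: 8 * 65 = 520 > 256, so try
+        * DIV_ROUND_UP(65, 2) = 33; 8 * 33 = 264 > 256, so try 17;
+        * 8 * 17 = 136 <= 256, so each VF gets 17.
+        */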
+       res = max_res;
+       while ((res >= min_res) && !checked_min_res) {
+               int num_all_res;
+
+               num_all_res = pf->num_alloc_vfs * res;
+               if (num_all_res <= avail_res)
+                       return res;
+
+               if (res == min_res)
+                       checked_min_res = true;
+
+               res = DIV_ROUND_UP(res, 2);
+       }
+       return 0;
+}
+
+/**
+ * ice_check_avail_res - check if vectors and queues are available
+ * @pf: pointer to the PF structure
+ *
+ * This function is where we calculate actual number of resources for VF VSIs,
+ * we don't reserve ahead of time during probe. Returns success if vectors and
+ * queues resources are available, otherwise returns error code
+ */
+static int ice_check_avail_res(struct ice_pf *pf)
+{
+       u16 num_msix, num_txq, num_rxq;
+
+       if (!pf->num_alloc_vfs)
+               return -EINVAL;
+
+       /* Grab from the HW interrupts common pool.
+        * Note: by the time the user decides it needs more vectors in a VF
+        * it's already too late, since one must decide this prior to creating
+        * the VF interface. So the best we can do is take a guess as to what
+        * the user might want.
+        *
+        * We have two policies for vector allocation:
+        * 1. If num_alloc_vfs is from 1 to 16, we consider this a small
+        * number of VFs used for NFV appliances. Since this is a special
+        * case, we try to assign the maximum number of vectors per VF (65)
+        * as far as possible, based on the determine_resources algorithm.
+        * 2. If num_alloc_vfs is from 17 to 256, it's a large number of
+        * regular VFs which are not used for any special purpose. Hence try
+        * to grab the default number of interrupt vectors (5, as supported
+        * by the AVF driver).
+        */
+       if (pf->num_alloc_vfs <= 16) {
+               num_msix = ice_determine_res(pf, pf->num_avail_hw_msix,
+                                            ICE_MAX_INTR_PER_VF,
+                                            ICE_MIN_INTR_PER_VF);
+       } else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) {
+               num_msix = ice_determine_res(pf, pf->num_avail_hw_msix,
+                                            ICE_DFLT_INTR_PER_VF,
+                                            ICE_MIN_INTR_PER_VF);
+       } else {
+               dev_err(&pf->pdev->dev,
+                       "Number of VFs %d exceeds max VF count %d\n",
+                       pf->num_alloc_vfs, ICE_MAX_VF_COUNT);
+               return -EIO;
+       }
+
+       if (!num_msix)
+               return -EIO;
+
+       /* Grab from the common pool.
+        * Start by requesting the default number of queues (4, as supported
+        * by the AVF driver). Note that the main difference between queues
+        * and vectors is that the latter can only be reserved at init time,
+        * while queues can be requested by the VF at runtime through
+        * virtchnl; that is the reason we start by reserving few queues.
+        */
+       num_txq = ice_determine_res(pf, pf->q_left_tx, ICE_DFLT_QS_PER_VF,
+                                   ICE_MIN_QS_PER_VF);
+
+       num_rxq = ice_determine_res(pf, pf->q_left_rx, ICE_DFLT_QS_PER_VF,
+                                   ICE_MIN_QS_PER_VF);
+
+       if (!num_txq || !num_rxq)
+               return -EIO;
+
+       /* The AVF driver works only with queue pairs, which means it expects
+        * an equal number of Rx and Tx queues, so take the minimum of the
+        * available Tx and Rx queues.
+        */
+       pf->num_vf_qps = min_t(int, num_txq, num_rxq);
+       pf->num_vf_msix = num_msix;
+
+       return 0;
+}
+
+/**
+ * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset
+ * @vf: pointer to the VF structure
+ *
+ * Clean up a VF after the hardware reset is finished. Expects the caller to
+ * have verified that the reset finished properly and ensured the minimum
+ * amount of wait time has passed. Reallocates VF resources to make the VF
+ * state active.
+ */
+static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
+{
+       struct ice_pf *pf = vf->pf;
+       struct ice_hw *hw;
+       u32 reg;
+
+       hw = &pf->hw;
+
+       /* PF software completes the flow by notifying VF that reset flow is
+        * completed. This is done by clearing the reset bit in the
+        * VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT register
+        * to VFR completed (done at the end of this function).
+        * By doing this we allow HW to access VF memory at any point. If we
+        * did it any sooner, HW could access memory while it was being freed
+        * in ice_free_vf_res(), causing an IOMMU fault.
+        *
+        * On the other hand, this needs to be done ASAP, because the VF driver
+        * is waiting for this to happen and may report a timeout. It's
+        * harmless, but it gets logged into Guest OS kernel log, so best avoid
+        * it.
+        */
+       reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
+       reg &= ~VPGEN_VFRTRIG_VFSWR_M;
+       wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
+
+       /* reallocate VF resources to finish resetting the VSI state */
+       if (!ice_alloc_vf_res(vf)) {
+               ice_ena_vf_mappings(vf);
+               set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+               clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
+               vf->num_vlan = 0;
+       }
+
+       /* Tell the VF driver the reset is done. This needs to be done only
+        * after VF has been fully initialized, because the VF driver may
+        * request resources immediately after setting this flag.
+        */
+       wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+}
+
+/**
+ * ice_reset_all_vfs - reset all allocated VFs in one go
+ * @pf: pointer to the PF structure
+ * @is_vflr: true if VFLR was issued, false if not
+ *
+ * First, tell the hardware to reset each VF, then do all the waiting in one
+ * chunk, and finally finish restoring each VF after the wait. This is useful
+ * during PF routines which need to reset all VFs, as otherwise they would
+ * have to perform these resets in a serialized fashion.
+ *
+ * Returns true if any VFs were reset, and false otherwise.
+ */
+bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
+{
+       struct ice_hw *hw = &pf->hw;
+       int v, i;
+
+       /* If we don't have any VFs, then there is nothing to reset */
+       if (!pf->num_alloc_vfs)
+               return false;
+
+       /* If VFs have been disabled, there is no need to reset */
+       if (test_and_set_bit(__ICE_VF_DIS, pf->state))
+               return false;
+
+       /* Begin reset on all VFs at once */
+       for (v = 0; v < pf->num_alloc_vfs; v++)
+               ice_trigger_vf_reset(&pf->vf[v], is_vflr);
+
+       /* Issue the Disable LAN Tx queue AQ command with the VFR bit set and
+        * 0 queues to inform the firmware about the VF reset.
+        */
+       for (v = 0; v < pf->num_alloc_vfs; v++)
+               ice_dis_vsi_txq(pf->vsi[0]->port_info, 0, NULL, NULL,
+                               ICE_VF_RESET, v, NULL);
+
+       /* HW requires some time to make sure it can flush the FIFO for a VF
+        * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
+        * sequence to make sure that it has completed. We'll keep track of
+        * the VFs using a simple iterator that increments once that VF has
+        * finished resetting.
+        */
+       for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
+               usleep_range(10000, 20000);
+
+               /* Check each VF in sequence */
+               while (v < pf->num_alloc_vfs) {
+                       struct ice_vf *vf = &pf->vf[v];
+                       u32 reg;
+
+                       reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
+                       if (!(reg & VPGEN_VFRSTAT_VFRD_M))
+                               break;
+
+                       /* If the current VF has finished resetting, move on
+                        * to the next VF in sequence.
+                        */
+                       v++;
+               }
+       }
+
+       /* Display a warning if at least one VF didn't manage to reset in
+        * time, but continue on with the operation.
+        */
+       if (v < pf->num_alloc_vfs)
+               dev_warn(&pf->pdev->dev, "VF reset check timeout\n");
+       usleep_range(10000, 20000);
+
+       /* free VF resources to begin resetting the VSI state */
+       for (v = 0; v < pf->num_alloc_vfs; v++)
+               ice_free_vf_res(&pf->vf[v]);
+
+       if (ice_check_avail_res(pf)) {
+               dev_err(&pf->pdev->dev,
+                       "Cannot allocate VF resources, try with fewer VFs\n");
+               return false;
+       }
+
+       /* Finish the reset on each VF */
+       for (v = 0; v < pf->num_alloc_vfs; v++)
+               ice_cleanup_and_realloc_vf(&pf->vf[v]);
+
+       ice_flush(hw);
+       clear_bit(__ICE_VF_DIS, pf->state);
+
+       return true;
+}
+
+/**
+ * ice_reset_vf - Reset a particular VF
+ * @vf: pointer to the VF structure
+ * @is_vflr: true if VFLR was issued, false if not
+ *
+ * Returns true if the VF is reset, false otherwise.
+ */
+static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
+{
+       struct ice_pf *pf = vf->pf;
+       struct ice_hw *hw = &pf->hw;
+       bool rsd = false;
+       u32 reg;
+       int i;
+
+       /* If the VFs have been disabled, this means something else is
+        * resetting the VF, so we shouldn't continue.
+        */
+       if (test_and_set_bit(__ICE_VF_DIS, pf->state))
+               return false;
+
+       ice_trigger_vf_reset(vf, is_vflr);
+
+       if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
+               ice_vsi_stop_tx_rings(pf->vsi[vf->lan_vsi_idx], ICE_VF_RESET,
+                                     vf->vf_id);
+               ice_vsi_stop_rx_rings(pf->vsi[vf->lan_vsi_idx]);
+               clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
+       } else {
+               /* Issue the Disable LAN Tx queue AQ command even when queues
+                * are not enabled. This is needed for successful completion
+                * of VFR.
+                */
+               ice_dis_vsi_txq(pf->vsi[vf->lan_vsi_idx]->port_info, 0,
+                               NULL, NULL, ICE_VF_RESET, vf->vf_id, NULL);
+       }
+
+       /* poll VPGEN_VFRSTAT reg to make sure
+        * that reset is complete
+        */
+       for (i = 0; i < 10; i++) {
+               /* VF reset requires driver to first reset the VF and then
+                * poll the status register to make sure that the reset
+                * completed successfully.
+                */
+               usleep_range(10000, 20000);
+               reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
+               if (reg & VPGEN_VFRSTAT_VFRD_M) {
+                       rsd = true;
+                       break;
+               }
+       }
+
+       /* Display a warning if the VF didn't manage to reset in time, but
+        * continue on with the operation.
+        */
+       if (!rsd)
+               dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
+                        vf->vf_id);
+
+       usleep_range(10000, 20000);
+
+       /* free VF resources to begin resetting the VSI state */
+       ice_free_vf_res(vf);
+
+       ice_cleanup_and_realloc_vf(vf);
+
+       ice_flush(hw);
+       clear_bit(__ICE_VF_DIS, pf->state);
+
+       return true;
+}
+
+/**
+ * ice_vc_notify_link_state - Inform all VFs on a PF of link status
+ * @pf: pointer to the PF structure
+ */
+void ice_vc_notify_link_state(struct ice_pf *pf)
+{
+       int i;
+
+       for (i = 0; i < pf->num_alloc_vfs; i++)
+               ice_vc_notify_vf_link_state(&pf->vf[i]);
+}
+
+/**
+ * ice_vc_notify_reset - Send pending reset message to all VFs
+ * @pf: pointer to the PF structure
+ *
+ * indicate a pending reset to all VFs on a given PF
+ */
+void ice_vc_notify_reset(struct ice_pf *pf)
+{
+       struct virtchnl_pf_event pfe;
+
+       if (!pf->num_alloc_vfs)
+               return;
+
+       pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+       pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+       ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, ICE_SUCCESS,
+                           (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
+}
+
+/**
+ * ice_vc_notify_vf_reset - Notify VF of a reset event
+ * @vf: pointer to the VF structure
+ */
+static void ice_vc_notify_vf_reset(struct ice_vf *vf)
+{
+       struct virtchnl_pf_event pfe;
+
+       /* validate the request */
+       if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+               return;
+
+       /* verify if the VF is in either init or active before proceeding */
+       if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+           !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+               return;
+
+       pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+       pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+       ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0,
+                             (u8 *)&pfe, sizeof(pfe), NULL);
+}
+
+/**
+ * ice_alloc_vfs - Allocate and set up VFs resources
+ * @pf: pointer to the PF structure
+ * @num_alloc_vfs: number of VFs to allocate
+ */
+static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
+{
+       struct ice_hw *hw = &pf->hw;
+       struct ice_vf *vfs;
+       int i, ret;
+
+       /* Disable global interrupt 0 so we don't try to handle the VFLR. */
+       wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx),
+            ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
+
+       ice_flush(hw);
+
+       ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
+       if (ret) {
+               pf->num_alloc_vfs = 0;
+               goto err_unroll_intr;
+       }
+       /* allocate memory */
+       vfs = devm_kcalloc(&pf->pdev->dev, num_alloc_vfs, sizeof(*vfs),
+                          GFP_KERNEL);
+       if (!vfs) {
+               ret = -ENOMEM;
+               goto err_unroll_sriov;
+       }
+       pf->vf = vfs;
+
+       /* apply default profile */
+       for (i = 0; i < num_alloc_vfs; i++) {
+               vfs[i].pf = pf;
+               vfs[i].vf_sw_id = pf->first_sw;
+               vfs[i].vf_id = i;
+
+               /* assign default capabilities */
+               set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
+               vfs[i].spoofchk = true;
+
+               /* Set this state so that PF driver does VF vector assignment */
+               set_bit(ICE_VF_STATE_CFG_INTR, vfs[i].vf_states);
+       }
+       pf->num_alloc_vfs = num_alloc_vfs;
+
+       /* VF resources get allocated during reset */
+       if (!ice_reset_all_vfs(pf, false)) {
+               /* report the failure; ret would otherwise still be 0 here */
+               ret = -EIO;
+               goto err_unroll_sriov;
+       }
+
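+       /* On success fall through to the unroll label below; it only re-arms
+        * global interrupt 0 and returns ret, which is 0 here
+        */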
+       goto err_unroll_intr;
+
+err_unroll_sriov:
+       pci_disable_sriov(pf->pdev);
+err_unroll_intr:
+       /* rearm interrupts here */
+       ice_irq_dynamic_ena(hw, NULL, NULL);
+       return ret;
+}
+
+/**
+ * ice_pf_state_is_nominal - checks the PF for nominal state
+ * @pf: pointer to the PF to check
+ *
+ * Check the PF's state for a collection of bits that would indicate
+ * the PF is in a state that would inhibit normal operation of the
+ * driver.
+ *
+ * Returns true if PF is in a nominal state.
+ * Returns false otherwise
+ */
+static bool ice_pf_state_is_nominal(struct ice_pf *pf)
+{
+       DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
+
+       if (!pf)
+               return false;
+
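+       /* Build a mask of the first __ICE_STATE_NOMINAL_CHECK_BITS state
+        * flags; if any of them is currently set, the PF is not nominal
+        */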
+       bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
+       if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
+               return false;
+
+       return true;
+}
+
+/**
+ * ice_pci_sriov_ena - Enable or change number of VFs
+ * @pf: pointer to the PF structure
+ * @num_vfs: number of VFs to allocate
+ */
+static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
+{
+       int pre_existing_vfs = pci_num_vf(pf->pdev);
+       struct device *dev = &pf->pdev->dev;
+       int err;
+
+       if (!ice_pf_state_is_nominal(pf)) {
+               dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
+               return -EBUSY;
+       }
+
+       if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
+               dev_err(dev, "This device is not capable of SR-IOV\n");
+               return -ENODEV;
+       }
+
+       if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+               ice_free_vfs(pf);
+       else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+               return num_vfs;
+
+       if (num_vfs > pf->num_vfs_supported) {
+               dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
+                       num_vfs, pf->num_vfs_supported);
+               return -ENOTSUPP;
+       }
+
+       dev_info(dev, "Allocating %d VFs\n", num_vfs);
+       err = ice_alloc_vfs(pf, num_vfs);
+       if (err) {
+               dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
+               return err;
+       }
+
+       set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
+       return num_vfs;
+}
+
+/**
+ * ice_sriov_configure - Enable or change number of VFs via sysfs
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs to allocate
+ *
+ * This function is called when the user updates the number of VFs in sysfs.
+ */
+int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+       struct ice_pf *pf = pci_get_drvdata(pdev);
+
+       if (num_vfs)
+               return ice_pci_sriov_ena(pf, num_vfs);
+
+       if (!pci_vfs_assigned(pdev)) {
+               ice_free_vfs(pf);
+       } else {
+               dev_err(&pf->pdev->dev,
+                       "can't free VFs because some are assigned to VMs.\n");
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+/**
+ * ice_process_vflr_event - Free VF resources via IRQ calls
+ * @pf: pointer to the PF structure
+ *
+ * called from the VFLR IRQ handler to
+ * free up VF resources and state variables
+ */
+void ice_process_vflr_event(struct ice_pf *pf)
+{
+       struct ice_hw *hw = &pf->hw;
+       int vf_id;
+       u32 reg;
+
+       if (!test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
+           !pf->num_alloc_vfs)
+               return;
+
+       /* Re-enable the VFLR interrupt cause here, before looking for which
+        * VF got reset. Otherwise, if another VF gets a reset while the
+        * first one is being processed, that interrupt will be lost, and
+        * that VF will be stuck in reset forever.
+        */
+       reg = rd32(hw, PFINT_OICR_ENA);
+       reg |= PFINT_OICR_VFLR_M;
+       wr32(hw, PFINT_OICR_ENA, reg);
+       ice_flush(hw);
+
+       clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
+       for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
+               struct ice_vf *vf = &pf->vf[vf_id];
+               u32 reg_idx, bit_idx;
+
+               reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+               bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+               /* read GLGEN_VFLRSTAT register to find out which VFs got a VFLR */
+               reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
+               if (reg & BIT(bit_idx))
+                       /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
+                       ice_reset_vf(vf, true);
+       }
+}
+
+/**
+ * ice_vc_dis_vf - Disable a given VF via SW reset
+ * @vf: pointer to the VF info
+ *
+ * Disable the VF through a SW reset
+ */
+static void ice_vc_dis_vf(struct ice_vf *vf)
+{
+       ice_vc_notify_vf_reset(vf);
+       ice_reset_vf(vf, false);
+}
+
+/**
+ * ice_vc_send_msg_to_vf - Send message to VF
+ * @vf: pointer to the VF info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send msg to VF
+ */
+static int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+                                enum ice_status v_retval, u8 *msg, u16 msglen)
+{
+       enum ice_status aq_ret;
+       struct ice_pf *pf;
+
+       /* validate the request */
+       if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+               return -EINVAL;
+
+       pf = vf->pf;
+
+       /* single place to detect unsuccessful return values */
+       if (v_retval) {
+               vf->num_inval_msgs++;
+               dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
+                        vf->vf_id, v_opcode, v_retval);
+               if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
+                       dev_err(&pf->pdev->dev,
+                               "Number of invalid messages exceeded for VF %d\n",
+                               vf->vf_id);
+                       dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
+                       set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+                       return -EIO;
+               }
+       } else {
+               vf->num_valid_msgs++;
+               /* reset the invalid counter, if a valid message is received. */
+               vf->num_inval_msgs = 0;
+       }
+
+       aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
+                                      msg, msglen, NULL);
+       if (aq_ret) {
+               dev_info(&pf->pdev->dev,
+                        "Unable to send the message to VF %d aq_err %d\n",
+                        vf->vf_id, pf->hw.mailboxq.sq_last_status);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/**
+ * ice_vc_get_ver_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to request the API version used by the PF
+ */
+static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
+{
+       struct virtchnl_version_info info = {
+               VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
+       };
+
+       vf->vf_ver = *(struct virtchnl_version_info *)msg;
+       /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
+       if (VF_IS_V10(&vf->vf_ver))
+               info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, ICE_SUCCESS,
+                                    (u8 *)&info,
+                                    sizeof(struct virtchnl_version_info));
+}
+
+/**
+ * ice_vc_get_vf_res_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to request its resources
+ */
+static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
+{
+       struct virtchnl_vf_resource *vfres = NULL;
+       enum ice_status aq_ret = 0;
+       struct ice_pf *pf = vf->pf;
+       struct ice_vsi *vsi;
+       int len = 0;
+       int ret;
+
+       if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+               aq_ret = ICE_ERR_PARAM;
+               goto err;
+       }
+
+       len = sizeof(struct virtchnl_vf_resource);
+
+       vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL);
+       if (!vfres) {
+               aq_ret = ICE_ERR_NO_MEMORY;
+               len = 0;
+               goto err;
+       }
+       if (VF_IS_V11(&vf->vf_ver))
+               vf->driver_caps = *(u32 *)msg;
+       else
+               vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
+                                 VIRTCHNL_VF_OFFLOAD_RSS_REG |
+                                 VIRTCHNL_VF_OFFLOAD_VLAN;
+
+       vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
+       vsi = pf->vsi[vf->lan_vsi_idx];
+       if (!vsi->info.pvid)
+               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
+
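+       /* Advertise exactly one RSS flavor back to the VF: prefer PF-managed
+        * RSS if the VF offered it, else fall back to AQ-based and finally
+        * register-based RSS
+        */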
+       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
+       } else {
+               if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+                       vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+               else
+                       vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
+       }
+
+       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
+
+       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
+               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
+
+       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
+               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
+
+       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+
+       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
+
+       if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
+               vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+
+       if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
+               vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
+
+       vfres->num_vsis = 1;
+       /* Tx and Rx queues are equal for the VF */
+       vfres->num_queue_pairs = vsi->num_txq;
+       vfres->max_vectors = pf->num_vf_msix;
+       vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
+       vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+
+       vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
+       vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
+       vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
+       ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
+                       vf->dflt_lan_addr.addr);
+
+       set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+
+err:
+       /* send the response back to the VF */
+       ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, aq_ret,
+                                   (u8 *)vfres, len);
+
+       devm_kfree(&pf->pdev->dev, vfres);
+       return ret;
+}
+
+/**
+ * ice_vc_reset_vf_msg
+ * @vf: pointer to the VF info
+ *
+ * called from the VF to reset itself;
+ * unlike other virtchnl messages, the PF driver
+ * doesn't send a response back to the VF
+ */
+static void ice_vc_reset_vf_msg(struct ice_vf *vf)
+{
+       if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+               ice_reset_vf(vf, false);
+}
+
+/**
+ * ice_find_vsi_from_id
+ * @pf: the pf structure to search for the VSI
+ * @id: id of the VSI it is searching for
+ *
+ * searches for the VSI with the given id
+ */
+static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
+{
+       int i;
+
+       for (i = 0; i < pf->num_alloc_vsi; i++)
+               if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
+                       return pf->vsi[i];
+
+       return NULL;
+}
+
+/**
+ * ice_vc_isvalid_vsi_id
+ * @vf: pointer to the VF info
+ * @vsi_id: VF relative VSI id
+ *
+ * check for the valid VSI id
+ */
+static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
+{
+       struct ice_pf *pf = vf->pf;
+       struct ice_vsi *vsi;
+
+       vsi = ice_find_vsi_from_id(pf, vsi_id);
+
+       return (vsi && (vsi->vf_id == vf->vf_id));
+}
+
+/**
+ * ice_vc_isvalid_q_id
+ * @vf: pointer to the VF info
+ * @vsi_id: VSI id
+ * @qid: VSI relative queue id
+ *
+ * check for the valid queue id
+ */
+static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
+{
+       struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
+
+       /* allocated Tx and Rx queues should always be equal for a VF VSI */
+       return (vsi && (qid < vsi->alloc_txq));
+}
+
+/**
+ * ice_vc_config_rss_key
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS key
+ */
+static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
+{
+       struct virtchnl_rss_key *vrk =
+               (struct virtchnl_rss_key *)msg;
+       struct ice_vsi *vsi = NULL;
+       enum ice_status aq_ret;
+       int ret;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       vsi = ice_find_vsi_from_id(vf->pf, vrk->vsi_id);
+       if (!vsi) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       ret = ice_set_rss(vsi, vrk->key, NULL, 0);
+       aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS;
+error_param:
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, aq_ret,
+                                    NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_lut
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS LUT
+ */
+static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
+{
+       struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+       struct ice_vsi *vsi = NULL;
+       enum ice_status aq_ret;
+       int ret;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       vsi = ice_find_vsi_from_id(vf->pf, vrl->vsi_id);
+       if (!vsi) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       ret = ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE);
+       aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS;
+error_param:
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, aq_ret,
+                                    NULL, 0);
+}
+
+/**
+ * ice_vc_get_stats_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to get VSI stats
+ */
+static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
+{
+       struct virtchnl_queue_select *vqs =
+               (struct virtchnl_queue_select *)msg;
+       enum ice_status aq_ret = 0;
+       struct ice_eth_stats stats;
+       struct ice_vsi *vsi;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+       if (!vsi) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       memset(&stats, 0, sizeof(struct ice_eth_stats));
+       ice_update_eth_stats(vsi);
+
+       stats = vsi->eth_stats;
+
+error_param:
+       /* send the response to the VF */
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
+                                    (u8 *)&stats, sizeof(stats));
+}
+
+/**
+ * ice_vc_ena_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to enable all or specific queue(s)
+ */
+static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+       struct virtchnl_queue_select *vqs =
+           (struct virtchnl_queue_select *)msg;
+       enum ice_status aq_ret = 0;
+       struct ice_vsi *vsi;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!vqs->rx_queues && !vqs->tx_queues) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+       if (!vsi) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       /* Enable only Rx rings; Tx rings were enabled by the FW when the
+        * Tx queue group list was configured and the context bits were
+        * programmed using ice_vsi_cfg_txqs
+        */
+       if (ice_vsi_start_rx_rings(vsi))
+               aq_ret = ICE_ERR_PARAM;
+
+       /* Set flag to indicate that queues are enabled */
+       if (!aq_ret)
+               set_bit(ICE_VF_STATE_ENA, vf->vf_states);
+
+error_param:
+       /* send the response to the VF */
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, aq_ret,
+                                    NULL, 0);
+}
+
+/**
+ * ice_vc_dis_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to disable all or specific
+ * queue(s)
+ */
+static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+       struct virtchnl_queue_select *vqs =
+           (struct virtchnl_queue_select *)msg;
+       enum ice_status aq_ret = 0;
+       struct ice_vsi *vsi;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
+           !test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!vqs->rx_queues && !vqs->tx_queues) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+       if (!vsi) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) {
+               dev_err(&vsi->back->pdev->dev,
+                       "Failed to stop Tx rings on VSI %d\n",
+                       vsi->vsi_num);
+               aq_ret = ICE_ERR_PARAM;
+       }
+
+       if (ice_vsi_stop_rx_rings(vsi)) {
+               dev_err(&vsi->back->pdev->dev,
+                       "Failed to stop Rx rings on VSI %d\n",
+                       vsi->vsi_num);
+               aq_ret = ICE_ERR_PARAM;
+       }
+
+       /* Clear enabled queues flag */
+       if (!aq_ret)
+               clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
+
+error_param:
+       /* send the response to the VF */
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, aq_ret,
+                                    NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_irq_map_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the IRQ to queue map
+ */
+static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
+{
+       struct virtchnl_irq_map_info *irqmap_info =
+           (struct virtchnl_irq_map_info *)msg;
+       u16 vsi_id, vsi_q_id, vector_id;
+       struct virtchnl_vector_map *map;
+       struct ice_vsi *vsi = NULL;
+       struct ice_pf *pf = vf->pf;
+       enum ice_status aq_ret = 0;
+       unsigned long qmap;
+       int i;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
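+       /* Each vector_map entry carries a vector id plus rxq_map/txq_map
+        * bitmaps of the VSI-relative queue ids serviced by that vector
+        */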
+       for (i = 0; i < irqmap_info->num_vectors; i++) {
+               map = &irqmap_info->vecmap[i];
+
+               vector_id = map->vector_id;
+               vsi_id = map->vsi_id;
+               /* validate msg params */
+               if (vector_id >= pf->hw.func_caps.common_cap.num_msix_vectors ||
+                   !ice_vc_isvalid_vsi_id(vf, vsi_id)) {
+                       aq_ret = ICE_ERR_PARAM;
+                       goto error_param;
+               }
+
+               vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
+               if (!vsi) {
+                       aq_ret = ICE_ERR_PARAM;
+                       goto error_param;
+               }
+
+               /* look out for invalid queue indexes */
+               qmap = map->rxq_map;
+               for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
+                       struct ice_q_vector *q_vector;
+
+                       if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
+                               aq_ret = ICE_ERR_PARAM;
+                               goto error_param;
+                       }
+                       q_vector = vsi->q_vectors[i];
+                       q_vector->num_ring_rx++;
+                       q_vector->rx.itr_idx = map->rxitr_idx;
+                       vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
+               }
+
+               qmap = map->txq_map;
+               for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
+                       struct ice_q_vector *q_vector;
+
+                       if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
+                               aq_ret = ICE_ERR_PARAM;
+                               goto error_param;
+                       }
+                       q_vector = vsi->q_vectors[i];
+                       q_vector->num_ring_tx++;
+                       q_vector->tx.itr_idx = map->txitr_idx;
+                       vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
+               }
+       }
+
+       if (vsi)
+               ice_vsi_cfg_msix(vsi);
+error_param:
+       /* send the response to the VF */
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, aq_ret,
+                                    NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the Rx/Tx queues
+ */
+static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+       struct virtchnl_vsi_queue_config_info *qci =
+           (struct virtchnl_vsi_queue_config_info *)msg;
+       struct virtchnl_queue_pair_info *qpi;
+       enum ice_status aq_ret = 0;
+       struct ice_vsi *vsi;
+       int i;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       vsi = ice_find_vsi_from_id(vf->pf, qci->vsi_id);
+       if (!vsi) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       for (i = 0; i < qci->num_queue_pairs; i++) {
+               qpi = &qci->qpair[i];
+               if (qpi->txq.vsi_id != qci->vsi_id ||
+                   qpi->rxq.vsi_id != qci->vsi_id ||
+                   qpi->rxq.queue_id != qpi->txq.queue_id ||
+                   !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
+                       aq_ret = ICE_ERR_PARAM;
+                       goto error_param;
+               }
+               /* copy Tx queue info from VF into VSI */
+               vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
+               vsi->tx_rings[i]->count = qpi->txq.ring_len;
+               /* copy Rx queue info from VF into vsi */
+               vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
+               vsi->rx_rings[i]->count = qpi->rxq.ring_len;
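+               /* Rx data buffers are capped just under 16KB
+                * (16384 - 128 = 16256 bytes)
+                */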
+               if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
+                       aq_ret = ICE_ERR_PARAM;
+                       goto error_param;
+               }
+               vsi->rx_buf_len = qpi->rxq.databuffer_size;
+               if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
+                   qpi->rxq.max_pkt_size < 64) {
+                       aq_ret = ICE_ERR_PARAM;
+                       goto error_param;
+               }
+               vsi->max_frame = qpi->rxq.max_pkt_size;
+       }
+
+       /* The VF can request to configure fewer queues than were allocated to
+        * it by default, so update the VSI with the new number.
+        */
+       vsi->num_txq = qci->num_queue_pairs;
+       vsi->num_rxq = qci->num_queue_pairs;
+
+       if (!ice_vsi_cfg_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi))
+               aq_ret = 0;
+       else
+               aq_ret = ICE_ERR_PARAM;
+
+error_param:
+       /* send the response to the VF */
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, aq_ret,
+                                    NULL, 0);
+}
+
+/**
+ * ice_is_vf_trusted
+ * @vf: pointer to the VF info
+ */
+static bool ice_is_vf_trusted(struct ice_vf *vf)
+{
+       return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+}
+
+/**
+ * ice_can_vf_change_mac
+ * @vf: pointer to the VF info
+ *
+ * Return true if the VF is allowed to change its MAC filters, false otherwise
+ */
+static bool ice_can_vf_change_mac(struct ice_vf *vf)
+{
+       /* If the VF MAC address has been set administratively (via the
+        * ndo_set_vf_mac command), then deny permission to the VF to
+        * add/delete unicast MAC addresses, unless the VF is trusted
+        */
+       if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
+               return false;
+
+       return true;
+}
+
+/**
+ * ice_vc_handle_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @set: true if mac filters are being set, false otherwise
+ *
+ * Add or remove guest MAC address filters
+ */
+static int
+ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
+{
+       struct virtchnl_ether_addr_list *al =
+           (struct virtchnl_ether_addr_list *)msg;
+       struct ice_pf *pf = vf->pf;
+       enum virtchnl_ops vc_op;
+       enum ice_status ret;
+       LIST_HEAD(mac_list);
+       struct ice_vsi *vsi;
+       int mac_count = 0;
+       int i;
+
+       if (set)
+               vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
+       else
+               vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+           !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
+               ret = ICE_ERR_PARAM;
+               goto handle_mac_exit;
+       }
+
+       if (set && !ice_is_vf_trusted(vf) &&
+           (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
+               dev_err(&pf->pdev->dev,
+                       "Can't add more MAC addresses: VF is not trusted, switch the VF to trusted mode to raise the limit\n");
+               ret = ICE_ERR_PARAM;
+               goto handle_mac_exit;
+       }
+
+       vsi = pf->vsi[vf->lan_vsi_idx];
+
+       for (i = 0; i < al->num_elements; i++) {
+               u8 *maddr = al->list[i].addr;
+
+               if (ether_addr_equal(maddr, vf->dflt_lan_addr.addr) ||
+                   is_broadcast_ether_addr(maddr)) {
+                       if (set) {
+                               /* VF is trying to add filters that the PF
+                                * already added. Just continue.
+                                */
+                               dev_info(&pf->pdev->dev,
+                                        "mac %pM already set for VF %d\n",
+                                        maddr, vf->vf_id);
+                               continue;
+                       } else {
+                               /* VF can't remove dflt_lan_addr/bcast mac */
+                               dev_err(&pf->pdev->dev,
+                                       "can't remove mac %pM for VF %d\n",
+                                       maddr, vf->vf_id);
+                               ret = ICE_ERR_PARAM;
+                               goto handle_mac_exit;
+                       }
+               }
+
+               /* check for the invalid cases and bail if necessary */
+               if (is_zero_ether_addr(maddr)) {
+                       dev_err(&pf->pdev->dev,
+                               "invalid mac %pM provided for VF %d\n",
+                               maddr, vf->vf_id);
+                       ret = ICE_ERR_PARAM;
+                       goto handle_mac_exit;
+               }
+
+               if (is_unicast_ether_addr(maddr) &&
+                   !ice_can_vf_change_mac(vf)) {
+                       dev_err(&pf->pdev->dev,
+                               "can't change unicast mac for untrusted VF %d\n",
+                               vf->vf_id);
+                       ret = ICE_ERR_PARAM;
+                       goto handle_mac_exit;
+               }
+
+               /* get here if maddr is multicast or if VF can change mac */
+               if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) {
+                       ret = ICE_ERR_NO_MEMORY;
+                       goto handle_mac_exit;
+               }
+               mac_count++;
+       }
+
+       /* program the updated filter list */
+       if (set)
+               ret = ice_add_mac(&pf->hw, &mac_list);
+       else
+               ret = ice_remove_mac(&pf->hw, &mac_list);
+
+       if (ret) {
+               dev_err(&pf->pdev->dev,
+                       "can't update mac filters for VF %d, error %d\n",
+                       vf->vf_id, ret);
+       } else {
+               if (set)
+                       vf->num_mac += mac_count;
+               else
+                       vf->num_mac -= mac_count;
+       }
+
+handle_mac_exit:
+       ice_free_fltr_list(&pf->pdev->dev, &mac_list);
+       /* send the response to the VF */
+       return ice_vc_send_msg_to_vf(vf, vc_op, ret, NULL, 0);
+}
+
+/**
+ * ice_vc_add_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * add guest MAC address filter
+ */
+static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
+{
+       return ice_vc_handle_mac_addr_msg(vf, msg, true);
+}
+
+/**
+ * ice_vc_del_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * remove guest MAC address filter
+ */
+static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
+{
+       return ice_vc_handle_mac_addr_msg(vf, msg, false);
+}
+
+/**
+ * ice_vc_request_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * VFs get a default number of queues but can use this message to request a
+ * different number. If the request is successful, the PF resets the VF and
+ * returns 0. If unsuccessful, the PF responds over virtchnl with the number
+ * of queue pairs that are available.
+ */
+static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+       struct virtchnl_vf_res_request *vfres =
+               (struct virtchnl_vf_res_request *)msg;
+       int req_queues = vfres->num_queue_pairs;
+       enum ice_status aq_ret = 0;
+       struct ice_pf *pf = vf->pf;
+       int tx_rx_queue_left;
+       int cur_queues;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       cur_queues = pf->num_vf_qps;
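+       /* a queue pair needs both a Tx and an Rx queue, so the usable
+        * headroom is the smaller of the two free pools
+        */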
+       tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
+       if (req_queues <= 0) {
+               dev_err(&pf->pdev->dev,
+                       "VF %d tried to request %d queues. Ignoring.\n",
+                       vf->vf_id, req_queues);
+       } else if (req_queues > ICE_MAX_QS_PER_VF) {
+               dev_err(&pf->pdev->dev,
+                       "VF %d tried to request more than %d queues.\n",
+                       vf->vf_id, ICE_MAX_QS_PER_VF);
+               vfres->num_queue_pairs = ICE_MAX_QS_PER_VF;
+       } else if (req_queues - cur_queues > tx_rx_queue_left) {
+               dev_warn(&pf->pdev->dev,
+                        "VF %d requested %d more queues, but only %d left.\n",
+                        vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
+               vfres->num_queue_pairs = tx_rx_queue_left + cur_queues;
+       } else {
+               /* request is valid; reset the VF to apply the new count */
+               vf->num_req_qs = req_queues;
+               ice_vc_dis_vf(vf);
+               dev_info(&pf->pdev->dev,
+                        "VF %d granted request of %d queues.\n",
+                        vf->vf_id, req_queues);
+               return 0;
+       }
+
+error_param:
+       /* send the response to the VF */
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
+                                    aq_ret, (u8 *)vfres, sizeof(*vfres));
+}
+
+/**
+ * ice_set_vf_port_vlan
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @vlan_id: VLAN id being set
+ * @qos: priority setting
+ * @vlan_proto: VLAN protocol
+ *
+ * program VF Port VLAN id and/or qos
+ */
+int
+ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
+                    __be16 vlan_proto)
+{
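+       /* fold the VLAN id and QoS priority into the 16-bit PVID layout */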
+       u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S);
+       struct ice_netdev_priv *np = netdev_priv(netdev);
+       struct ice_pf *pf = np->vsi->back;
+       struct ice_vsi *vsi;
+       struct ice_vf *vf;
+       int ret = 0;
+
+       /* validate the request */
+       if (vf_id >= pf->num_alloc_vfs) {
+               dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
+               return -EINVAL;
+       }
+
+       if (vlan_id > ICE_MAX_VLANID || qos > 7) {
+               dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
+               return -EINVAL;
+       }
+
+       if (vlan_proto != htons(ETH_P_8021Q)) {
+               dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
+               return -EPROTONOSUPPORT;
+       }
+
+       vf = &pf->vf[vf_id];
+       vsi = pf->vsi[vf->lan_vsi_idx];
+       if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+               dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+               return -EBUSY;
+       }
+
+       if (le16_to_cpu(vsi->info.pvid) == vlanprio) {
+               /* duplicate request, so just return success */
+               dev_info(&pf->pdev->dev,
+                        "Duplicate pvid %d request\n", vlanprio);
+               return ret;
+       }
+
+       /* If pvid, then remove all filters on the old VLAN */
+       if (vsi->info.pvid)
+               ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
+                                 VLAN_VID_MASK));
+
+       if (vlan_id || qos) {
+               ret = ice_vsi_set_pvid(vsi, vlanprio);
+               if (ret)
+                       goto error_set_pvid;
+       } else {
+               ice_vsi_kill_pvid(vsi);
+       }
+
+       if (vlan_id) {
+               dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+                        vlan_id, qos, vf_id);
+
+               /* add new VLAN filter for each MAC */
+               ret = ice_vsi_add_vlan(vsi, vlan_id);
+               if (ret)
+                       goto error_set_pvid;
+       }
+
+       /* The Port VLAN needs to be saved across resets the same as the
+        * default LAN MAC address.
+        */
+       vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
+
+error_set_pvid:
+       return ret;
+}
+
+/**
+ * ice_vc_process_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @add_v: Add VLAN if true, otherwise delete VLAN
+ *
+ * Process virtchnl op to add or remove programmed guest VLAN id
+ */
+static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
+{
+       struct virtchnl_vlan_filter_list *vfl =
+           (struct virtchnl_vlan_filter_list *)msg;
+       enum ice_status aq_ret = 0;
+       struct ice_pf *pf = vf->pf;
+       struct ice_vsi *vsi;
+       int i;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (add_v && !ice_is_vf_trusted(vf) &&
+           vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
+               dev_info(&pf->pdev->dev,
+                        "VF is not trusted, switch the VF to trusted mode to add more VLANs\n");
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       for (i = 0; i < vfl->num_elements; i++) {
+               if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
+                       aq_ret = ICE_ERR_PARAM;
+                       dev_err(&pf->pdev->dev,
+                               "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
+                       goto error_param;
+               }
+       }
+
+       vsi = ice_find_vsi_from_id(vf->pf, vfl->vsi_id);
+       if (!vsi) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (vsi->info.pvid) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (ice_vsi_manage_vlan_stripping(vsi, add_v)) {
+               dev_err(&pf->pdev->dev,
+                       "%sable VLAN stripping failed for VSI %i\n",
+                        add_v ? "en" : "dis", vsi->vsi_num);
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (add_v) {
+               for (i = 0; i < vfl->num_elements; i++) {
+                       u16 vid = vfl->vlan_id[i];
+
+                       if (!ice_vsi_add_vlan(vsi, vid)) {
+                               vf->num_vlan++;
+                               set_bit(vid, vsi->active_vlans);
+
+                               /* Enable VLAN pruning when VLAN 0 is added */
+                               if (unlikely(!vid))
+                                       if (ice_cfg_vlan_pruning(vsi, true))
+                                               aq_ret = ICE_ERR_PARAM;
+                       } else {
+                               aq_ret = ICE_ERR_PARAM;
+                       }
+               }
+       } else {
+               for (i = 0; i < vfl->num_elements; i++) {
+                       u16 vid = vfl->vlan_id[i];
+
+                       /* Make sure ice_vsi_kill_vlan is successful before
+                        * updating VLAN information
+                        */
+                       if (!ice_vsi_kill_vlan(vsi, vid)) {
+                               vf->num_vlan--;
+                               clear_bit(vid, vsi->active_vlans);
+
+                               /* Disable VLAN pruning when removing VLAN 0 */
+                               if (unlikely(!vid))
+                                       ice_cfg_vlan_pruning(vsi, false);
+                       }
+               }
+       }
+
+error_param:
+       /* send the response to the VF */
+       if (add_v)
+               return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret,
+                                            NULL, 0);
+       else
+               return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret,
+                                            NULL, 0);
+}
+
+/**
+ * ice_vc_add_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Add and program guest VLAN id
+ */
+static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
+{
+       return ice_vc_process_vlan_msg(vf, msg, true);
+}
+
+/**
+ * ice_vc_remove_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * remove programmed guest VLAN id
+ */
+static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
+{
+       return ice_vc_process_vlan_msg(vf, msg, false);
+}
+
+/**
+ * ice_vc_ena_vlan_stripping
+ * @vf: pointer to the VF info
+ *
+ * Enable VLAN header stripping for a given VF
+ */
+static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
+{
+       enum ice_status aq_ret = 0;
+       struct ice_pf *pf = vf->pf;
+       struct ice_vsi *vsi;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       vsi = pf->vsi[vf->lan_vsi_idx];
+       if (ice_vsi_manage_vlan_stripping(vsi, true))
+               aq_ret = ICE_ERR_AQ_ERROR;
+
+error_param:
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+                                    aq_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_dis_vlan_stripping
+ * @vf: pointer to the VF info
+ *
+ * Disable VLAN header stripping for a given VF
+ */
+static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
+{
+       enum ice_status aq_ret = 0;
+       struct ice_pf *pf = vf->pf;
+       struct ice_vsi *vsi;
+
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+               aq_ret = ICE_ERR_PARAM;
+               goto error_param;
+       }
+
+       vsi = pf->vsi[vf->lan_vsi_idx];
+       if (ice_vsi_manage_vlan_stripping(vsi, false))
+               aq_ret = ICE_ERR_AQ_ERROR;
+
+error_param:
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+                                    aq_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_process_vf_msg - Process request from VF
+ * @pf: pointer to the PF structure
+ * @event: pointer to the AQ event
+ *
+ * called from the common asq/arq handler to
+ * process request from VF
+ */
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
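+       /* the AQ event descriptor carries the virtchnl opcode in
+        * cookie_high and the source VF id in the retval field
+        */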
+       u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
+       s16 vf_id = le16_to_cpu(event->desc.retval);
+       u16 msglen = event->msg_len;
+       u8 *msg = event->msg_buf;
+       struct ice_vf *vf = NULL;
+       int err = 0;
+
+       if (vf_id >= pf->num_alloc_vfs) {
+               err = -EINVAL;
+               goto error_handler;
+       }
+
+       vf = &pf->vf[vf_id];
+
+       /* Check if VF is disabled. */
+       if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
+               err = -EPERM;
+               goto error_handler;
+       }
+
+       /* Perform basic checks on the msg */
+       err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
+       if (err) {
+               if (err == VIRTCHNL_ERR_PARAM)
+                       err = -EPERM;
+               else
+                       err = -EINVAL;
+               goto error_handler;
+       }
+
+       /* Perform additional checks specific to the RSS virtchnl opcodes */
+       if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
+               struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
+
+               if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE)
+                       err = -EINVAL;
+       } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
+               struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+
+               if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE)
+                       err = -EINVAL;
+       }
+
+error_handler:
+       if (err) {
+               /* vf is NULL when the VF id failed validation above */
+               if (vf)
+                       ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_PARAM,
+                                             NULL, 0);
+               dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
+                       vf_id, v_opcode, msglen, err);
+               return;
+       }
+
+       switch (v_opcode) {
+       case VIRTCHNL_OP_VERSION:
+               err = ice_vc_get_ver_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_GET_VF_RESOURCES:
+               err = ice_vc_get_vf_res_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_RESET_VF:
+               ice_vc_reset_vf_msg(vf);
+               break;
+       case VIRTCHNL_OP_ADD_ETH_ADDR:
+               err = ice_vc_add_mac_addr_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_DEL_ETH_ADDR:
+               err = ice_vc_del_mac_addr_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+               err = ice_vc_cfg_qs_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_ENABLE_QUEUES:
+               err = ice_vc_ena_qs_msg(vf, msg);
+               ice_vc_notify_vf_link_state(vf);
+               break;
+       case VIRTCHNL_OP_DISABLE_QUEUES:
+               err = ice_vc_dis_qs_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_REQUEST_QUEUES:
+               err = ice_vc_request_qs_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+               err = ice_vc_cfg_irq_map_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_CONFIG_RSS_KEY:
+               err = ice_vc_config_rss_key(vf, msg);
+               break;
+       case VIRTCHNL_OP_CONFIG_RSS_LUT:
+               err = ice_vc_config_rss_lut(vf, msg);
+               break;
+       case VIRTCHNL_OP_GET_STATS:
+               err = ice_vc_get_stats_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_ADD_VLAN:
+               err = ice_vc_add_vlan_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_DEL_VLAN:
+               err = ice_vc_remove_vlan_msg(vf, msg);
+               break;
+       case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+               err = ice_vc_ena_vlan_stripping(vf);
+               break;
+       case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+               err = ice_vc_dis_vlan_stripping(vf);
+               break;
+       case VIRTCHNL_OP_UNKNOWN:
+       default:
+               dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
+                       v_opcode, vf_id);
+               err = ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_NOT_IMPL,
+                                           NULL, 0);
+               break;
+       }
+       if (err) {
+               /* the caller does not act on handler return values, so
+                * just log the failure here
+                */
+               dev_info(&pf->pdev->dev,
+                        "PF failed to honor VF %d, opcode %d, error %d\n",
+                        vf_id, v_opcode, err);
+       }
+}
+
+/**
+ * ice_get_vf_cfg
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ivi: VF configuration structure
+ *
+ * return VF configuration
+ */
+int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
+                  struct ifla_vf_info *ivi)
+{
+       struct ice_netdev_priv *np = netdev_priv(netdev);
+       struct ice_vsi *vsi = np->vsi;
+       struct ice_pf *pf = vsi->back;
+       struct ice_vf *vf;
+
+       /* validate the request */
+       if (vf_id >= pf->num_alloc_vfs) {
+               netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+               return -EINVAL;
+       }
+
+       vf = &pf->vf[vf_id];
+       vsi = pf->vsi[vf->lan_vsi_idx];
+
+       if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+               netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+               return -EBUSY;
+       }
+
+       ivi->vf = vf_id;
+       ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
+
+       /* VF configuration for VLAN and applicable QoS */
+       ivi->vlan = le16_to_cpu(vsi->info.pvid) & ICE_VLAN_M;
+       ivi->qos = (le16_to_cpu(vsi->info.pvid) & ICE_PRIORITY_M) >>
+                   ICE_VLAN_PRIORITY_S;
+
+       ivi->trusted = vf->trusted;
+       ivi->spoofchk = vf->spoofchk;
+       if (!vf->link_forced)
+               ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+       else if (vf->link_up)
+               ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+       else
+               ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+       ivi->max_tx_rate = vf->tx_rate;
+       ivi->min_tx_rate = 0;
+       return 0;
+}
+
+/**
+ * ice_set_vf_spoofchk
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ena: flag to enable or disable feature
+ *
+ * Enable or disable VF spoof checking
+ */
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
+{
+       struct ice_netdev_priv *np = netdev_priv(netdev);
+       struct ice_vsi_ctx ctx = { 0 };
+       struct ice_vsi *vsi = np->vsi;
+       struct ice_pf *pf = vsi->back;
+       struct ice_vf *vf;
+       int status;
+
+       /* validate the request */
+       if (vf_id >= pf->num_alloc_vfs) {
+               netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+               return -EINVAL;
+       }
+
+       vf = &pf->vf[vf_id];
+       if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+               netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+               return -EBUSY;
+       }
+
+       if (ena == vf->spoofchk) {
+               dev_dbg(&pf->pdev->dev, "VF spoofchk already %s\n",
+                       ena ? "ON" : "OFF");
+               return 0;
+       }
+
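+       /* only the security section of the VSI context is updated */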
+       ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+
+       if (ena) {
+               ctx.info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+               ctx.info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M;
+       }
+
+       status = ice_update_vsi(&pf->hw, vsi->idx, &ctx, NULL);
+       if (status) {
+               dev_dbg(&pf->pdev->dev,
+                       "Error %d, failed to update VSI parameters\n", status);
+               return -EIO;
+       }
+
+       vf->spoofchk = ena;
+       vsi->info.sec_flags = ctx.info.sec_flags;
+       vsi->info.sw_flags2 = ctx.info.sw_flags2;
+
+       return status;
+}
+
+/**
+ * ice_set_vf_mac
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @mac: mac address
+ *
+ * program VF mac address
+ */
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+       struct ice_netdev_priv *np = netdev_priv(netdev);
+       struct ice_vsi *vsi = np->vsi;
+       struct ice_pf *pf = vsi->back;
+       struct ice_vf *vf;
+       int ret = 0;
+
+       /* validate the request */
+       if (vf_id >= pf->num_alloc_vfs) {
+               netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+               return -EINVAL;
+       }
+
+       vf = &pf->vf[vf_id];
+       if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+               netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+               return -EBUSY;
+       }
+
+       if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
+               netdev_err(netdev, "%pM not a valid unicast address\n", mac);
+               return -EINVAL;
+       }
+
+       /* copy mac into dflt_lan_addr and trigger a VF reset. The reset
+        * flow will use the updated dflt_lan_addr and add a MAC filter
+        * using ice_add_mac. Also set pf_set_mac to indicate that the PF has
+        * set the MAC address for this VF.
+        */
+       ether_addr_copy(vf->dflt_lan_addr.addr, mac);
+       vf->pf_set_mac = true;
+       netdev_info(netdev,
+                   "mac on VF %d set to %pM. VF driver will be reinitialized\n",
+                   vf_id, mac);
+
+       ice_vc_dis_vf(vf);
+       return ret;
+}
+
+/**
+ * ice_set_vf_trust
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @trusted: Boolean value to enable/disable trusted VF
+ *
+ * Enable or disable a given VF as trusted
+ */
+int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
+{
+       struct ice_netdev_priv *np = netdev_priv(netdev);
+       struct ice_vsi *vsi = np->vsi;
+       struct ice_pf *pf = vsi->back;
+       struct ice_vf *vf;
+
+       /* validate the request */
+       if (vf_id >= pf->num_alloc_vfs) {
+               dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
+               return -EINVAL;
+       }
+
+       vf = &pf->vf[vf_id];
+       if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+               dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+               return -EBUSY;
+       }
+
+       /* Check if already trusted */
+       if (trusted == vf->trusted)
+               return 0;
+
+       vf->trusted = trusted;
+       ice_vc_dis_vf(vf);
+       dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
+                vf_id, trusted ? "" : "un");
+
+       return 0;
+}
+
+/**
+ * ice_set_vf_link_state
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @link_state: required link state
+ *
+ * Set VF's link state, irrespective of the physical link status
+ */
+int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
+{
+       struct ice_netdev_priv *np = netdev_priv(netdev);
+       struct ice_pf *pf = np->vsi->back;
+       struct virtchnl_pf_event pfe = { 0 };
+       struct ice_link_status *ls;
+       struct ice_vf *vf;
+       struct ice_hw *hw;
+
+       if (vf_id >= pf->num_alloc_vfs) {
+               dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+               return -EINVAL;
+       }
+
+       vf = &pf->vf[vf_id];
+       hw = &pf->hw;
+       ls = &pf->hw.port_info->phy.link_info;
+
+       if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+               dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+               return -EBUSY;
+       }
+
+       pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+       pfe.severity = PF_EVENT_SEVERITY_INFO;
+
+       switch (link_state) {
+       case IFLA_VF_LINK_STATE_AUTO:
+               vf->link_forced = false;
+               vf->link_up = ls->link_info & ICE_AQ_LINK_UP;
+               break;
+       case IFLA_VF_LINK_STATE_ENABLE:
+               vf->link_forced = true;
+               vf->link_up = true;
+               break;
+       case IFLA_VF_LINK_STATE_DISABLE:
+               vf->link_forced = true;
+               vf->link_up = false;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (vf->link_forced)
+               ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
+       else
+               ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up);
+
+       /* Notify the VF of its new link state */
+       ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
+                             sizeof(pfe), NULL);
+
+       return 0;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
new file mode 100644 (file)
index 0000000..10131e0
--- /dev/null
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_VIRTCHNL_PF_H_
+#define _ICE_VIRTCHNL_PF_H_
+#include "ice.h"
+
+#define ICE_MAX_VLANID                 4095
+#define ICE_VLAN_PRIORITY_S            12
+#define ICE_VLAN_M                     0xFFF
+#define ICE_PRIORITY_M                 0x7000
+
+/* Restrict the number of MAC addresses and VLANs a non-trusted VF can program */
+#define ICE_MAX_VLAN_PER_VF            8
+#define ICE_MAX_MACADDR_PER_VF         12
+
+/* Malicious Driver Detection */
+#define ICE_DFLT_NUM_MDD_EVENTS_ALLOWED                3
+#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED                10
+
+/* Static VF transaction/status register def */
+#define VF_DEVICE_STATUS               0xAA
+#define VF_TRANS_PENDING_M             0x20
+
+/* Specific VF states */
+enum ice_vf_states {
+       ICE_VF_STATE_INIT = 0,
+       ICE_VF_STATE_ACTIVE,
+       ICE_VF_STATE_ENA,
+       ICE_VF_STATE_DIS,
+       ICE_VF_STATE_MC_PROMISC,
+       ICE_VF_STATE_UC_PROMISC,
+       /* state to indicate that the PF must assign vectors to the VF.
+        * Set during initial VF bring-up, and again later when the VF
+        * requests more vectors through a virtchnl OP.
+        */
+       ICE_VF_STATE_CFG_INTR,
+       ICE_VF_STATES_NBITS
+};
+
+/* VF capabilities */
+enum ice_virtchnl_cap {
+       ICE_VIRTCHNL_VF_CAP_L2 = 0,
+       ICE_VIRTCHNL_VF_CAP_PRIVILEGE,
+};
+
+/* VF information structure */
+struct ice_vf {
+       struct ice_pf *pf;
+
+       s16 vf_id;                      /* VF id in the PF space */
+       u32 driver_caps;                /* reported by VF driver */
+       int first_vector_idx;           /* first vector index of this VF */
+       struct ice_sw *vf_sw_id;        /* switch id the VF VSIs connect to */
+       struct virtchnl_version_info vf_ver;
+       struct virtchnl_ether_addr dflt_lan_addr;
+       u16 port_vlan_id;
+       u8 pf_set_mac;                  /* VF MAC address set by VMM admin */
+       u8 trusted;
+       u16 lan_vsi_idx;                /* index into PF struct */
+       u16 lan_vsi_num;                /* ID as used by firmware */
+       u64 num_mdd_events;             /* number of mdd events detected */
+       u64 num_inval_msgs;             /* number of continuous invalid msgs */
+       u64 num_valid_msgs;             /* number of valid msgs detected */
+       unsigned long vf_caps;          /* vf's adv. capabilities */
+       DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
+       unsigned int tx_rate;           /* Tx bandwidth limit in Mbps */
+       u8 link_forced;
+       u8 link_up;                     /* only valid if VF link is forced */
+       u8 spoofchk;
+       u16 num_mac;
+       u16 num_vlan;
+       u8 num_req_qs;          /* num of queue pairs requested by VF */
+};
+
+#ifdef CONFIG_PCI_IOV
+void ice_process_vflr_event(struct ice_pf *pf);
+int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
+int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
+                  struct ifla_vf_info *ivi);
+
+void ice_free_vfs(struct ice_pf *pf);
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
+void ice_vc_notify_link_state(struct ice_pf *pf);
+void ice_vc_notify_reset(struct ice_pf *pf);
+bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
+
+int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+                        u16 vlan_id, u8 qos, __be16 vlan_proto);
+
+int ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+                 int max_tx_rate);
+
+int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
+
+int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
+
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
+#else /* CONFIG_PCI_IOV */
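+/* SR-IOV compiled out: stub the API so callers build unchanged */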
+#define ice_process_vflr_event(pf) do {} while (0)
+#define ice_free_vfs(pf) do {} while (0)
+#define ice_vc_process_vf_msg(pf, event) do {} while (0)
+#define ice_vc_notify_link_state(pf) do {} while (0)
+#define ice_vc_notify_reset(pf) do {} while (0)
+
+static inline bool
+ice_reset_all_vfs(struct ice_pf __always_unused *pf,
+                 bool __always_unused is_vflr)
+{
+       return true;
+}
+
+static inline int
+ice_sriov_configure(struct pci_dev __always_unused *pdev,
+                   int __always_unused num_vfs)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_mac(struct net_device __always_unused *netdev,
+              int __always_unused vf_id, u8 __always_unused *mac)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+ice_get_vf_cfg(struct net_device __always_unused *netdev,
+              int __always_unused vf_id,
+              struct ifla_vf_info __always_unused *ivi)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_trust(struct net_device __always_unused *netdev,
+                int __always_unused vf_id, bool __always_unused trusted)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_port_vlan(struct net_device __always_unused *netdev,
+                    int __always_unused vf_id, u16 __always_unused vid,
+                    u8 __always_unused qos, __be16 __always_unused v_proto)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_spoofchk(struct net_device __always_unused *netdev,
+                   int __always_unused vf_id, bool __always_unused ena)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_link_state(struct net_device __always_unused *netdev,
+                     int __always_unused vf_id, int __always_unused link_state)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_bw(struct net_device __always_unused *netdev,
+             int __always_unused vf_id, int __always_unused min_tx_rate,
+             int __always_unused max_tx_rate)
+{
+       return -EOPNOTSUPP;
+}
+#endif /* CONFIG_PCI_IOV */
+#endif /* _ICE_VIRTCHNL_PF_H_ */
index 5414685189cef099d83b4ee46e3261d16c1e2825..ca6b0c458e4a50eca96aea8ace64806cbf66aa45 100644 (file)
@@ -8,7 +8,8 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
 
 ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
               ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
-              ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o
+              ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \
+              ixgbe_xsk.o
 
 ixgbe-$(CONFIG_IXGBE_DCB) +=  ixgbe_dcb.o ixgbe_dcb_82598.o \
                               ixgbe_dcb_82599.o ixgbe_dcb_nl.o
index 5c6fd42e90ed72392411a46fead18f45c2f89a2b..7a7679e7be84c548c091452bdcea736567fffc66 100644 (file)
@@ -228,13 +228,17 @@ struct ixgbe_tx_buffer {
 struct ixgbe_rx_buffer {
        struct sk_buff *skb;
        dma_addr_t dma;
-       struct page *page;
-#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
-       __u32 page_offset;
-#else
-       __u16 page_offset;
-#endif
-       __u16 pagecnt_bias;
+       union {
+               struct {
+                       struct page *page;
+                       __u32 page_offset;
+                       __u16 pagecnt_bias;
+               };
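+               /* fields used instead when the ring is backed by an
+                * AF_XDP zero-copy UMEM
+                */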
+               struct {
+                       void *addr;
+                       u64 handle;
+               };
+       };
 };
 
 struct ixgbe_queue_stats {
@@ -271,6 +275,7 @@ enum ixgbe_ring_state_t {
        __IXGBE_TX_DETECT_HANG,
        __IXGBE_HANG_CHECK_ARMED,
        __IXGBE_TX_XDP_RING,
+       __IXGBE_TX_DISABLED,
 };
 
 #define ring_uses_build_skb(ring) \
@@ -347,6 +352,10 @@ struct ixgbe_ring {
                struct ixgbe_rx_queue_stats rx_stats;
        };
        struct xdp_rxq_info xdp_rxq;
+       struct xdp_umem *xsk_umem;
+       struct zero_copy_allocator zca; /* ZC allocator anchor */
+       u16 ring_idx;           /* {rx,tx,xdp}_ring back reference idx */
+       u16 rx_buf_len;
 } ____cacheline_internodealigned_in_smp;
 
 enum ixgbe_ring_f_enum {
@@ -764,6 +773,11 @@ struct ixgbe_adapter {
 #ifdef CONFIG_XFRM_OFFLOAD
        struct ixgbe_ipsec *ipsec;
 #endif /* CONFIG_XFRM_OFFLOAD */
+
+       /* AF_XDP zero-copy */
+       struct xdp_umem **xsk_umems;
+       u16 num_xsk_umems_used;
+       u16 num_xsk_umems;
 };
 
 static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
index 970f71d5da04bd8e6baa7213d296ac30f2b09edf..0bd1294ba51737240d510f31bbd255faceffeb11 100644 (file)
@@ -3484,17 +3484,6 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
        IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
 }
 
-/**
- * ixgbe_fw_recovery_mode - Check if in FW NVM recovery mode
- * @hw: pointer to hardware structure
- */
-bool ixgbe_fw_recovery_mode(struct ixgbe_hw *hw)
-{
-       if (hw->mac.ops.fw_recovery_mode)
-               return hw->mac.ops.fw_recovery_mode(hw);
-       return false;
-}
-
 /**
  *  ixgbe_get_device_caps_generic - Get additional device capabilities
  *  @hw: pointer to hardware structure
index d361f570ca37be6a8df4cd2e2c7a806cee8b1a54..62e6499e4146b14f5d43311125a582c9a31eb349 100644 (file)
@@ -1055,7 +1055,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
        int txr_remaining = adapter->num_tx_queues;
        int xdp_remaining = adapter->num_xdp_queues;
        int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
-       int err;
+       int err, i;
 
        /* only one q_vector if MSI-X is disabled. */
        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
@@ -1097,6 +1097,21 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
                xdp_idx += xqpv;
        }
 
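+       /* record each ring's own index as a back reference (kept in
+        * the new ring_idx field of struct ixgbe_ring)
+        */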
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               if (adapter->rx_ring[i])
+                       adapter->rx_ring[i]->ring_idx = i;
+       }
+
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               if (adapter->tx_ring[i])
+                       adapter->tx_ring[i]->ring_idx = i;
+       }
+
+       for (i = 0; i < adapter->num_xdp_queues; i++) {
+               if (adapter->xdp_ring[i])
+                       adapter->xdp_ring[i]->ring_idx = i;
+       }
+
        return 0;
 
 err_out:
index 140e87a10ff54f356bf6459b1644de1dceb8de11..51268772a9993468e99cf2aa9cb44ee517faf308 100644 (file)
 #include <net/tc_act/tc_mirred.h>
 #include <net/vxlan.h>
 #include <net/mpls.h>
+#include <net/xdp_sock.h>
 
 #include "ixgbe.h"
 #include "ixgbe_common.h"
 #include "ixgbe_dcb_82599.h"
 #include "ixgbe_sriov.h"
 #include "ixgbe_model.h"
+#include "ixgbe_txrx_common.h"
 
 char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
@@ -893,8 +895,8 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
        }
 }
 
-static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
-                                         u64 qmask)
+void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
+                           u64 qmask)
 {
        u32 mask;
 
@@ -1673,9 +1675,9 @@ static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
  * order to populate the hash, checksum, VLAN, timestamp, protocol, and
  * other fields within the skb.
  **/
-static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
-                                    union ixgbe_adv_rx_desc *rx_desc,
-                                    struct sk_buff *skb)
+void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
+                             union ixgbe_adv_rx_desc *rx_desc,
+                             struct sk_buff *skb)
 {
        struct net_device *dev = rx_ring->netdev;
        u32 flags = rx_ring->q_vector->adapter->flags;
@@ -1708,8 +1710,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
        skb->protocol = eth_type_trans(skb, dev);
 }
 
-static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
-                        struct sk_buff *skb)
+void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
+                 struct sk_buff *skb)
 {
        napi_gro_receive(&q_vector->napi, skb);
 }
@@ -1868,9 +1870,9 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
  *
  * Returns true if an error was encountered and skb was freed.
  **/
-static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
-                                 union ixgbe_adv_rx_desc *rx_desc,
-                                 struct sk_buff *skb)
+bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
+                          union ixgbe_adv_rx_desc *rx_desc,
+                          struct sk_buff *skb)
 {
        struct net_device *netdev = rx_ring->netdev;
 
@@ -2186,14 +2188,6 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
        return skb;
 }
 
-#define IXGBE_XDP_PASS         0
-#define IXGBE_XDP_CONSUMED     BIT(0)
-#define IXGBE_XDP_TX           BIT(1)
-#define IXGBE_XDP_REDIR                BIT(2)
-
-static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
-                              struct xdp_frame *xdpf);
-
 static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
                                     struct ixgbe_ring *rx_ring,
                                     struct xdp_buff *xdp)
@@ -3167,7 +3161,11 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
 #endif
 
        ixgbe_for_each_ring(ring, q_vector->tx) {
-               if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
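+               /* rings bound to an AF_XDP UMEM are cleaned by the
+                * zero-copy variant
+                */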
+               bool wd = ring->xsk_umem ?
+                         ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
+                         ixgbe_clean_tx_irq(q_vector, ring, budget);
+
+               if (!wd)
                        clean_complete = false;
        }
 
@@ -3183,7 +3181,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
                per_ring_budget = budget;
 
        ixgbe_for_each_ring(ring, q_vector->rx) {
-               int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
+               int cleaned = ring->xsk_umem ?
+                             ixgbe_clean_rx_irq_zc(q_vector, ring,
+                                                   per_ring_budget) :
+                             ixgbe_clean_rx_irq(q_vector, ring,
                                                 per_ring_budget);
 
                work_done += cleaned;
@@ -3196,11 +3197,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
                return budget;
 
        /* all work done, exit the polling mode */
-       napi_complete_done(napi, work_done);
-       if (adapter->rx_itr_setting & 1)
-               ixgbe_set_itr(q_vector);
-       if (!test_bit(__IXGBE_DOWN, &adapter->state))
-               ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
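+       /* re-arm interrupts only if NAPI actually completed */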
+       if (likely(napi_complete_done(napi, work_done))) {
+               if (adapter->rx_itr_setting & 1)
+                       ixgbe_set_itr(q_vector);
+               if (!test_bit(__IXGBE_DOWN, &adapter->state))
+                       ixgbe_irq_enable_queues(adapter,
+                                               BIT_ULL(q_vector->v_idx));
+       }
 
        return min(work_done, budget - 1);
 }
@@ -3473,6 +3476,10 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
        u32 txdctl = IXGBE_TXDCTL_ENABLE;
        u8 reg_idx = ring->reg_idx;
 
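+       /* only XDP Tx rings can be bound to an AF_XDP UMEM */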
+       ring->xsk_umem = NULL;
+       if (ring_is_xdp(ring))
+               ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
+
        /* disable queue to avoid issues while updating state */
        IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
        IXGBE_WRITE_FLUSH(hw);
@@ -3577,12 +3584,18 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
                else
                        mtqc |= IXGBE_MTQC_64VF;
        } else {
-               if (tcs > 4)
+               if (tcs > 4) {
                        mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
-               else if (tcs > 1)
+               } else if (tcs > 1) {
                        mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
-               else
-                       mtqc = IXGBE_MTQC_64Q_1PB;
+               } else {
+                       u8 max_txq = adapter->num_tx_queues +
+                               adapter->num_xdp_queues;
+                       if (max_txq > 63)
+                               mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+                       else
+                               mtqc = IXGBE_MTQC_64Q_1PB;
+               }
        }
 
        IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
@@ -3705,10 +3718,27 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
        srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
 
        /* configure the packet buffer length */
-       if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state))
+       if (rx_ring->xsk_umem) {
+               u32 xsk_buf_len = rx_ring->xsk_umem->chunk_size_nohr -
+                                 XDP_PACKET_HEADROOM;
+
+               /* If the MAC supports setting RXDCTL.RLPML,
+                * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
+                * RXDCTL.RLPML is set to the actual UMEM buffer
+                * size. If not, we are stuck with a 1KB buffer
+                * size resolution, and frames larger than the
+                * UMEM buffer size (as seen at 1KB granularity)
+                * will be dropped.
+                */
+               if (hw->mac.type != ixgbe_mac_82599EB)
+                       srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+               else
+                       srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+       } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) {
                srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-       else
+       } else {
                srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+       }
 
        /* configure descriptor type */
        srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
@@ -4031,6 +4061,19 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
        u32 rxdctl;
        u8 reg_idx = ring->reg_idx;
 
+       xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+       ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
+       if (ring->xsk_umem) {
+               ring->zca.free = ixgbe_zca_free;
+               WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+                                                  MEM_TYPE_ZERO_COPY,
+                                                  &ring->zca));
+
+       } else {
+               WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+                                                  MEM_TYPE_PAGE_SHARED, NULL));
+       }
+
        /* disable queue to avoid use of these values while updating state */
        rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
        rxdctl &= ~IXGBE_RXDCTL_ENABLE;
@@ -4080,6 +4123,17 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 #endif
        }
 
+       if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) {
+               u32 xsk_buf_len = ring->xsk_umem->chunk_size_nohr -
+                                 XDP_PACKET_HEADROOM;
+
+               rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
+                           IXGBE_RXDCTL_RLPML_EN);
+               rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN;
+
+               ring->rx_buf_len = xsk_buf_len;
+       }
+
        /* initialize rx_buffer_info */
        memset(ring->rx_buffer_info, 0,
               sizeof(struct ixgbe_rx_buffer) * ring->count);
@@ -4093,7 +4147,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
 
        ixgbe_rx_desc_queue_enable(adapter, ring);
-       ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
+       if (ring->xsk_umem)
+               ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
+       else
+               ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
 }
 
 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
@@ -5173,6 +5230,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        struct hlist_node *node2;
        struct ixgbe_fdir_filter *filter;
+       u64 action;
 
        spin_lock(&adapter->fdir_perfect_lock);
 
@@ -5181,12 +5239,17 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
 
        hlist_for_each_entry_safe(filter, node2,
                                  &adapter->fdir_filter_list, fdir_node) {
+               action = filter->action;
+               if (action != IXGBE_FDIR_DROP_QUEUE && action != 0)
+                       action = (action >>
+                                 ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1;
+
                ixgbe_fdir_write_perfect_filter_82599(hw,
                                &filter->filter,
                                filter->sw_idx,
-                               (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
+                               (action == IXGBE_FDIR_DROP_QUEUE) ?
                                IXGBE_FDIR_DROP_QUEUE :
-                               adapter->rx_ring[filter->action]->reg_idx);
+                               adapter->rx_ring[action]->reg_idx);
        }
 
        spin_unlock(&adapter->fdir_perfect_lock);
@@ -5201,6 +5264,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
        u16 i = rx_ring->next_to_clean;
        struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
 
+       if (rx_ring->xsk_umem) {
+               ixgbe_xsk_clean_rx_ring(rx_ring);
+               goto skip_free;
+       }
+
        /* Free all the Rx ring sk_buffs */
        while (i != rx_ring->next_to_alloc) {
                if (rx_buffer->skb) {
@@ -5239,6 +5307,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
                }
        }
 
+skip_free:
        rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
@@ -5883,6 +5952,11 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
        u16 i = tx_ring->next_to_clean;
        struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
 
+       if (tx_ring->xsk_umem) {
+               ixgbe_xsk_clean_tx_ring(tx_ring);
+               goto out;
+       }
+
        while (i != tx_ring->next_to_use) {
                union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
 
@@ -5934,6 +6008,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
        if (!ring_is_xdp(tx_ring))
                netdev_tx_reset_queue(txring_txq(tx_ring));
 
+out:
        /* reset next_to_use and next_to_clean */
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
@@ -6434,7 +6509,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
        struct device *dev = rx_ring->dev;
        int orig_node = dev_to_node(dev);
        int ring_node = -1;
-       int size, err;
+       int size;
 
        size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
 
@@ -6471,13 +6546,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
                             rx_ring->queue_index) < 0)
                goto err;
 
-       err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq,
-                                        MEM_TYPE_PAGE_SHARED, NULL);
-       if (err) {
-               xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
-               goto err;
-       }
-
        rx_ring->xdp_prog = adapter->xdp_prog;
 
        return 0;
@@ -8102,9 +8170,6 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
        return __ixgbe_maybe_stop_tx(tx_ring, size);
 }
 
-#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
-                      IXGBE_TXD_CMD_RS)
-
 static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
                        struct ixgbe_tx_buffer *first,
                        const u8 hdr_len)
@@ -8457,8 +8522,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 }
 
 #endif
-static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
-                              struct xdp_frame *xdpf)
+int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+                       struct xdp_frame *xdpf)
 {
        struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
        struct ixgbe_tx_buffer *tx_buffer;
@@ -8680,6 +8745,8 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
                return NETDEV_TX_OK;
 
        tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
+       if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state)))
+               return NETDEV_TX_BUSY;
 
        return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
 }
@@ -10191,12 +10258,19 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
                xdp->prog_id = adapter->xdp_prog ?
                        adapter->xdp_prog->aux->id : 0;
                return 0;
+       case XDP_QUERY_XSK_UMEM:
+               return ixgbe_xsk_umem_query(adapter, &xdp->xsk.umem,
+                                           xdp->xsk.queue_id);
+       case XDP_SETUP_XSK_UMEM:
+               return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,
+                                           xdp->xsk.queue_id);
+
        default:
                return -EINVAL;
        }
 }
 
-static void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
+void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
 {
        /* Force memory writes to complete before letting h/w know there
         * are new descriptors to fetch.
@@ -10226,6 +10300,9 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
        if (unlikely(!ring))
                return -ENXIO;
 
+       if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
+               return -ENXIO;
+
        for (i = 0; i < n; i++) {
                struct xdp_frame *xdpf = frames[i];
                int err;
@@ -10287,8 +10364,162 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_features_check     = ixgbe_features_check,
        .ndo_bpf                = ixgbe_xdp,
        .ndo_xdp_xmit           = ixgbe_xdp_xmit,
+       .ndo_xsk_async_xmit     = ixgbe_xsk_async_xmit,
 };
 
+static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter,
+                                struct ixgbe_ring *tx_ring)
+{
+       unsigned long wait_delay, delay_interval;
+       struct ixgbe_hw *hw = &adapter->hw;
+       u8 reg_idx = tx_ring->reg_idx;
+       int wait_loop;
+       u32 txdctl;
+
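+       /* request a software flush and poll until TXDCTL.ENABLE clears */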
+       IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+
+       /* delay mechanism from ixgbe_disable_tx */
+       delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
+
+       wait_loop = IXGBE_MAX_RX_DESC_POLL;
+       wait_delay = delay_interval;
+
+       while (wait_loop--) {
+               usleep_range(wait_delay, wait_delay + 10);
+               wait_delay += delay_interval * 2;
+               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+
+               if (!(txdctl & IXGBE_TXDCTL_ENABLE))
+                       return;
+       }
+
+       e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n");
+}
+
+static void ixgbe_disable_txr(struct ixgbe_adapter *adapter,
+                             struct ixgbe_ring *tx_ring)
+{
+       set_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
+       ixgbe_disable_txr_hw(adapter, tx_ring);
+}
+
+static void ixgbe_disable_rxr_hw(struct ixgbe_adapter *adapter,
+                                struct ixgbe_ring *rx_ring)
+{
+       unsigned long wait_delay, delay_interval;
+       struct ixgbe_hw *hw = &adapter->hw;
+       u8 reg_idx = rx_ring->reg_idx;
+       int wait_loop;
+       u32 rxdctl;
+
+       rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+       rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+       rxdctl |= IXGBE_RXDCTL_SWFLSH;
+
+       /* write value back with RXDCTL.ENABLE bit cleared */
+       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+
+       /* RXDCTL.EN may not change on 82598 if link is down, so skip it */
+       if (hw->mac.type == ixgbe_mac_82598EB &&
+           !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+               return;
+
+       /* delay mechanism from ixgbe_disable_rx */
+       delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
+
+       wait_loop = IXGBE_MAX_RX_DESC_POLL;
+       wait_delay = delay_interval;
+
+       while (wait_loop--) {
+               usleep_range(wait_delay, wait_delay + 10);
+               wait_delay += delay_interval * 2;
+               rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+
+               if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
+                       return;
+       }
+
+       e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n");
+}
+
+static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring)
+{
+       memset(&tx_ring->stats, 0, sizeof(tx_ring->stats));
+       memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
+}
+
+static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
+{
+       memset(&rx_ring->stats, 0, sizeof(rx_ring->stats));
+       memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
+}
+
+/**
+ * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
+ * @adapter: adapter structure
+ * @ring: ring index
+ *
+ * This function disables the Rx, Tx and (if present) XDP Tx ring at
+ * the given index. It assumes that the netdev is running.
+ **/
+void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
+{
+       struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
+
+       rx_ring = adapter->rx_ring[ring];
+       tx_ring = adapter->tx_ring[ring];
+       xdp_ring = adapter->xdp_ring[ring];
+
+       ixgbe_disable_txr(adapter, tx_ring);
+       if (xdp_ring)
+               ixgbe_disable_txr(adapter, xdp_ring);
+       ixgbe_disable_rxr_hw(adapter, rx_ring);
+
+       if (xdp_ring)
+               synchronize_sched();
+
+       /* Rx/Tx/XDP Tx share the same napi context. */
+       napi_disable(&rx_ring->q_vector->napi);
+
+       ixgbe_clean_tx_ring(tx_ring);
+       if (xdp_ring)
+               ixgbe_clean_tx_ring(xdp_ring);
+       ixgbe_clean_rx_ring(rx_ring);
+
+       ixgbe_reset_txr_stats(tx_ring);
+       if (xdp_ring)
+               ixgbe_reset_txr_stats(xdp_ring);
+       ixgbe_reset_rxr_stats(rx_ring);
+}
+
+/**
+ * ixgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings
+ * @adapter: adapter structure
+ * @ring: ring index
+ *
+ * This function enables the Rx, Tx and (if present) XDP Tx ring at
+ * the given index. It assumes that the netdev is running.
+ **/
+void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
+{
+       struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
+
+       rx_ring = adapter->rx_ring[ring];
+       tx_ring = adapter->tx_ring[ring];
+       xdp_ring = adapter->xdp_ring[ring];
+
+       /* Rx/Tx/XDP Tx share the same napi context. */
+       napi_enable(&rx_ring->q_vector->napi);
+
+       ixgbe_configure_tx_ring(adapter, tx_ring);
+       if (xdp_ring)
+               ixgbe_configure_tx_ring(adapter, xdp_ring);
+       ixgbe_configure_rx_ring(adapter, rx_ring);
+
+       clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
+       if (xdp_ring)
+               clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
+}
+
 /**
  * ixgbe_enumerate_functions - Get the number of ports this device has
  * @adapter: adapter structure
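
The ring quiesce helpers added to ixgbe_main.c above poll TXDCTL/RXDCTL with a
growing delay: the k-th sleep is (2k + 1) * delay_interval, so the
IXGBE_MAX_RX_DESC_POLL (10) iterations together consume 100 * delay_interval,
i.e. exactly the completion timeout that delay_interval was derived from. A
standalone sketch of that schedule, assuming a 100 ms timeout in place of
ixgbe_get_completion_timeout():

    #include <stdio.h>

    int main(void)
    {
            unsigned long timeout_us = 100000;      /* assumed completion timeout */
            unsigned long delay_interval = timeout_us / 100;
            unsigned long wait_delay = delay_interval, total = 0;
            int wait_loop = 10;                     /* IXGBE_MAX_RX_DESC_POLL */

            while (wait_loop--) {
                    total += wait_delay;
                    printf("sleep %lu us (cumulative %lu us)\n", wait_delay, total);
                    wait_delay += delay_interval * 2;
            }
            return 0;       /* total == timeout_us */
    }
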
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
new file mode 100644 (file)
index 0000000..53d4089
--- /dev/null
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2018 Intel Corporation. */
+
+#ifndef _IXGBE_TXRX_COMMON_H_
+#define _IXGBE_TXRX_COMMON_H_
+
+#define IXGBE_XDP_PASS         0
+#define IXGBE_XDP_CONSUMED     BIT(0)
+#define IXGBE_XDP_TX           BIT(1)
+#define IXGBE_XDP_REDIR                BIT(2)
+
+#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
+                      IXGBE_TXD_CMD_RS)
+
+int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+                       struct xdp_frame *xdpf);
+bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
+                          union ixgbe_adv_rx_desc *rx_desc,
+                          struct sk_buff *skb);
+void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
+                             union ixgbe_adv_rx_desc *rx_desc,
+                             struct sk_buff *skb);
+void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
+                 struct sk_buff *skb);
+void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring);
+void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask);
+
+void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring);
+void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring);
+
+struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
+                               struct ixgbe_ring *ring);
+int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem,
+                        u16 qid);
+int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
+                        u16 qid);
+
+void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
+
+void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count);
+int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
+                         struct ixgbe_ring *rx_ring,
+                         const int budget);
+void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring);
+bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
+                           struct ixgbe_ring *tx_ring, int napi_budget);
+int ixgbe_xsk_async_xmit(struct net_device *dev, u32 queue_id);
+void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring);
+
+#endif /* _IXGBE_TXRX_COMMON_H_ */
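
The IXGBE_XDP_* verdicts defined above are individual bits rather than enum
values so that a napi poll can OR per-frame results together and finish with
at most one xdp_do_flush_map() and one tail-register write, as
ixgbe_clean_rx_irq_zc() below does. A self-contained sketch, with canned
verdicts standing in for bpf_prog_run_xdp() results:

    #include <stdio.h>

    #define IXGBE_XDP_PASS     0
    #define IXGBE_XDP_CONSUMED (1u << 0)
    #define IXGBE_XDP_TX       (1u << 1)
    #define IXGBE_XDP_REDIR    (1u << 2)

    int main(void)
    {
            /* pretend verdicts for one napi poll, one per received frame */
            unsigned int res[] = { IXGBE_XDP_PASS, IXGBE_XDP_TX,
                                   IXGBE_XDP_REDIR, IXGBE_XDP_TX };
            unsigned int xdp_xmit = 0;
            unsigned int i;

            for (i = 0; i < sizeof(res) / sizeof(res[0]); i++)
                    if (res[i] & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))
                            xdp_xmit |= res[i];

            if (xdp_xmit & IXGBE_XDP_REDIR)
                    printf("one xdp_do_flush_map() for the whole poll\n");
            if (xdp_xmit & IXGBE_XDP_TX)
                    printf("one tail doorbell write for the whole poll\n");
            return 0;
    }
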
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
new file mode 100644 (file)
index 0000000..65c3e2c
--- /dev/null
@@ -0,0 +1,801 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2018 Intel Corporation. */
+
+#include <linux/bpf_trace.h>
+#include <net/xdp_sock.h>
+#include <net/xdp.h>
+
+#include "ixgbe.h"
+#include "ixgbe_txrx_common.h"
+
+struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
+                               struct ixgbe_ring *ring)
+{
+       bool xdp_on = READ_ONCE(adapter->xdp_prog);
+       int qid = ring->ring_idx;
+
+       if (!xdp_on || !adapter->xsk_umems ||
+           qid >= adapter->num_xsk_umems || !adapter->xsk_umems[qid])
+               return NULL;
+
+       return adapter->xsk_umems[qid];
+}
+
+static int ixgbe_alloc_xsk_umems(struct ixgbe_adapter *adapter)
+{
+       if (adapter->xsk_umems)
+               return 0;
+
+       adapter->num_xsk_umems_used = 0;
+       adapter->num_xsk_umems = adapter->num_rx_queues;
+       adapter->xsk_umems = kcalloc(adapter->num_xsk_umems,
+                                    sizeof(*adapter->xsk_umems),
+                                    GFP_KERNEL);
+       if (!adapter->xsk_umems) {
+               adapter->num_xsk_umems = 0;
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static int ixgbe_add_xsk_umem(struct ixgbe_adapter *adapter,
+                             struct xdp_umem *umem,
+                             u16 qid)
+{
+       int err;
+
+       err = ixgbe_alloc_xsk_umems(adapter);
+       if (err)
+               return err;
+
+       adapter->xsk_umems[qid] = umem;
+       adapter->num_xsk_umems_used++;
+
+       return 0;
+}
+
+static void ixgbe_remove_xsk_umem(struct ixgbe_adapter *adapter, u16 qid)
+{
+       adapter->xsk_umems[qid] = NULL;
+       adapter->num_xsk_umems_used--;
+
+       if (adapter->num_xsk_umems_used == 0) {
+               kfree(adapter->xsk_umems);
+               adapter->xsk_umems = NULL;
+               adapter->num_xsk_umems = 0;
+       }
+}
+
+static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter,
+                                 struct xdp_umem *umem)
+{
+       struct device *dev = &adapter->pdev->dev;
+       unsigned int i, j;
+       dma_addr_t dma;
+
+       for (i = 0; i < umem->npgs; i++) {
+               dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
+                                        DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
+               if (dma_mapping_error(dev, dma))
+                       goto out_unmap;
+
+               umem->pages[i].dma = dma;
+       }
+
+       return 0;
+
+out_unmap:
+       for (j = 0; j < i; j++) {
+               dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
+                                    DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
+               umem->pages[j].dma = 0;
+       }
+
+       return -1;
+}
+
+static void ixgbe_xsk_umem_dma_unmap(struct ixgbe_adapter *adapter,
+                                    struct xdp_umem *umem)
+{
+       struct device *dev = &adapter->pdev->dev;
+       unsigned int i;
+
+       for (i = 0; i < umem->npgs; i++) {
+               dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
+                                    DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
+
+               umem->pages[i].dma = 0;
+       }
+}
+
+static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
+                                struct xdp_umem *umem,
+                                u16 qid)
+{
+       struct xdp_umem_fq_reuse *reuseq;
+       bool if_running;
+       int err;
+
+       if (qid >= adapter->num_rx_queues)
+               return -EINVAL;
+
+       if (adapter->xsk_umems) {
+               if (qid >= adapter->num_xsk_umems)
+                       return -EINVAL;
+               if (adapter->xsk_umems[qid])
+                       return -EBUSY;
+       }
+
+       reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count);
+       if (!reuseq)
+               return -ENOMEM;
+
+       xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
+
+       err = ixgbe_xsk_umem_dma_map(adapter, umem);
+       if (err)
+               return err;
+
+       if_running = netif_running(adapter->netdev) &&
+                    READ_ONCE(adapter->xdp_prog);
+
+       if (if_running)
+               ixgbe_txrx_ring_disable(adapter, qid);
+
+       err = ixgbe_add_xsk_umem(adapter, umem, qid);
+
+       if (if_running)
+               ixgbe_txrx_ring_enable(adapter, qid);
+
+       return err;
+}
+
+static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
+{
+       bool if_running;
+
+       if (!adapter->xsk_umems || qid >= adapter->num_xsk_umems ||
+           !adapter->xsk_umems[qid])
+               return -EINVAL;
+
+       if_running = netif_running(adapter->netdev) &&
+                    READ_ONCE(adapter->xdp_prog);
+
+       if (if_running)
+               ixgbe_txrx_ring_disable(adapter, qid);
+
+       ixgbe_xsk_umem_dma_unmap(adapter, adapter->xsk_umems[qid]);
+       ixgbe_remove_xsk_umem(adapter, qid);
+
+       if (if_running)
+               ixgbe_txrx_ring_enable(adapter, qid);
+
+       return 0;
+}
+
+int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem,
+                        u16 qid)
+{
+       if (qid >= adapter->num_rx_queues)
+               return -EINVAL;
+
+       if (adapter->xsk_umems) {
+               if (qid >= adapter->num_xsk_umems)
+                       return -EINVAL;
+               *umem = adapter->xsk_umems[qid];
+               return 0;
+       }
+
+       *umem = NULL;
+       return 0;
+}
+
+int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
+                        u16 qid)
+{
+       return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) :
+               ixgbe_xsk_umem_disable(adapter, qid);
+}
+
+static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
+                           struct ixgbe_ring *rx_ring,
+                           struct xdp_buff *xdp)
+{
+       int err, result = IXGBE_XDP_PASS;
+       struct bpf_prog *xdp_prog;
+       struct xdp_frame *xdpf;
+       u32 act;
+
+       rcu_read_lock();
+       xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+       act = bpf_prog_run_xdp(xdp_prog, xdp);
+       xdp->handle += xdp->data - xdp->data_hard_start;
+       switch (act) {
+       case XDP_PASS:
+               break;
+       case XDP_TX:
+               xdpf = convert_to_xdp_frame(xdp);
+               if (unlikely(!xdpf)) {
+                       result = IXGBE_XDP_CONSUMED;
+                       break;
+               }
+               result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+               break;
+       case XDP_REDIRECT:
+               err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+               result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
+               break;
+       default:
+               bpf_warn_invalid_xdp_action(act);
+               /* fallthrough */
+       case XDP_ABORTED:
+               trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+               /* fallthrough -- handle aborts by dropping packet */
+       case XDP_DROP:
+               result = IXGBE_XDP_CONSUMED;
+               break;
+       }
+       rcu_read_unlock();
+       return result;
+}
+
+static struct
+ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring,
+                                       unsigned int size)
+{
+       struct ixgbe_rx_buffer *bi;
+
+       bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+
+       /* we are reusing so sync this buffer for CPU use */
+       dma_sync_single_range_for_cpu(rx_ring->dev,
+                                     bi->dma, 0,
+                                     size,
+                                     DMA_BIDIRECTIONAL);
+
+       return bi;
+}
+
+static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring,
+                                    struct ixgbe_rx_buffer *obi)
+{
+       unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
+       u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
+       u16 nta = rx_ring->next_to_alloc;
+       struct ixgbe_rx_buffer *nbi;
+
+       nbi = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc];
+       /* update, and store next to alloc */
+       nta++;
+       rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+       /* transfer page from old buffer to new buffer */
+       nbi->dma = obi->dma & mask;
+       nbi->dma += hr;
+
+       nbi->addr = (void *)((unsigned long)obi->addr & mask);
+       nbi->addr += hr;
+
+       nbi->handle = obi->handle & mask;
+       nbi->handle += rx_ring->xsk_umem->headroom;
+
+       obi->addr = NULL;
+       obi->skb = NULL;
+}
+
+void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
+{
+       struct ixgbe_rx_buffer *bi;
+       struct ixgbe_ring *rx_ring;
+       u64 hr, mask;
+       u16 nta;
+
+       rx_ring = container_of(alloc, struct ixgbe_ring, zca);
+       hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
+       mask = rx_ring->xsk_umem->chunk_mask;
+
+       nta = rx_ring->next_to_alloc;
+       bi = rx_ring->rx_buffer_info;
+
+       nta++;
+       rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+       handle &= mask;
+
+       bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
+       bi->dma += hr;
+
+       bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
+       bi->addr += hr;
+
+       bi->handle = (u64)handle + rx_ring->xsk_umem->headroom;
+}
+
+static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring,
+                                 struct ixgbe_rx_buffer *bi)
+{
+       struct xdp_umem *umem = rx_ring->xsk_umem;
+       void *addr = bi->addr;
+       u64 handle, hr;
+
+       if (addr)
+               return true;
+
+       if (!xsk_umem_peek_addr(umem, &handle)) {
+               rx_ring->rx_stats.alloc_rx_page_failed++;
+               return false;
+       }
+
+       hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+       bi->dma = xdp_umem_get_dma(umem, handle);
+       bi->dma += hr;
+
+       bi->addr = xdp_umem_get_data(umem, handle);
+       bi->addr += hr;
+
+       bi->handle = handle + umem->headroom;
+
+       xsk_umem_discard_addr(umem);
+       return true;
+}
+
+static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring,
+                                      struct ixgbe_rx_buffer *bi)
+{
+       struct xdp_umem *umem = rx_ring->xsk_umem;
+       u64 handle, hr;
+
+       if (!xsk_umem_peek_addr_rq(umem, &handle)) {
+               rx_ring->rx_stats.alloc_rx_page_failed++;
+               return false;
+       }
+
+       handle &= rx_ring->xsk_umem->chunk_mask;
+
+       hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+       bi->dma = xdp_umem_get_dma(umem, handle);
+       bi->dma += hr;
+
+       bi->addr = xdp_umem_get_data(umem, handle);
+       bi->addr += hr;
+
+       bi->handle = handle + umem->headroom;
+
+       xsk_umem_discard_addr_rq(umem);
+       return true;
+}
+
+static __always_inline bool
+__ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
+                           bool alloc(struct ixgbe_ring *rx_ring,
+                                      struct ixgbe_rx_buffer *bi))
+{
+       union ixgbe_adv_rx_desc *rx_desc;
+       struct ixgbe_rx_buffer *bi;
+       u16 i = rx_ring->next_to_use;
+       bool ok = true;
+
+       /* nothing to do */
+       if (!cleaned_count)
+               return true;
+
+       rx_desc = IXGBE_RX_DESC(rx_ring, i);
+       bi = &rx_ring->rx_buffer_info[i];
+       i -= rx_ring->count;
+
+       do {
+               if (!alloc(rx_ring, bi)) {
+                       ok = false;
+                       break;
+               }
+
+               /* sync the buffer for use by the device */
+               dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+                                                bi->page_offset,
+                                                rx_ring->rx_buf_len,
+                                                DMA_BIDIRECTIONAL);
+
+               /* Refresh the desc even if buffer_addrs didn't change
+                * because each write-back erases this info.
+                */
+               rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+
+               rx_desc++;
+               bi++;
+               i++;
+               if (unlikely(!i)) {
+                       rx_desc = IXGBE_RX_DESC(rx_ring, 0);
+                       bi = rx_ring->rx_buffer_info;
+                       i -= rx_ring->count;
+               }
+
+               /* clear the length for the next_to_use descriptor */
+               rx_desc->wb.upper.length = 0;
+
+               cleaned_count--;
+       } while (cleaned_count);
+
+       i += rx_ring->count;
+
+       if (rx_ring->next_to_use != i) {
+               rx_ring->next_to_use = i;
+
+               /* update next to alloc since we have filled the ring */
+               rx_ring->next_to_alloc = i;
+
+               /* Force memory writes to complete before letting h/w
+                * know there are new descriptors to fetch.  (Only
+                * applicable for weak-ordered memory model archs,
+                * such as IA-64).
+                */
+               wmb();
+               writel(i, rx_ring->tail);
+       }
+
+       return ok;
+}
+
+void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
+{
+       __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
+                                   ixgbe_alloc_buffer_slow_zc);
+}
+
+static bool ixgbe_alloc_rx_buffers_fast_zc(struct ixgbe_ring *rx_ring,
+                                          u16 count)
+{
+       return __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
+                                          ixgbe_alloc_buffer_zc);
+}
+
+static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
+                                             struct ixgbe_rx_buffer *bi,
+                                             struct xdp_buff *xdp)
+{
+       unsigned int metasize = xdp->data - xdp->data_meta;
+       unsigned int datasize = xdp->data_end - xdp->data;
+       struct sk_buff *skb;
+
+       /* allocate a skb to store the frags */
+       skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+                              xdp->data_end - xdp->data_hard_start,
+                              GFP_ATOMIC | __GFP_NOWARN);
+       if (unlikely(!skb))
+               return NULL;
+
+       skb_reserve(skb, xdp->data - xdp->data_hard_start);
+       memcpy(__skb_put(skb, datasize), xdp->data, datasize);
+       if (metasize)
+               skb_metadata_set(skb, metasize);
+
+       ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
+       return skb;
+}
+
+static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
+{
+       u32 ntc = rx_ring->next_to_clean + 1;
+
+       ntc = (ntc < rx_ring->count) ? ntc : 0;
+       rx_ring->next_to_clean = ntc;
+       prefetch(IXGBE_RX_DESC(rx_ring, ntc));
+}
+
+int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
+                         struct ixgbe_ring *rx_ring,
+                         const int budget)
+{
+       unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+       struct ixgbe_adapter *adapter = q_vector->adapter;
+       u16 cleaned_count = ixgbe_desc_unused(rx_ring);
+       unsigned int xdp_res, xdp_xmit = 0;
+       bool failure = false;
+       struct sk_buff *skb;
+       struct xdp_buff xdp;
+
+       xdp.rxq = &rx_ring->xdp_rxq;
+
+       while (likely(total_rx_packets < budget)) {
+               union ixgbe_adv_rx_desc *rx_desc;
+               struct ixgbe_rx_buffer *bi;
+               unsigned int size;
+
+               /* return some buffers to hardware, one at a time is too slow */
+               if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
+                       failure = failure ||
+                                 !ixgbe_alloc_rx_buffers_fast_zc(rx_ring,
+                                                                cleaned_count);
+                       cleaned_count = 0;
+               }
+
+               rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
+               size = le16_to_cpu(rx_desc->wb.upper.length);
+               if (!size)
+                       break;
+
+               /* This memory barrier is needed to keep us from reading
+                * any other fields out of the rx_desc until we know the
+                * descriptor has been written back
+                */
+               dma_rmb();
+
+               bi = ixgbe_get_rx_buffer_zc(rx_ring, size);
+
+               if (unlikely(!ixgbe_test_staterr(rx_desc,
+                                                IXGBE_RXD_STAT_EOP))) {
+                       struct ixgbe_rx_buffer *next_bi;
+
+                       ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
+                       ixgbe_inc_ntc(rx_ring);
+                       next_bi =
+                              &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+                       next_bi->skb = ERR_PTR(-EINVAL);
+                       continue;
+               }
+
+               if (unlikely(bi->skb)) {
+                       ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
+                       ixgbe_inc_ntc(rx_ring);
+                       continue;
+               }
+
+               xdp.data = bi->addr;
+               xdp.data_meta = xdp.data;
+               xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+               xdp.data_end = xdp.data + size;
+               xdp.handle = bi->handle;
+
+               xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp);
+
+               if (xdp_res) {
+                       if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
+                               xdp_xmit |= xdp_res;
+                               bi->addr = NULL;
+                               bi->skb = NULL;
+                       } else {
+                               ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
+                       }
+                       total_rx_packets++;
+                       total_rx_bytes += size;
+
+                       cleaned_count++;
+                       ixgbe_inc_ntc(rx_ring);
+                       continue;
+               }
+
+               /* XDP_PASS path */
+               skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);
+               if (!skb) {
+                       rx_ring->rx_stats.alloc_rx_buff_failed++;
+                       break;
+               }
+
+               cleaned_count++;
+               ixgbe_inc_ntc(rx_ring);
+
+               if (eth_skb_pad(skb))
+                       continue;
+
+               total_rx_bytes += skb->len;
+               total_rx_packets++;
+
+               ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
+               ixgbe_rx_skb(q_vector, skb);
+       }
+
+       if (xdp_xmit & IXGBE_XDP_REDIR)
+               xdp_do_flush_map();
+
+       if (xdp_xmit & IXGBE_XDP_TX) {
+               struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+
+               /* Force memory writes to complete before letting h/w
+                * know there are new descriptors to fetch.
+                */
+               wmb();
+               writel(ring->next_to_use, ring->tail);
+       }
+
+       u64_stats_update_begin(&rx_ring->syncp);
+       rx_ring->stats.packets += total_rx_packets;
+       rx_ring->stats.bytes += total_rx_bytes;
+       u64_stats_update_end(&rx_ring->syncp);
+       q_vector->rx.total_packets += total_rx_packets;
+       q_vector->rx.total_bytes += total_rx_bytes;
+
+       return failure ? budget : (int)total_rx_packets;
+}
+
+void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
+{
+       u16 i = rx_ring->next_to_clean;
+       struct ixgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i];
+
+       while (i != rx_ring->next_to_alloc) {
+               xsk_umem_fq_reuse(rx_ring->xsk_umem, bi->handle);
+               i++;
+               bi++;
+               if (i == rx_ring->count) {
+                       i = 0;
+                       bi = rx_ring->rx_buffer_info;
+               }
+       }
+}
+
+static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
+{
+       union ixgbe_adv_tx_desc *tx_desc = NULL;
+       struct ixgbe_tx_buffer *tx_bi;
+       bool work_done = true;
+       u32 len, cmd_type;
+       dma_addr_t dma;
+
+       while (budget-- > 0) {
+               if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
+                       work_done = false;
+                       break;
+               }
+
+               if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len))
+                       break;
+
+               dma_sync_single_for_device(xdp_ring->dev, dma, len,
+                                          DMA_BIDIRECTIONAL);
+
+               tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
+               tx_bi->bytecount = len;
+               tx_bi->xdpf = NULL;
+
+               tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
+               tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+               /* put descriptor type bits */
+               cmd_type = IXGBE_ADVTXD_DTYP_DATA |
+                          IXGBE_ADVTXD_DCMD_DEXT |
+                          IXGBE_ADVTXD_DCMD_IFCS;
+               cmd_type |= len | IXGBE_TXD_CMD;
+               tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+               tx_desc->read.olinfo_status =
+                       cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+               xdp_ring->next_to_use++;
+               if (xdp_ring->next_to_use == xdp_ring->count)
+                       xdp_ring->next_to_use = 0;
+       }
+
+       if (tx_desc) {
+               ixgbe_xdp_ring_update_tail(xdp_ring);
+               xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+       }
+
+       return !!budget && work_done;
+}
+
+static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
+                                     struct ixgbe_tx_buffer *tx_bi)
+{
+       xdp_return_frame(tx_bi->xdpf);
+       dma_unmap_single(tx_ring->dev,
+                        dma_unmap_addr(tx_bi, dma),
+                        dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
+       dma_unmap_len_set(tx_bi, len, 0);
+}
+
+bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
+                           struct ixgbe_ring *tx_ring, int napi_budget)
+{
+       unsigned int total_packets = 0, total_bytes = 0;
+       u32 i = tx_ring->next_to_clean, xsk_frames = 0;
+       unsigned int budget = q_vector->tx.work_limit;
+       struct xdp_umem *umem = tx_ring->xsk_umem;
+       union ixgbe_adv_tx_desc *tx_desc;
+       struct ixgbe_tx_buffer *tx_bi;
+       bool xmit_done;
+
+       tx_bi = &tx_ring->tx_buffer_info[i];
+       tx_desc = IXGBE_TX_DESC(tx_ring, i);
+       i -= tx_ring->count;
+
+       do {
+               if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
+                       break;
+
+               total_bytes += tx_bi->bytecount;
+               total_packets += tx_bi->gso_segs;
+
+               if (tx_bi->xdpf)
+                       ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
+               else
+                       xsk_frames++;
+
+               tx_bi->xdpf = NULL;
+
+               tx_bi++;
+               tx_desc++;
+               i++;
+               if (unlikely(!i)) {
+                       i -= tx_ring->count;
+                       tx_bi = tx_ring->tx_buffer_info;
+                       tx_desc = IXGBE_TX_DESC(tx_ring, 0);
+               }
+
+               /* issue prefetch for next Tx descriptor */
+               prefetch(tx_desc);
+
+               /* update budget accounting */
+               budget--;
+       } while (likely(budget));
+
+       i += tx_ring->count;
+       tx_ring->next_to_clean = i;
+
+       u64_stats_update_begin(&tx_ring->syncp);
+       tx_ring->stats.bytes += total_bytes;
+       tx_ring->stats.packets += total_packets;
+       u64_stats_update_end(&tx_ring->syncp);
+       q_vector->tx.total_bytes += total_bytes;
+       q_vector->tx.total_packets += total_packets;
+
+       if (xsk_frames)
+               xsk_umem_complete_tx(umem, xsk_frames);
+
+       xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
+       return budget > 0 && xmit_done;
+}
+
+int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       struct ixgbe_ring *ring;
+
+       if (test_bit(__IXGBE_DOWN, &adapter->state))
+               return -ENETDOWN;
+
+       if (!READ_ONCE(adapter->xdp_prog))
+               return -ENXIO;
+
+       if (qid >= adapter->num_xdp_queues)
+               return -ENXIO;
+
+       if (!adapter->xsk_umems || !adapter->xsk_umems[qid])
+               return -ENXIO;
+
+       ring = adapter->xdp_ring[qid];
+       if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
+               u64 eics = BIT_ULL(ring->q_vector->v_idx);
+
+               ixgbe_irq_rearm_queues(adapter, eics);
+       }
+
+       return 0;
+}
+
+void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
+{
+       u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
+       struct xdp_umem *umem = tx_ring->xsk_umem;
+       struct ixgbe_tx_buffer *tx_bi;
+       u32 xsk_frames = 0;
+
+       while (ntc != ntu) {
+               tx_bi = &tx_ring->tx_buffer_info[ntc];
+
+               if (tx_bi->xdpf)
+                       ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
+               else
+                       xsk_frames++;
+
+               tx_bi->xdpf = NULL;
+
+               ntc++;
+               if (ntc == tx_ring->count)
+                       ntc = 0;
+       }
+
+       if (xsk_frames)
+               xsk_umem_complete_tx(umem, xsk_frames);
+}
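
The zero-copy paths in this new file are driven entirely from userspace via an
AF_XDP socket bound with XDP_ZEROCOPY: bind() ends up in
ixgbe_xsk_umem_setup() above, and a Tx kick lands in ixgbe_xsk_async_xmit().
A hedged userspace sketch of that sequence, assuming 4.20-era uapi headers, an
XDP program already attached, and "eth0" plus the buffer sizes as
placeholders; error handling and the mmap()/descriptor-ring plumbing are
omitted:

    #include <linux/if_xdp.h>
    #include <net/if.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            struct xdp_umem_reg mr = {0};
            struct sockaddr_xdp sxdp = {0};
            int fd = socket(AF_XDP, SOCK_RAW, 0);
            int ring_sz = 2048;
            void *bufs = NULL;

            posix_memalign(&bufs, getpagesize(), 2048 * 2048);

            mr.addr = (uintptr_t)bufs;      /* 4 MiB UMEM, 2 KiB chunks */
            mr.len = 2048 * 2048;
            mr.chunk_size = 2048;
            mr.headroom = 0;
            setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
            setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &ring_sz, sizeof(ring_sz));
            setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ring_sz, sizeof(ring_sz));
            setsockopt(fd, SOL_XDP, XDP_RX_RING, &ring_sz, sizeof(ring_sz));
            setsockopt(fd, SOL_XDP, XDP_TX_RING, &ring_sz, sizeof(ring_sz));

            sxdp.sxdp_family = AF_XDP;
            sxdp.sxdp_ifindex = if_nametoindex("eth0");     /* placeholder */
            sxdp.sxdp_queue_id = 0;
            sxdp.sxdp_flags = XDP_ZEROCOPY;
            bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));

            /* ... mmap() the rings and fill the fill ring; a
             * sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0) then kicks
             * ndo_xsk_async_xmit() above ...
             */
            return 0;
    }
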
index 997cea675a37aec8349c0d9f4691300d0ba13ccc..e8a3231be0bf3e04865d39fb91550bac3de6e177 100644 (file)
@@ -21,7 +21,6 @@ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
        u32 msgbuf[IXGBE_VFMAILBOX_SIZE] = { 0 };
        struct ixgbe_hw *hw = &adapter->hw;
        struct sa_mbx_msg *sam;
-       u16 msglen;
        int ret;
 
        /* send the important bits to the PF */
@@ -38,16 +37,14 @@ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
        memcpy(sam->key, xs->aead->alg_key, sizeof(sam->key));
 
        msgbuf[0] = IXGBE_VF_IPSEC_ADD;
-       msglen = sizeof(*sam) + sizeof(msgbuf[0]);
 
        spin_lock_bh(&adapter->mbx_lock);
 
-       ret = hw->mbx.ops.write_posted(hw, msgbuf, msglen);
+       ret = hw->mbx.ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
        if (ret)
                goto out;
 
-       msglen = sizeof(msgbuf[0]) * 2;
-       ret = hw->mbx.ops.read_posted(hw, msgbuf, msglen);
+       ret = hw->mbx.ops.read_posted(hw, msgbuf, 2);
        if (ret)
                goto out;
 
@@ -80,11 +77,11 @@ static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa)
 
        spin_lock_bh(&adapter->mbx_lock);
 
-       err = hw->mbx.ops.write_posted(hw, msgbuf, sizeof(msgbuf));
+       err = hw->mbx.ops.write_posted(hw, msgbuf, 2);
        if (err)
                goto out;
 
-       err = hw->mbx.ops.read_posted(hw, msgbuf, sizeof(msgbuf));
+       err = hw->mbx.ops.read_posted(hw, msgbuf, 2);
        if (err)
                goto out;
 
@@ -470,7 +467,7 @@ int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
        }
 
        sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
-       if (unlikely(sa_idx > IXGBE_IPSEC_MAX_SA_COUNT)) {
+       if (unlikely(sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
                netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
                           __func__, sa_idx, xs->xso.offload_handle);
                return 0;
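
The mailbox changes in the hunks above are unit fixes: the VF mailbox
write_posted/read_posted ops take a length counted in 32-bit words, while the
deleted msglen arithmetic produced byte counts roughly four times too large.
A small illustration of the two units, with IXGBE_VFMAILBOX_SIZE assumed to be
16 dwords:

    #include <stdint.h>
    #include <stdio.h>

    #define IXGBE_VFMAILBOX_SIZE 16         /* dwords; value assumed here */

    int main(void)
    {
            uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE];

            /* what the old code passed (bytes) vs what the op expects (dwords) */
            printf("sizeof(msgbuf) = %zu bytes = %zu dwords\n",
                   sizeof(msgbuf), sizeof(msgbuf) / sizeof(msgbuf[0]));
            return 0;
    }
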
index 2373cd41a62573a877085814542e786b1e13547e..14f9679c957c6afd04929ea23ccd80afd19650ab 100644 (file)
@@ -1755,7 +1755,7 @@ static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
 }
 
 /* Set Tx descriptors fields relevant for CSUM calculation */
-static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
+static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
                               int ip_hdr_len, int l4_proto)
 {
        u32 command;
@@ -2645,14 +2645,15 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                int ip_hdr_len = 0;
                u8 l4_proto;
+               __be16 l3_proto = vlan_get_protocol(skb);
 
-               if (skb->protocol == htons(ETH_P_IP)) {
+               if (l3_proto == htons(ETH_P_IP)) {
                        struct iphdr *ip4h = ip_hdr(skb);
 
                        /* Calculate IPv4 checksum and L4 checksum */
                        ip_hdr_len = ip4h->ihl;
                        l4_proto = ip4h->protocol;
-               } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               } else if (l3_proto == htons(ETH_P_IPV6)) {
                        struct ipv6hdr *ip6h = ipv6_hdr(skb);
 
                        /* Read l4_protocol from one of IPv6 extra headers */
@@ -2664,7 +2665,7 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
                }
 
                return mvpp2_txq_desc_csum(skb_network_offset(skb),
-                               skb->protocol, ip_hdr_len, l4_proto);
+                                          l3_proto, ip_hdr_len, l4_proto);
        }
 
        return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
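
The mvpp2 hunks above exist because skb->protocol carries the outer ethertype,
so a VLAN-tagged IPv4 frame reports ETH_P_8021Q and the old checks matched
neither branch; vlan_get_protocol() digs out the encapsulated protocol
instead. A standalone sketch of the distinction (offsets assume a single
802.1Q tag):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ETH_P_8021Q 0x8100
    #define ETH_P_IP    0x0800

    static uint16_t l3_proto(const uint8_t *frame)
    {
            uint16_t proto = ntohs(*(const uint16_t *)(frame + 12));

            if (proto == ETH_P_8021Q)       /* skip the 4-byte VLAN tag */
                    proto = ntohs(*(const uint16_t *)(frame + 16));
            return proto;
    }

    int main(void)
    {
            uint8_t tagged[18] = {0};

            tagged[12] = 0x81; tagged[13] = 0x00;   /* outer: 802.1Q */
            tagged[16] = 0x08; tagged[17] = 0x00;   /* inner: IPv4   */
            printf("l3 proto = 0x%04x\n", l3_proto(tagged)); /* 0x0800 */
            return 0;
    }
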
index ff2fea0f8b75181eba81fd62d08194537116391e..0bd4351b2a49075fb6760034b44cad557e08192e 100644 (file)
@@ -988,8 +988,8 @@ static int pxa168_init_phy(struct net_device *dev)
        cmd.base.phy_address = pep->phy_addr;
        cmd.base.speed = pep->phy_speed;
        cmd.base.duplex = pep->phy_duplex;
-       ethtool_convert_legacy_u32_to_link_mode(cmd.link_modes.advertising,
-                                               PHY_BASIC_FEATURES);
+       bitmap_copy(cmd.link_modes.advertising, PHY_BASIC_FEATURES,
+                   __ETHTOOL_LINK_MODE_MASK_NBITS);
        cmd.base.autoneg = AUTONEG_ENABLE;
 
        if (cmd.base.speed != 0)
index cc1e9a96a43b27a234f89fd9e9d562e868db89f3..7dbfdac4067ad0ebaf1f4034948e6972589dfeb2 100644 (file)
@@ -243,11 +243,7 @@ static void mtk_phy_link_adjust(struct net_device *dev)
                if (dev->phydev->asym_pause)
                        rmt_adv |= LPA_PAUSE_ASYM;
 
-               if (dev->phydev->advertising & ADVERTISED_Pause)
-                       lcl_adv |= ADVERTISE_PAUSE_CAP;
-               if (dev->phydev->advertising & ADVERTISED_Asym_Pause)
-                       lcl_adv |= ADVERTISE_PAUSE_ASYM;
-
+               lcl_adv = ethtool_adv_to_lcl_adv_t(dev->phydev->advertising);
                flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
 
                if (flowctrl & FLOW_CTRL_TX)
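
The mtk hunk above folds the open-coded pause mapping into the
ethtool_adv_to_lcl_adv_t() helper. A self-contained sketch of the equivalent
logic, mapping ethtool advertising bits onto the MII ADVERTISE_PAUSE_*
register bits consumed by mii_resolve_flowctrl_fdx():

    #include <stdint.h>
    #include <stdio.h>

    #define ADVERTISED_Pause        (1u << 13)      /* ethtool.h values */
    #define ADVERTISED_Asym_Pause   (1u << 14)
    #define ADVERTISE_PAUSE_CAP     0x0400          /* mii.h values */
    #define ADVERTISE_PAUSE_ASYM    0x0800

    static uint16_t adv_to_lcl_adv_t(uint32_t advertising)
    {
            uint16_t lcl_adv = 0;

            if (advertising & ADVERTISED_Pause)
                    lcl_adv |= ADVERTISE_PAUSE_CAP;
            if (advertising & ADVERTISED_Asym_Pause)
                    lcl_adv |= ADVERTISE_PAUSE_ASYM;
            return lcl_adv;
    }

    int main(void)
    {
            printf("0x%04x\n", adv_to_lcl_adv_t(ADVERTISED_Pause |
                                                ADVERTISED_Asym_Pause)); /* 0x0c00 */
            return 0;
    }
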
index 01a967e717e7d90b2ad54f6178f23c376488f875..1d743bd5d2129305cb37172be82d3cbb7215a721 100644 (file)
@@ -54,6 +54,7 @@
 #include "en_stats.h"
 #include "en/fs.h"
 
+extern const struct net_device_ops mlx5e_netdev_ops;
 struct page_pool;
 
 #define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
@@ -209,6 +210,7 @@ enum mlx5e_priv_flag {
        MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
        MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
        MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3),
+       MLX5E_PFLAG_RX_NO_CSUM_COMPLETE = (1 << 4),
 };
 
 #define MLX5E_SET_PFLAG(params, pflag, enable)                 \
@@ -290,6 +292,7 @@ struct mlx5e_dcbx_dp {
 enum {
        MLX5E_RQ_STATE_ENABLED,
        MLX5E_RQ_STATE_AM,
+       MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
 };
 
 struct mlx5e_cq {
@@ -906,8 +909,8 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
 
 int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
 
-int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv);
-void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);
+int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
+void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
 
 int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
 void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv);
@@ -951,6 +954,8 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
                               struct ethtool_coalesce *coal);
 int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
                               struct ethtool_coalesce *coal);
+u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
+u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
 int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
                              struct ethtool_ts_info *info);
 int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
@@ -966,6 +971,9 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
 void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
                            struct mlx5e_params *params,
                            u16 max_channels, u16 mtu);
+void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
+                          struct mlx5e_params *params);
+void mlx5e_build_rss_params(struct mlx5e_params *params);
 u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
 void mlx5e_rx_dim_work(struct work_struct *work);
 void mlx5e_tx_dim_work(struct work_struct *work);
index bbf69e859b78c1322a9e218797114c9483a1a060..1431232c9a09ef1ddecdf1e497142f82544e4de0 100644 (file)
@@ -16,6 +16,8 @@ struct mlx5e_tc_table {
 
        DECLARE_HASHTABLE(mod_hdr_tbl, 8);
        DECLARE_HASHTABLE(hairpin_tbl, 8);
+
+       struct notifier_block     netdevice_nb;
 };
 
 struct mlx5e_flow_table {
index 45cdde694d20049af85391ffcc376438fadb921e..8657e0f26995b442545dabd3b7b8d5b299b5a380 100644 (file)
@@ -543,8 +543,11 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
        rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
-               netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
-                          __func__, arfs_rule->filter_id, arfs_rule->rxq, err);
+               priv->channel_stats[arfs_rule->rxq].rq.arfs_err++;
+               mlx5e_dbg(HW, priv,
+                         "%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed,err=%d\n",
+                         __func__, arfs_rule->filter_id, arfs_rule->rxq,
+                         tuple->ip_proto, err);
        }
 
 out:
index 8cd338ceb237c5a8cd6b0445552a50df178a97c0..c86fd770c46337af5049122a062efa027833c218 100644 (file)
@@ -140,6 +140,7 @@ static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
        "tx_cqe_moder",
        "rx_cqe_compress",
        "rx_striding_rq",
+       "rx_no_csum_complete",
 };
 
 int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
@@ -859,18 +860,30 @@ out:
        return err;
 }
 
+u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv)
+{
+       return sizeof(priv->channels.params.toeplitz_hash_key);
+}
+
 static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
 
-       return sizeof(priv->channels.params.toeplitz_hash_key);
+       return mlx5e_ethtool_get_rxfh_key_size(priv);
 }
 
-static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
+u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv)
 {
        return MLX5E_INDIR_RQT_SIZE;
 }
 
+static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+
+       return mlx5e_ethtool_get_rxfh_indir_size(priv);
+}
+
 static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
                          u8 *hfunc)
 {
@@ -1519,6 +1532,27 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
        return 0;
 }
 
+static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct mlx5e_channels *channels = &priv->channels;
+       struct mlx5e_channel *c;
+       int i;
+
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+               return 0;
+
+       for (i = 0; i < channels->num; i++) {
+               c = channels->c[i];
+               if (enable)
+                       __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+               else
+                       __clear_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+       }
+
+       return 0;
+}
+
 static int mlx5e_handle_pflag(struct net_device *netdev,
                              u32 wanted_flags,
                              enum mlx5e_priv_flag flag,
@@ -1570,6 +1604,12 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
        err = mlx5e_handle_pflag(netdev, pflags,
                                 MLX5E_PFLAG_RX_STRIDING_RQ,
                                 set_pflag_rx_striding_rq);
+       if (err)
+               goto out;
+
+       err = mlx5e_handle_pflag(netdev, pflags,
+                                MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
+                                set_pflag_rx_no_csum_complete);
 
 out:
        mutex_unlock(&priv->state_lock);
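
The new rx_no_csum_complete private flag above sets
MLX5E_RQ_STATE_NO_CSUM_COMPLETE on every RQ, evidently so the Rx path can skip
CHECKSUM_COMPLETE handling. Assuming the flag lands as registered here, it is
reachable through ethtool's generic priv-flags interface; the device name
below is a placeholder:

    ethtool --show-priv-flags eth0
    ethtool --set-priv-flags eth0 rx_no_csum_complete on
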
index 41cde926cdab6d3d061d62d424ed1bff8c860c9d..c18dcebe1462243d4747cc0d73a1e3e251d82106 100644 (file)
@@ -131,14 +131,14 @@ set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m,
        if (ip4src_m) {
                memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &ip4src_v, sizeof(ip4src_v));
-               memset(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4),
-                      0xff, sizeof(ip4src_m));
+               memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4),
+                      &ip4src_m, sizeof(ip4src_m));
        }
        if (ip4dst_m) {
                memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &ip4dst_v, sizeof(ip4dst_v));
-               memset(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
-                      0xff, sizeof(ip4dst_m));
+               memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+                      &ip4dst_m, sizeof(ip4dst_m));
        }
 
        MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
@@ -173,11 +173,11 @@ set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
        __be16 pdst_m, __be16 pdst_v)
 {
        if (psrc_m) {
-               MLX5E_FTE_SET(headers_c, tcp_sport, 0xffff);
+               MLX5E_FTE_SET(headers_c, tcp_sport, ntohs(psrc_m));
                MLX5E_FTE_SET(headers_v, tcp_sport, ntohs(psrc_v));
        }
        if (pdst_m) {
-               MLX5E_FTE_SET(headers_c, tcp_dport, 0xffff);
+               MLX5E_FTE_SET(headers_c, tcp_dport, ntohs(pdst_m));
                MLX5E_FTE_SET(headers_v, tcp_dport, ntohs(pdst_v));
        }
 
@@ -190,12 +190,12 @@ set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
        __be16 pdst_m, __be16 pdst_v)
 {
        if (psrc_m) {
-               MLX5E_FTE_SET(headers_c, udp_sport, 0xffff);
+               MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_m));
                MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
        }
 
        if (pdst_m) {
-               MLX5E_FTE_SET(headers_c, udp_dport, 0xffff);
+               MLX5E_FTE_SET(headers_c, udp_dport, ntohs(pdst_m));
                MLX5E_FTE_SET(headers_v, udp_dport, ntohs(pdst_v));
        }
 
@@ -508,26 +508,14 @@ static int validate_tcpudp4(struct ethtool_rx_flow_spec *fs)
        if (l4_mask->tos)
                return -EINVAL;
 
-       if (l4_mask->ip4src) {
-               if (!all_ones(l4_mask->ip4src))
-                       return -EINVAL;
+       if (l4_mask->ip4src)
                ntuples++;
-       }
-       if (l4_mask->ip4dst) {
-               if (!all_ones(l4_mask->ip4dst))
-                       return -EINVAL;
+       if (l4_mask->ip4dst)
                ntuples++;
-       }
-       if (l4_mask->psrc) {
-               if (!all_ones(l4_mask->psrc))
-                       return -EINVAL;
+       if (l4_mask->psrc)
                ntuples++;
-       }
-       if (l4_mask->pdst) {
-               if (!all_ones(l4_mask->pdst))
-                       return -EINVAL;
+       if (l4_mask->pdst)
                ntuples++;
-       }
        /* Flow is TCP/UDP */
        return ++ntuples;
 }
@@ -540,16 +528,10 @@ static int validate_ip4(struct ethtool_rx_flow_spec *fs)
        if (l3_mask->l4_4_bytes || l3_mask->tos ||
            fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
                return -EINVAL;
-       if (l3_mask->ip4src) {
-               if (!all_ones(l3_mask->ip4src))
-                       return -EINVAL;
+       if (l3_mask->ip4src)
                ntuples++;
-       }
-       if (l3_mask->ip4dst) {
-               if (!all_ones(l3_mask->ip4dst))
-                       return -EINVAL;
+       if (l3_mask->ip4dst)
                ntuples++;
-       }
        if (l3_mask->proto)
                ntuples++;
        /* Flow is IPv4 */
@@ -588,16 +570,10 @@ static int validate_tcpudp6(struct ethtool_rx_flow_spec *fs)
        if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6dst))
                ntuples++;
 
-       if (l4_mask->psrc) {
-               if (!all_ones(l4_mask->psrc))
-                       return -EINVAL;
+       if (l4_mask->psrc)
                ntuples++;
-       }
-       if (l4_mask->pdst) {
-               if (!all_ones(l4_mask->pdst))
-                       return -EINVAL;
+       if (l4_mask->pdst)
                ntuples++;
-       }
        /* Flow is TCP/UDP */
        return ++ntuples;
 }
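
The validation relaxations above stop insisting on all-ones (exact-match)
masks, since the flow-steering hardware can match under an arbitrary mask: a
field matches when (packet & mask) == (key & mask). A self-contained sketch of
that predicate:

    #include <stdint.h>
    #include <stdio.h>

    static int field_matches(uint16_t pkt, uint16_t key, uint16_t mask)
    {
            return (pkt & mask) == (key & mask);
    }

    int main(void)
    {
            /* match any TCP port in 0x0400-0x04ff by masking the low byte */
            printf("%d %d\n",
                   field_matches(0x04d2, 0x0400, 0xff00),   /* 1: 1234 matches */
                   field_matches(0x1389, 0x0400, 0xff00));  /* 0: 5001 does not */
            return 0;
    }
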
index 5955b4d844cc4fdc8784dd4ec62a6b1e7562e1a5..bc034958c8468c33b561c8c40a6c5284e5b2692e 100644 (file)
@@ -929,6 +929,9 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
        if (params->rx_dim_enabled)
                __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
 
+       if (params->pflags & MLX5E_PFLAG_RX_NO_CSUM_COMPLETE)
+               __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+
        return 0;
 
 err_destroy_rq:
@@ -3175,7 +3178,7 @@ static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *t
        MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
 }
 
-int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
+int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
 {
        struct mlx5e_tir *tir;
        void *tirc;
@@ -3202,7 +3205,7 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
                }
        }
 
-       if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+       if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
                goto out;
 
        for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
@@ -3273,14 +3276,14 @@ err_destroy_ch_tirs:
        return err;
 }
 
-void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
+void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
 {
        int i;
 
        for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
                mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
 
-       if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+       if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
                return;
 
        for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
@@ -4315,7 +4318,7 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
        }
 }
 
-static const struct net_device_ops mlx5e_netdev_ops = {
+const struct net_device_ops mlx5e_netdev_ops = {
        .ndo_open                = mlx5e_open,
        .ndo_stop                = mlx5e_close,
        .ndo_start_xmit          = mlx5e_xmit,
@@ -4480,6 +4483,31 @@ static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeo
        return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
 }
 
+void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
+                          struct mlx5e_params *params)
+{
+       /* Prefer Striding RQ, unless any of the following holds:
+        * - Striding RQ configuration is not possible/supported.
+        * - Slow PCI heuristic.
+        * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
+        */
+       if (!slow_pci_heuristic(mdev) &&
+           mlx5e_striding_rq_possible(mdev, params) &&
+           (mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ||
+            !mlx5e_rx_is_linear_skb(mdev, params)))
+               MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
+       mlx5e_set_rq_type(mdev, params);
+       mlx5e_init_rq_type_params(mdev, params);
+}
+
+void mlx5e_build_rss_params(struct mlx5e_params *params)
+{
+       params->rss_hfunc = ETH_RSS_HASH_XOR;
+       netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
+       mlx5e_build_default_indir_rqt(params->indirection_rqt,
+                                     MLX5E_INDIR_RQT_SIZE, params->num_channels);
+}
+
 void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
                            struct mlx5e_params *params,
                            u16 max_channels, u16 mtu)
@@ -4503,20 +4531,10 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
                params->rx_cqe_compress_def = slow_pci_heuristic(mdev);
 
        MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
+       MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);
 
        /* RQ */
-       /* Prefer Striding RQ, unless any of the following holds:
-        * - Striding RQ configuration is not possible/supported.
-        * - Slow PCI heuristic.
-        * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
-        */
-       if (!slow_pci_heuristic(mdev) &&
-           mlx5e_striding_rq_possible(mdev, params) &&
-           (mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ||
-            !mlx5e_rx_is_linear_skb(mdev, params)))
-               MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
-       mlx5e_set_rq_type(mdev, params);
-       mlx5e_init_rq_type_params(mdev, params);
+       mlx5e_build_rq_params(mdev, params);
 
        /* HW LRO */
 
@@ -4539,10 +4557,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
        params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
 
        /* RSS */
-       params->rss_hfunc = ETH_RSS_HASH_XOR;
-       netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
-       mlx5e_build_default_indir_rqt(params->indirection_rqt,
-                                     MLX5E_INDIR_RQT_SIZE, max_channels);
+       mlx5e_build_rss_params(params);
 }
 
 static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
@@ -4780,7 +4795,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
        if (err)
                goto err_destroy_indirect_rqts;
 
-       err = mlx5e_create_indirect_tirs(priv);
+       err = mlx5e_create_indirect_tirs(priv, true);
        if (err)
                goto err_destroy_direct_rqts;
 
@@ -4805,7 +4820,7 @@ err_destroy_flow_steering:
 err_destroy_direct_tirs:
        mlx5e_destroy_direct_tirs(priv);
 err_destroy_indirect_tirs:
-       mlx5e_destroy_indirect_tirs(priv);
+       mlx5e_destroy_indirect_tirs(priv, true);
 err_destroy_direct_rqts:
        mlx5e_destroy_direct_rqts(priv);
 err_destroy_indirect_rqts:
@@ -4822,7 +4837,7 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
        mlx5e_tc_nic_cleanup(priv);
        mlx5e_destroy_flow_steering(priv);
        mlx5e_destroy_direct_tirs(priv);
-       mlx5e_destroy_indirect_tirs(priv);
+       mlx5e_destroy_indirect_tirs(priv, true);
        mlx5e_destroy_direct_rqts(priv);
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
        mlx5e_close_drop_rq(&priv->drop_rq);
index f6eead24931fb17ab17159d21559dc296a1817b7..9264c3332aa60faf6842b602d81cb4bc9fb10e28 100644 (file)
@@ -46,8 +46,6 @@
 
 #define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
        max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
-#define MLX5E_REP_PARAMS_LOG_RQ_SIZE \
-       max(0x6, MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)
 
 static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
 
@@ -182,12 +180,108 @@ static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
        }
 }
 
+static void mlx5e_rep_get_ringparam(struct net_device *dev,
+                               struct ethtool_ringparam *param)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+
+       mlx5e_ethtool_get_ringparam(priv, param);
+}
+
+static int mlx5e_rep_set_ringparam(struct net_device *dev,
+                              struct ethtool_ringparam *param)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+
+       return mlx5e_ethtool_set_ringparam(priv, param);
+}
+
+static int mlx5e_replace_rep_vport_rx_rule(struct mlx5e_priv *priv,
+                                          struct mlx5_flow_destination *dest)
+{
+       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+       struct mlx5e_rep_priv *rpriv = priv->ppriv;
+       struct mlx5_eswitch_rep *rep = rpriv->rep;
+       struct mlx5_flow_handle *flow_rule;
+
+       flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
+                                                     rep->vport,
+                                                     dest);
+       if (IS_ERR(flow_rule))
+               return PTR_ERR(flow_rule);
+
+       mlx5_del_flow_rules(rpriv->vport_rx_rule);
+       rpriv->vport_rx_rule = flow_rule;
+       return 0;
+}
+
+static void mlx5e_rep_get_channels(struct net_device *dev,
+                                  struct ethtool_channels *ch)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+
+       mlx5e_ethtool_get_channels(priv, ch);
+}
+
+static int mlx5e_rep_set_channels(struct net_device *dev,
+                                 struct ethtool_channels *ch)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       u16 curr_channels_amount = priv->channels.params.num_channels;
+       u32 new_channels_amount = ch->combined_count;
+       struct mlx5_flow_destination new_dest;
+       int err = 0;
+
+       err = mlx5e_ethtool_set_channels(priv, ch);
+       if (err)
+               return err;
+
+       if (curr_channels_amount == 1 && new_channels_amount > 1) {
+               new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+               new_dest.ft = priv->fs.ttc.ft.t;
+       } else if (new_channels_amount == 1 && curr_channels_amount > 1) {
+               new_dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+               new_dest.tir_num = priv->direct_tir[0].tirn;
+       } else {
+               return 0;
+       }
+
+       err = mlx5e_replace_rep_vport_rx_rule(priv, &new_dest);
+       if (err) {
+               netdev_warn(priv->netdev, "Failed to update vport rx rule when going from (%d) to (%d) channels\n",
+                           curr_channels_amount, new_channels_amount);
+               return err;
+       }
+
+       return 0;
+}
+
+static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+
+       return mlx5e_ethtool_get_rxfh_key_size(priv);
+}
+
+static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+
+       return mlx5e_ethtool_get_rxfh_indir_size(priv);
+}
+
 static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
        .get_drvinfo       = mlx5e_rep_get_drvinfo,
        .get_link          = ethtool_op_get_link,
        .get_strings       = mlx5e_rep_get_strings,
        .get_sset_count    = mlx5e_rep_get_sset_count,
        .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
+       .get_ringparam     = mlx5e_rep_get_ringparam,
+       .set_ringparam     = mlx5e_rep_set_ringparam,
+       .get_channels      = mlx5e_rep_get_channels,
+       .set_channels      = mlx5e_rep_set_channels,
+       .get_rxfh_key_size   = mlx5e_rep_get_rxfh_key_size,
+       .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
 };
 
 int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
@@ -934,16 +1028,20 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
        params->hard_mtu    = MLX5E_ETH_HARD_MTU;
        params->sw_mtu      = mtu;
        params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
-       params->rq_wq_type  = MLX5_WQ_TYPE_CYCLIC;
-       params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE;
 
+       /* RQ */
+       mlx5e_build_rq_params(mdev, params);
+
+       /* CQ moderation params */
        params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
        mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
 
        params->num_tc                = 1;
-       params->lro_wqe_sz            = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
 
        mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
+
+       /* RSS */
+       mlx5e_build_rss_params(params);
 }
 
 static void mlx5e_build_rep_netdev(struct net_device *netdev)
@@ -963,6 +1061,16 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
        netdev->features         |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
        netdev->hw_features      |= NETIF_F_HW_TC;
 
+       netdev->hw_features    |= NETIF_F_SG;
+       netdev->hw_features    |= NETIF_F_IP_CSUM;
+       netdev->hw_features    |= NETIF_F_IPV6_CSUM;
+       netdev->hw_features    |= NETIF_F_GRO;
+       netdev->hw_features    |= NETIF_F_TSO;
+       netdev->hw_features    |= NETIF_F_TSO6;
+       netdev->hw_features    |= NETIF_F_RXCSUM;
+
+       netdev->features |= netdev->hw_features;
+
        eth_hw_addr_random(netdev);
 
        netdev->min_mtu = ETH_MIN_MTU;
@@ -986,7 +1094,7 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
 
        INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
 
-       priv->channels.params.num_channels = profile->max_nch(mdev);
+       priv->channels.params.num_channels = 1;
 
        mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
        mlx5e_build_rep_netdev(netdev);
@@ -994,13 +1102,50 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
        mlx5e_timestamp_init(priv);
 }
 
-static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
+static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
+{
+       struct ttc_params ttc_params = {};
+       int tt, err;
+
+       priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
+                                             MLX5_FLOW_NAMESPACE_KERNEL);
+
+       /* The inner_ttc in the ttc params is intentionally not set */
+       ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
+       mlx5e_set_ttc_ft_params(&ttc_params);
+       for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+               ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
+
+       err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
+       if (err) {
+               netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err);
+               return err;
+       }
+       return 0;
+}
+
+static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
-       struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_flow_handle *flow_rule;
+       struct mlx5_flow_destination dest;
+
+       dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+       dest.tir_num = priv->direct_tir[0].tirn;
+       flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
+                                                     rep->vport,
+                                                     &dest);
+       if (IS_ERR(flow_rule))
+               return PTR_ERR(flow_rule);
+       rpriv->vport_rx_rule = flow_rule;
+       return 0;
+}
+
+static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
        int err;
 
        mlx5e_init_l2_addr(priv);
@@ -1011,29 +1156,42 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
                return err;
        }
 
-       err = mlx5e_create_direct_rqts(priv);
+       err = mlx5e_create_indirect_rqt(priv);
        if (err)
                goto err_close_drop_rq;
 
-       err = mlx5e_create_direct_tirs(priv);
+       err = mlx5e_create_direct_rqts(priv);
+       if (err)
+               goto err_destroy_indirect_rqts;
+
+       err = mlx5e_create_indirect_tirs(priv, false);
        if (err)
                goto err_destroy_direct_rqts;
 
-       flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
-                                                     rep->vport,
-                                                     priv->direct_tir[0].tirn);
-       if (IS_ERR(flow_rule)) {
-               err = PTR_ERR(flow_rule);
+       err = mlx5e_create_direct_tirs(priv);
+       if (err)
+               goto err_destroy_indirect_tirs;
+
+       err = mlx5e_create_rep_ttc_table(priv);
+       if (err)
                goto err_destroy_direct_tirs;
-       }
-       rpriv->vport_rx_rule = flow_rule;
+
+       err = mlx5e_create_rep_vport_rx_rule(priv);
+       if (err)
+               goto err_destroy_ttc_table;
 
        return 0;
 
+err_destroy_ttc_table:
+       mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
 err_destroy_direct_tirs:
        mlx5e_destroy_direct_tirs(priv);
+err_destroy_indirect_tirs:
+       mlx5e_destroy_indirect_tirs(priv, false);
 err_destroy_direct_rqts:
        mlx5e_destroy_direct_rqts(priv);
+err_destroy_indirect_rqts:
+       mlx5e_destroy_rqt(priv, &priv->indir_rqt);
 err_close_drop_rq:
        mlx5e_close_drop_rq(&priv->drop_rq);
        return err;
@@ -1044,8 +1202,11 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
 
        mlx5_del_flow_rules(rpriv->vport_rx_rule);
+       mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
        mlx5e_destroy_direct_tirs(priv);
+       mlx5e_destroy_indirect_tirs(priv, false);
        mlx5e_destroy_direct_rqts(priv);
+       mlx5e_destroy_rqt(priv, &priv->indir_rqt);
        mlx5e_close_drop_rq(&priv->drop_rq);
 }
 
@@ -1061,12 +1222,6 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
        return 0;
 }
 
-static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev)
-{
-#define        MLX5E_PORT_REPRESENTOR_NCH 1
-       return MLX5E_PORT_REPRESENTOR_NCH;
-}
-
 static const struct mlx5e_profile mlx5e_rep_profile = {
        .init                   = mlx5e_init_rep,
        .init_rx                = mlx5e_init_rep_rx,
@@ -1074,10 +1229,10 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
        .init_tx                = mlx5e_init_rep_tx,
        .cleanup_tx             = mlx5e_cleanup_nic_tx,
        .update_stats           = mlx5e_rep_update_hw_counters,
-       .max_nch                = mlx5e_get_rep_max_num_channels,
+       .max_nch                = mlx5e_get_max_num_channels,
        .update_carrier         = NULL,
        .rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
-       .rx_handlers.handle_rx_cqe_mpwqe = NULL /* Not supported */,
+       .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
        .max_tc                 = 1,
 };
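
mlx5e_rep_set_channels() earlier in this file replaces the representor's vport rx rule only when the channel count crosses the single-channel boundary: with one channel traffic can go straight to the direct TIR, while multiple channels need the TTC flow table to spread flows. A standalone sketch of just that decision (names hypothetical, logic mirrors the diff):

    #include <stdio.h>

    enum rx_dest { DEST_KEEP, DEST_TTC_TABLE, DEST_DIRECT_TIR };

    static enum rx_dest pick_rx_dest(unsigned int cur, unsigned int next)
    {
            if (cur == 1 && next > 1)
                    return DEST_TTC_TABLE;      /* fan out through RSS */
            if (next == 1 && cur > 1)
                    return DEST_DIRECT_TIR;     /* one queue, skip the TTC hop */
            return DEST_KEEP;                   /* existing destination still fits */
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   pick_rx_dest(1, 4),          /* -> TTC table */
                   pick_rx_dest(4, 1),          /* -> direct TIR */
                   pick_rx_dest(4, 8));         /* -> keep */
            return 0;
    }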
 
index 424bc89184c652621be82330ea1557b2c0a13e0d..f19067c94272f85dc6f9528774e5f1e49a8a6d73 100644 (file)
@@ -782,6 +782,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                return;
        }
 
+       if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)))
+               goto csum_unnecessary;
+
        if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
                if (unlikely(get_ip_proto(skb, proto) == IPPROTO_SCTP))
                        goto csum_unnecessary;
@@ -805,7 +808,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 
 csum_unnecessary:
        if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
-                  (cqe->hds_ip_ext & CQE_L4_OK))) {
+                  ((cqe->hds_ip_ext & CQE_L4_OK) ||
+                   (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                if (cqe_is_tunneled(cqe)) {
                        skb->csum_level = 1;
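
The rx hunk above relaxes the CHECKSUM_UNNECESSARY condition: CQE_L3_OK stays mandatory, but a completion whose L4 header type is NONE no longer needs CQE_L4_OK. A sketch of the resulting predicate, with illustrative bit values rather than the hardware's:

    #include <stdbool.h>
    #include <stdio.h>

    #define CQE_L3_OK               (1u << 0)   /* illustrative */
    #define CQE_L4_OK               (1u << 1)   /* illustrative */
    #define CQE_L4_HDR_TYPE_NONE    0

    static bool csum_unnecessary(unsigned int hds_ip_ext, int l4_hdr_type)
    {
            if (!(hds_ip_ext & CQE_L3_OK))
                    return false;
            return (hds_ip_ext & CQE_L4_OK) ||
                   l4_hdr_type == CQE_L4_HDR_TYPE_NONE;
    }

    int main(void)
    {
            /* e.g. an ICMP packet: L3 verified, no L4 checksum to verify */
            printf("%d\n", csum_unnecessary(CQE_L3_OK, CQE_L4_HDR_TYPE_NONE));
            return 0;
    }
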
index 90c7607b1f44dfbad303de1b3b5791f2c89c0516..b7d4896c7c7b2c6176d593df72468c1018a21e1f 100644 (file)
@@ -93,6 +93,7 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
@@ -170,6 +171,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
                s->rx_cache_busy  += rq_stats->cache_busy;
                s->rx_cache_waive += rq_stats->cache_waive;
                s->rx_congst_umr  += rq_stats->congst_umr;
+               s->rx_arfs_err    += rq_stats->arfs_err;
                s->ch_events      += ch_stats->events;
                s->ch_poll        += ch_stats->poll;
                s->ch_arm         += ch_stats->arm;
@@ -1161,6 +1163,7 @@ static const struct counter_desc rq_stats_desc[] = {
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
 };
 
 static const struct counter_desc sq_stats_desc[] = {
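
Adding the rx_arfs_err counter touches three places (the descriptor array, the aggregation loop, and the struct fields in en_stats.h below) because the stats code is table-driven: each descriptor pairs a printable name with a field offset so one generic loop can dump every counter. A hedged sketch of that idiom; the macro and struct here are illustrative, not the driver's exact definitions:

    #include <stddef.h>
    #include <stdio.h>

    struct sw_stats {
            unsigned long long rx_congst_umr;
            unsigned long long rx_arfs_err;     /* the counter added above */
    };

    struct counter_desc {
            const char *name;
            size_t offset;
    };

    #define DECLARE_STAT(type, fld) { #fld, offsetof(type, fld) }

    static const struct counter_desc sw_desc[] = {
            DECLARE_STAT(struct sw_stats, rx_congst_umr),
            DECLARE_STAT(struct sw_stats, rx_arfs_err),
    };

    int main(void)
    {
            struct sw_stats s = { .rx_arfs_err = 3 };
            size_t i;

            for (i = 0; i < sizeof(sw_desc) / sizeof(sw_desc[0]); i++)
                    printf("%s = %llu\n", sw_desc[i].name,
                           *(const unsigned long long *)((const char *)&s +
                                                         sw_desc[i].offset));
            return 0;
    }
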
index a5fb3dc27f5003ee92bdd7b21258266f3224d07c..77f74ce11280e26a461343adc92e56d63b387812 100644 (file)
@@ -106,6 +106,7 @@ struct mlx5e_sw_stats {
        u64 rx_cache_busy;
        u64 rx_cache_waive;
        u64 rx_congst_umr;
+       u64 rx_arfs_err;
        u64 ch_events;
        u64 ch_poll;
        u64 ch_arm;
@@ -202,6 +203,7 @@ struct mlx5e_rq_stats {
        u64 cache_busy;
        u64 cache_waive;
        u64 congst_umr;
+       u64 arfs_err;
 };
 
 struct mlx5e_sq_stats {
index 9fed54017659de3b0f58a1287a7eff605c077f6c..6de21d9f4fad7dbb5c90ddc15aefb92ad31f1933 100644 (file)
@@ -532,7 +532,8 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
 #define UNKNOWN_MATCH_PRIO 8
 
 static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
-                                 struct mlx5_flow_spec *spec, u8 *match_prio)
+                                 struct mlx5_flow_spec *spec, u8 *match_prio,
+                                 struct netlink_ext_ack *extack)
 {
        void *headers_c, *headers_v;
        u8 prio_val, prio_mask = 0;
@@ -540,8 +541,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
 
 #ifdef CONFIG_MLX5_CORE_EN_DCB
        if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
-               netdev_warn(priv->netdev,
-                           "only PCP trust state supported for hairpin\n");
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "only PCP trust state supported for hairpin");
                return -EOPNOTSUPP;
        }
 #endif
@@ -557,8 +558,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
        if (!vlan_present || !prio_mask) {
                prio_val = UNKNOWN_MATCH_PRIO;
        } else if (prio_mask != 0x7) {
-               netdev_warn(priv->netdev,
-                           "masked priority match not supported for hairpin\n");
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "masked priority match not supported for hairpin");
                return -EOPNOTSUPP;
        }
 
@@ -568,7 +569,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
 
 static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
                                  struct mlx5e_tc_flow *flow,
-                                 struct mlx5e_tc_flow_parse_attr *parse_attr)
+                                 struct mlx5e_tc_flow_parse_attr *parse_attr,
+                                 struct netlink_ext_ack *extack)
 {
        int peer_ifindex = parse_attr->mirred_ifindex;
        struct mlx5_hairpin_params params;
@@ -583,12 +585,13 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
 
        peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
        if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
-               netdev_warn(priv->netdev, "hairpin is not supported\n");
+               NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
                return -EOPNOTSUPP;
        }
 
        peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
-       err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio);
+       err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
+                                    extack);
        if (err)
                return err;
        hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
@@ -677,7 +680,8 @@ static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
 static struct mlx5_flow_handle *
 mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
                      struct mlx5e_tc_flow_parse_attr *parse_attr,
-                     struct mlx5e_tc_flow *flow)
+                     struct mlx5e_tc_flow *flow,
+                     struct netlink_ext_ack *extack)
 {
        struct mlx5_nic_flow_attr *attr = flow->nic_attr;
        struct mlx5_core_dev *dev = priv->mdev;
@@ -694,7 +698,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
        int err, dest_ix = 0;
 
        if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
-               err = mlx5e_hairpin_flow_add(priv, flow, parse_attr);
+               err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
                if (err) {
                        rule = ERR_PTR(err);
                        goto err_add_hairpin_flow;
@@ -753,6 +757,8 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
                                                            MLX5E_TC_TABLE_NUM_GROUPS,
                                                            MLX5E_TC_FT_LEVEL, 0);
                if (IS_ERR(priv->fs.tc.t)) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Failed to create tc offload table");
                        netdev_err(priv->netdev,
                                   "Failed to create tc offload table\n");
                        rule = ERR_CAST(priv->fs.tc.t);
@@ -819,12 +825,14 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct ip_tunnel_info *tun_info,
                              struct net_device *mirred_dev,
                              struct net_device **encap_dev,
-                             struct mlx5e_tc_flow *flow);
+                             struct mlx5e_tc_flow *flow,
+                             struct netlink_ext_ack *extack);
 
 static struct mlx5_flow_handle *
 mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                      struct mlx5e_tc_flow_parse_attr *parse_attr,
-                     struct mlx5e_tc_flow *flow)
+                     struct mlx5e_tc_flow *flow,
+                     struct netlink_ext_ack *extack)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
@@ -838,7 +846,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                out_dev = __dev_get_by_index(dev_net(priv->netdev),
                                             attr->parse_attr->mirred_ifindex);
                err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
-                                        out_dev, &encap_dev, flow);
+                                        out_dev, &encap_dev, flow, extack);
                if (err) {
                        rule = ERR_PTR(err);
                        if (err != -EAGAIN)
@@ -1105,6 +1113,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
                             struct mlx5_flow_spec *spec,
                             struct tc_cls_flower_offload *f)
 {
+       struct netlink_ext_ack *extack = f->common.extack;
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
@@ -1133,6 +1142,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
                    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
                        parse_vxlan_attr(spec, f);
                else {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "port isn't an offloaded vxlan udp dport");
                        netdev_warn(priv->netdev,
                                    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
                        return -EOPNOTSUPP;
@@ -1149,6 +1160,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
                         udp_sport, ntohs(key->src));
        } else { /* udp dst port must be given */
 vxlan_match_offload_err:
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "IP tunnel decap offload supported only for vxlan, must set UDP dport");
                netdev_warn(priv->netdev,
                            "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
                return -EOPNOTSUPP;
@@ -1225,6 +1238,16 @@ vxlan_match_offload_err:
 
                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
+
+               if (mask->ttl &&
+                   !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
+                                               ft_field_support.outer_ipv4_ttl)) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Matching on TTL is not supported");
+                       return -EOPNOTSUPP;
+               }
+
        }
 
        /* Enforce DMAC when offloading incoming tunneled flows.
@@ -1247,6 +1270,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                              struct tc_cls_flower_offload *f,
                              u8 *match_level)
 {
+       struct netlink_ext_ack *extack = f->common.extack;
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
@@ -1277,6 +1301,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
              BIT(FLOW_DISSECTOR_KEY_TCP) |
              BIT(FLOW_DISSECTOR_KEY_IP)  |
              BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
+               NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
                netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
                            f->dissector->used_keys);
                return -EOPNOTSUPP;
@@ -1368,6 +1393,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 
                        *match_level = MLX5_MATCH_L2;
                }
+       } else {
+               MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
        }
 
        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
@@ -1550,8 +1578,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 
                if (mask->ttl &&
                    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
-                                               ft_field_support.outer_ipv4_ttl))
+                                               ft_field_support.outer_ipv4_ttl)) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Matching on TTL is not supported");
                        return -EOPNOTSUPP;
+               }
 
                if (mask->tos || mask->ttl)
                        *match_level = MLX5_MATCH_L3;
@@ -1593,6 +1624,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                                 udp_dport, ntohs(key->dst));
                        break;
                default:
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Only UDP and TCP transports are supported for L4 matching");
                        netdev_err(priv->netdev,
                                   "Only UDP and TCP transport are supported\n");
                        return -EINVAL;
@@ -1629,6 +1662,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
                            struct mlx5_flow_spec *spec,
                            struct tc_cls_flower_offload *f)
 {
+       struct netlink_ext_ack *extack = f->common.extack;
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1643,6 +1677,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
                if (rep->vport != FDB_UPLINK_VPORT &&
                    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
                    esw->offloads.inline_mode < match_level)) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Flow is not offloaded due to min inline setting");
                        netdev_warn(priv->netdev,
                                    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
                                    match_level, esw->offloads.inline_mode);
@@ -1744,7 +1780,8 @@ static struct mlx5_fields fields[] = {
  */
 static int offload_pedit_fields(struct pedit_headers *masks,
                                struct pedit_headers *vals,
-                               struct mlx5e_tc_flow_parse_attr *parse_attr)
+                               struct mlx5e_tc_flow_parse_attr *parse_attr,
+                               struct netlink_ext_ack *extack)
 {
        struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
        int i, action_size, nactions, max_actions, first, last, next_z;
@@ -1783,11 +1820,15 @@ static int offload_pedit_fields(struct pedit_headers *masks,
                        continue;
 
                if (s_mask && a_mask) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "can't set and add to the same HW field");
                        printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
                        return -EOPNOTSUPP;
                }
 
                if (nactions == max_actions) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "too many pedit actions, can't offload");
                        printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
                        return -EOPNOTSUPP;
                }
@@ -1820,6 +1861,8 @@ static int offload_pedit_fields(struct pedit_headers *masks,
                next_z = find_next_zero_bit(&mask, field_bsize, first);
                last  = find_last_bit(&mask, field_bsize);
                if (first < next_z && next_z < last) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "rewrite of multiple sub-fields isn't supported");
                        printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
                               mask);
                        return -EOPNOTSUPP;
@@ -1878,7 +1921,8 @@ static const struct pedit_headers zero_masks = {};
 
 static int parse_tc_pedit_action(struct mlx5e_priv *priv,
                                 const struct tc_action *a, int namespace,
-                                struct mlx5e_tc_flow_parse_attr *parse_attr)
+                                struct mlx5e_tc_flow_parse_attr *parse_attr,
+                                struct netlink_ext_ack *extack)
 {
        struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
        int nkeys, i, err = -EOPNOTSUPP;
@@ -1896,12 +1940,13 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
                err = -EOPNOTSUPP; /* can't be all optimistic */
 
                if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
-                       netdev_warn(priv->netdev, "legacy pedit isn't offloaded\n");
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "legacy pedit isn't offloaded");
                        goto out_err;
                }
 
                if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
-                       netdev_warn(priv->netdev, "pedit cmd %d isn't offloaded\n", cmd);
+                       NL_SET_ERR_MSG_MOD(extack, "pedit cmd isn't offloaded");
                        goto out_err;
                }
 
@@ -1918,13 +1963,15 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
        if (err)
                goto out_err;
 
-       err = offload_pedit_fields(masks, vals, parse_attr);
+       err = offload_pedit_fields(masks, vals, parse_attr, extack);
        if (err < 0)
                goto out_dealloc_parsed_actions;
 
        for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
                cmd_masks = &masks[cmd];
                if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "attempt to offload an unsupported field");
                        netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
                        print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
                                       16, 1, cmd_masks, sizeof(zero_masks), true);
@@ -1941,19 +1988,26 @@ out_err:
        return err;
 }
 
-static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
+static bool csum_offload_supported(struct mlx5e_priv *priv,
+                                  u32 action,
+                                  u32 update_flags,
+                                  struct netlink_ext_ack *extack)
 {
        u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
                         TCA_CSUM_UPDATE_FLAG_UDP;
 
        /*  The HW recalcs checksums only if re-writing headers */
        if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "TC csum action is only offloaded with pedit");
                netdev_warn(priv->netdev,
                            "TC csum action is only offloaded with pedit\n");
                return false;
        }
 
        if (update_flags & ~prot_flags) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "can't offload TC csum action for some header/s");
                netdev_warn(priv->netdev,
                            "can't offload TC csum action for some header/s - flags %#x\n",
                            update_flags);
@@ -1964,7 +2018,8 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 upda
 }
 
 static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
-                                         struct tcf_exts *exts)
+                                         struct tcf_exts *exts,
+                                         struct netlink_ext_ack *extack)
 {
        const struct tc_action *a;
        bool modify_ip_header;
@@ -2002,6 +2057,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
        ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
        if (modify_ip_header && ip_proto != IPPROTO_TCP &&
            ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "header re-write is only offloaded for TCP/UDP/ICMP");
                pr_info("can't offload re-write of ip proto %d\n", ip_proto);
                return false;
        }
@@ -2013,7 +2070,8 @@ out_ok:
 static bool actions_match_supported(struct mlx5e_priv *priv,
                                    struct tcf_exts *exts,
                                    struct mlx5e_tc_flow_parse_attr *parse_attr,
-                                   struct mlx5e_tc_flow *flow)
+                                   struct mlx5e_tc_flow *flow,
+                                   struct netlink_ext_ack *extack)
 {
        u32 actions;
 
@@ -2027,7 +2085,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
                return false;
 
        if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
-               return modify_header_match_supported(&parse_attr->spec, exts);
+               return modify_header_match_supported(&parse_attr->spec, exts,
+                                                    extack);
 
        return true;
 }
@@ -2040,15 +2099,16 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
        fmdev = priv->mdev;
        pmdev = peer_priv->mdev;
 
-       mlx5_query_nic_vport_system_image_guid(fmdev, &fsystem_guid);
-       mlx5_query_nic_vport_system_image_guid(pmdev, &psystem_guid);
+       fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
+       psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
 
        return (fsystem_guid == psystem_guid);
 }
 
 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                struct mlx5e_tc_flow_parse_attr *parse_attr,
-                               struct mlx5e_tc_flow *flow)
+                               struct mlx5e_tc_flow *flow,
+                               struct netlink_ext_ack *extack)
 {
        struct mlx5_nic_flow_attr *attr = flow->nic_attr;
        const struct tc_action *a;
@@ -2072,7 +2132,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
                if (is_tcf_pedit(a)) {
                        err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
-                                                   parse_attr);
+                                                   parse_attr, extack);
                        if (err)
                                return err;
 
@@ -2083,7 +2143,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
                if (is_tcf_csum(a)) {
                        if (csum_offload_supported(priv, action,
-                                                  tcf_csum_update_flags(a)))
+                                                  tcf_csum_update_flags(a),
+                                                  extack))
                                continue;
 
                        return -EOPNOTSUPP;
@@ -2099,6 +2160,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                                          MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        } else {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "device is not on the same HW, can't offload");
                                netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
                                            peer_dev->name);
                                return -EINVAL;
@@ -2110,8 +2173,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                        u32 mark = tcf_skbedit_mark(a);
 
                        if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
-                               netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
-                                           mark);
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "Bad flow mark - only 16 bits are supported");
                                return -EINVAL;
                        }
 
@@ -2124,7 +2187,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
        }
 
        attr->action = action;
-       if (!actions_match_supported(priv, exts, parse_attr, flow))
+       if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
                return -EOPNOTSUPP;
 
        return 0;
@@ -2526,7 +2589,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct ip_tunnel_info *tun_info,
                              struct net_device *mirred_dev,
                              struct net_device **encap_dev,
-                             struct mlx5e_tc_flow *flow)
+                             struct mlx5e_tc_flow *flow,
+                             struct netlink_ext_ack *extack)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        unsigned short family = ip_tunnel_info_af(tun_info);
@@ -2544,6 +2608,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
        /* setting udp src port isn't supported */
        if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
 vxlan_encap_offload_err:
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "must set udp dst port and not set udp src port");
                netdev_warn(priv->netdev,
                            "must set udp dst port and not set udp src port\n");
                return -EOPNOTSUPP;
@@ -2553,6 +2619,8 @@ vxlan_encap_offload_err:
            MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
                tunnel_type = MLX5_HEADER_TYPE_VXLAN;
        } else {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "port isn't an offloaded vxlan udp dport");
                netdev_warn(priv->netdev,
                            "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
                return -EOPNOTSUPP;
@@ -2657,7 +2725,8 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
 
 static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                struct mlx5e_tc_flow_parse_attr *parse_attr,
-                               struct mlx5e_tc_flow *flow)
+                               struct mlx5e_tc_flow *flow,
+                               struct netlink_ext_ack *extack)
 {
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -2683,7 +2752,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
                if (is_tcf_pedit(a)) {
                        err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
-                                                   parse_attr);
+                                                   parse_attr, extack);
                        if (err)
                                return err;
 
@@ -2694,7 +2763,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
                if (is_tcf_csum(a)) {
                        if (csum_offload_supported(priv, action,
-                                                  tcf_csum_update_flags(a)))
+                                                  tcf_csum_update_flags(a),
+                                                  extack))
                                continue;
 
                        return -EOPNOTSUPP;
@@ -2707,6 +2777,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                        out_dev = tcf_mirred_dev(a);
 
                        if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "can't support more output ports, can't offload forwarding");
                                pr_err("can't support more than %d output ports, can't offload forwarding\n",
                                       attr->out_count);
                                return -EOPNOTSUPP;
@@ -2730,6 +2802,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                          MLX5_FLOW_CONTEXT_ACTION_COUNT;
                                /* attr->out_rep is resolved when we handle encap */
                        } else {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "devices are not on the same switch HW, can't offload forwarding");
                                pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
                                       priv->netdev->name, out_dev->name);
                                return -EINVAL;
@@ -2766,10 +2840,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
        }
 
        attr->action = action;
-       if (!actions_match_supported(priv, exts, parse_attr, flow))
+       if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
                return -EOPNOTSUPP;
 
        if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "current firmware doesn't support split rule for port mirroring");
                netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
                return -EOPNOTSUPP;
        }
@@ -2811,6 +2887,7 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv)
 int mlx5e_configure_flower(struct mlx5e_priv *priv,
                           struct tc_cls_flower_offload *f, int flags)
 {
+       struct netlink_ext_ack *extack = f->common.extack;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_tc_flow_parse_attr *parse_attr;
        struct rhashtable *tc_ht = get_tc_ht(priv);
@@ -2822,6 +2899,8 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
 
        flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
        if (flow) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "flow cookie already exists, ignoring");
                netdev_warn_once(priv->netdev, "flow cookie %lx already exists, ignoring\n", f->cookie);
                return 0;
        }
@@ -2850,15 +2929,19 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
                goto err_free;
 
        if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
-               err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
+               err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow,
+                                          extack);
                if (err < 0)
                        goto err_free;
-               flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
+               flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow,
+                                                     extack);
        } else {
-               err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
+               err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow,
+                                          extack);
                if (err < 0)
                        goto err_free;
-               flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
+               flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow,
+                                                     extack);
        }
 
        if (IS_ERR(flow->rule[0])) {
@@ -2946,14 +3029,71 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
        return 0;
 }
 
+static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
+                                             struct mlx5e_priv *peer_priv)
+{
+       struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
+       struct mlx5e_hairpin_entry *hpe;
+       u16 peer_vhca_id;
+       int bkt;
+
+       if (!same_hw_devs(priv, peer_priv))
+               return;
+
+       peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
+
+       hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
+               if (hpe->peer_vhca_id == peer_vhca_id)
+                       hpe->hp->pair->peer_gone = true;
+       }
+}
+
+static int mlx5e_tc_netdev_event(struct notifier_block *this,
+                                unsigned long event, void *ptr)
+{
+       struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+       struct mlx5e_flow_steering *fs;
+       struct mlx5e_priv *peer_priv;
+       struct mlx5e_tc_table *tc;
+       struct mlx5e_priv *priv;
+
+       if (ndev->netdev_ops != &mlx5e_netdev_ops ||
+           event != NETDEV_UNREGISTER ||
+           ndev->reg_state == NETREG_REGISTERED)
+               return NOTIFY_DONE;
+
+       tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
+       fs = container_of(tc, struct mlx5e_flow_steering, tc);
+       priv = container_of(fs, struct mlx5e_priv, fs);
+       peer_priv = netdev_priv(ndev);
+       if (priv == peer_priv ||
+           !(priv->netdev->features & NETIF_F_HW_TC))
+               return NOTIFY_DONE;
+
+       mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
+
+       return NOTIFY_DONE;
+}
+
 int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 {
        struct mlx5e_tc_table *tc = &priv->fs.tc;
+       int err;
 
        hash_init(tc->mod_hdr_tbl);
        hash_init(tc->hairpin_tbl);
 
-       return rhashtable_init(&tc->ht, &tc_ht_params);
+       err = rhashtable_init(&tc->ht, &tc_ht_params);
+       if (err)
+               return err;
+
+       tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
+       if (register_netdevice_notifier(&tc->netdevice_nb)) {
+               tc->netdevice_nb.notifier_call = NULL;
+               mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
+       }
+
+       return err;
 }
 
 static void _mlx5e_tc_del_flow(void *ptr, void *arg)
@@ -2969,6 +3109,9 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
 {
        struct mlx5e_tc_table *tc = &priv->fs.tc;
 
+       if (tc->netdevice_nb.notifier_call)
+               unregister_netdevice_notifier(&tc->netdevice_nb);
+
        rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
 
        if (!IS_ERR_OR_NULL(tc->t)) {
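
Most of the en_tc.c hunks plumb struct netlink_ext_ack through the parse helpers so that the reason a flower rule can't be offloaded reaches the user over netlink instead of only the kernel log. NL_SET_ERR_MSG_MOD stores a constant string, which is why the hunks keep a netdev_warn() alongside it whenever the message needs format arguments such as the offending dport. A user-space sketch of the pattern, with simplified hypothetical types:

    #include <stdio.h>

    struct ext_ack {
            const char *msg;
    };

    #define SET_ERR_MSG(ea, s) do { (ea)->msg = (s); } while (0)

    static int parse_action(int offloadable, struct ext_ack *ea)
    {
            if (!offloadable) {
                    SET_ERR_MSG(ea, "action isn't offloaded");
                    return -95;                 /* -EOPNOTSUPP */
            }
            return 0;
    }

    int main(void)
    {
            struct ext_ack ea = { 0 };

            /* the caller relays the attached message to the user */
            if (parse_action(0, &ea) && ea.msg)
                    fprintf(stderr, "offload failed: %s\n", ea.msg);
            return 0;
    }
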
index 2b252cde5cc2db3cc6c90566e22d2187f328a99e..ea7dedc2d5adfc48081387619222c8e07da43bd4 100644 (file)
@@ -2000,7 +2000,7 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
        u32 max_guarantee = 0;
        int i;
 
-       for (i = 0; i <= esw->total_vports; i++) {
+       for (i = 0; i < esw->total_vports; i++) {
                evport = &esw->vports[i];
                if (!evport->enabled || evport->info.min_rate < max_guarantee)
                        continue;
@@ -2020,7 +2020,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
        int err;
        int i;
 
-       for (i = 0; i <= esw->total_vports; i++) {
+       for (i = 0; i < esw->total_vports; i++) {
                evport = &esw->vports[i];
                if (!evport->enabled)
                        continue;
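
The two loop changes above fix an off-by-one: esw->vports holds total_vports entries, so iterating with i <= total_vports dereferenced one element past the end of the array. A standalone illustration of the corrected bound, mirroring the body of calculate_vports_min_rate_divider():

    #include <stdio.h>

    #define TOTAL_VPORTS 4

    struct vport {
            int enabled;
            unsigned int min_rate;
    };

    int main(void)
    {
            struct vport vports[TOTAL_VPORTS] = {
                    { 1, 100 }, { 1, 300 }, { 0, 0 }, { 1, 200 },
            };
            unsigned int max_guarantee = 0;
            int i;

            /* valid indices are 0..TOTAL_VPORTS-1, so: i < TOTAL_VPORTS */
            for (i = 0; i < TOTAL_VPORTS; i++) {
                    if (!vports[i].enabled || vports[i].min_rate < max_guarantee)
                            continue;
                    max_guarantee = vports[i].min_rate;
            }
            printf("max guarantee: %u\n", max_guarantee);
            return 0;
    }
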
index c17bfcab517c18745901d0c059b316bea1945203..dfc642de4e6d5783c56365c80e8ef0910a025233 100644 (file)
@@ -230,7 +230,8 @@ mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_esw_flow_attr *attr);
 
 struct mlx5_flow_handle *
-mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
+mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
+                                 struct mlx5_flow_destination *dest);
 
 enum {
        SET_VLAN_STRIP  = BIT(0),
@@ -268,12 +269,15 @@ struct mlx5_esw_flow_attr {
        struct mlx5e_tc_flow_parse_attr *parse_attr;
 };
 
-int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+                                 struct netlink_ext_ack *extack);
 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
-int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
+int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
+                                        struct netlink_ext_ack *extack);
 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);
-int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap);
+int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
+                                       struct netlink_ext_ack *extack);
 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
 
index 3028e8d90920e2940cc3247775bd7a498fd906ef..a35a2310f8718f0a7ae67f9859aaae20d41063b0 100644 (file)
@@ -775,10 +775,10 @@ static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
 }
 
 struct mlx5_flow_handle *
-mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
+mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
+                                 struct mlx5_flow_destination *dest)
 {
        struct mlx5_flow_act flow_act = {0};
-       struct mlx5_flow_destination dest = {};
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_spec *spec;
        void *misc;
@@ -796,12 +796,10 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
 
        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
-       dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-       dest.tir_num = tirn;
 
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
-                                       &flow_act, &dest, 1);
+                                       &flow_act, dest, 1);
        if (IS_ERR(flow_rule)) {
                esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
                goto out;
@@ -812,29 +810,35 @@ out:
        return flow_rule;
 }
 
-static int esw_offloads_start(struct mlx5_eswitch *esw)
+static int esw_offloads_start(struct mlx5_eswitch *esw,
+                             struct netlink_ext_ack *extack)
 {
        int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
        if (esw->mode != SRIOV_LEGACY) {
-               esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Can't set offloads mode, SRIOV legacy not enabled");
                return -EINVAL;
        }
 
        mlx5_eswitch_disable_sriov(esw);
        err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
        if (err) {
-               esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Failed setting eswitch to offloads");
                err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
-               if (err1)
-                       esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
+               if (err1) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Failed setting eswitch back to legacy");
+               }
        }
        if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
                if (mlx5_eswitch_inline_mode_get(esw,
                                                 num_vfs,
                                                 &esw->offloads.inline_mode)) {
                        esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
-                       esw_warn(esw->dev, "Inline mode is different between vports\n");
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Inline mode is different between vports");
                }
        }
        return err;
@@ -975,17 +979,20 @@ create_ft_err:
        return err;
 }
 
-static int esw_offloads_stop(struct mlx5_eswitch *esw)
+static int esw_offloads_stop(struct mlx5_eswitch *esw,
+                            struct netlink_ext_ack *extack)
 {
        int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
        mlx5_eswitch_disable_sriov(esw);
        err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
        if (err) {
-               esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
+               NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
                err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
-               if (err1)
-                       esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
+               if (err1) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Failed setting eswitch back to offloads");
+               }
        }
 
        /* enable back PF RoCE */
@@ -1094,7 +1101,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
        return 0;
 }
 
-int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+                                 struct netlink_ext_ack *extack)
 {
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        u16 cur_mlx5_mode, mlx5_mode = 0;
@@ -1113,9 +1121,9 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
                return 0;
 
        if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
-               return esw_offloads_start(dev->priv.eswitch);
+               return esw_offloads_start(dev->priv.eswitch, extack);
        else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
-               return esw_offloads_stop(dev->priv.eswitch);
+               return esw_offloads_stop(dev->priv.eswitch, extack);
        else
                return -EINVAL;
 }
@@ -1132,7 +1140,8 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
        return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
 }
 
-int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
+int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
+                                        struct netlink_ext_ack *extack)
 {
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -1149,14 +1158,15 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
                        return 0;
                /* fall through */
        case MLX5_CAP_INLINE_MODE_L2:
-               esw_warn(dev, "Inline mode can't be set\n");
+               NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
                return -EOPNOTSUPP;
        case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
                break;
        }
 
        if (esw->offloads.num_flows > 0) {
-               esw_warn(dev, "Can't set inline mode when flows are configured\n");
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Can't set inline mode when flows are configured");
                return -EOPNOTSUPP;
        }
 
@@ -1167,8 +1177,8 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
        for (vport = 1; vport < esw->enabled_vports; vport++) {
                err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
                if (err) {
-                       esw_warn(dev, "Failed to set min inline on vport %d\n",
-                                vport);
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Failed to set min inline on vport");
                        goto revert_inline_mode;
                }
        }
@@ -1234,7 +1244,8 @@ out:
        return 0;
 }
 
-int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
+int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
+                                       struct netlink_ext_ack *extack)
 {
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -1261,7 +1272,8 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
                return 0;
 
        if (esw->offloads.num_flows > 0) {
-               esw_warn(dev, "Can't set encapsulation when flows are configured\n");
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Can't set encapsulation when flows are configured");
                return -EOPNOTSUPP;
        }
 
@@ -1270,7 +1282,8 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
        esw->offloads.encap = encap;
        err = esw_create_offloads_fast_fdb_table(esw);
        if (err) {
-               esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Failed re-creating fast FDB table");
                esw->offloads.encap = !encap;
                (void)esw_create_offloads_fast_fdb_table(esw);
        }
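
Widening mlx5_eswitch_create_vport_rx_rule() from a bare TIR number to a full mlx5_flow_destination is what lets the representor code earlier in this series point the rule at either a TIR or the TTC flow table. A sketch of the tagged-union shape involved, with illustrative types rather than the driver's:

    #include <stdio.h>

    enum dest_type { DEST_TIR, DEST_FLOW_TABLE };

    struct flow_destination {
            enum dest_type type;
            union {
                    unsigned int tir_num;
                    const void *flow_table;
            };
    };

    static void add_rx_rule(const struct flow_destination *dest)
    {
            if (dest->type == DEST_TIR)
                    printf("forward to TIR %u\n", dest->tir_num);
            else
                    printf("forward to flow table %p\n", dest->flow_table);
    }

    int main(void)
    {
            struct flow_destination d = { .type = DEST_TIR, .tir_num = 7 };

            add_rx_rule(&d);
            return 0;
    }
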
index 41ad24f0de2cf9d171e586df3b9d167515d3cb03..1ab6f7e3bec6268d542e8adecccca38f295edc16 100644 (file)
@@ -250,7 +250,7 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
        if (ret)
                return ret;
 
-       force_state = MLX5_GET(teardown_hca_out, out, force_state);
+       force_state = MLX5_GET(teardown_hca_out, out, state);
        if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
                mlx5_core_warn(dev, "teardown with force mode failed, doing normal teardown\n");
                return -EIO;
@@ -259,6 +259,54 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
        return 0;
 }
 
+#define MLX5_FAST_TEARDOWN_WAIT_MS   3000
+int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
+{
+       unsigned long end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS;
+       u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
+       u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
+       int state;
+       int ret;
+
+       if (!MLX5_CAP_GEN(dev, fast_teardown)) {
+               mlx5_core_dbg(dev, "fast teardown is not supported in the firmware\n");
+               return -EOPNOTSUPP;
+       }
+
+       MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
+       MLX5_SET(teardown_hca_in, in, profile,
+                MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN);
+
+       ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+       if (ret)
+               return ret;
+
+       state = MLX5_GET(teardown_hca_out, out, state);
+       if (state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
+               mlx5_core_warn(dev, "teardown with fast mode failed\n");
+               return -EIO;
+       }
+
+       mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED);
+
+       /* Loop until the device state turns to disabled */
+       end = jiffies + msecs_to_jiffies(delay_ms);
+       do {
+               if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+                       break;
+
+               cond_resched();
+       } while (!time_after(jiffies, end));
+
+       if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
+               dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
+                       mlx5_get_nic_state(dev), delay_ms);
+               return -EIO;
+       }
+
+       return 0;
+}
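
The new mlx5_cmd_fast_teardown_hca() above uses the standard jiffies deadline-poll idiom: take a deadline, poll with cond_resched() so the CPU is not monopolized, and test the condition once more after the loop so a scheduling delay past the deadline is not misreported as a timeout. The idiom in isolation (ready() and timeout_ms are stand-ins, not kernel symbols):

        unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

        do {
                if (ready())                    /* hypothetical condition */
                        return 0;
                cond_resched();
        } while (!time_after(jiffies, end));

        return ready() ? 0 : -ETIMEDOUT;        /* final re-check avoids a racy timeout */
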
+
 enum mlxsw_reg_mcc_instruction {
        MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
        MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
index 9f39aeca863f321fe169fb3d69ef1cff98bbaf18..43118de8ee99a19b29ed687c84b45b043f1cab7f 100644
@@ -58,23 +58,26 @@ enum {
        MLX5_HEALTH_SYNDR_HIGH_TEMP             = 0x10
 };
 
-enum {
-       MLX5_NIC_IFC_FULL               = 0,
-       MLX5_NIC_IFC_DISABLED           = 1,
-       MLX5_NIC_IFC_NO_DRAM_NIC        = 2,
-       MLX5_NIC_IFC_INVALID            = 3
-};
-
 enum {
        MLX5_DROP_NEW_HEALTH_WORK,
        MLX5_DROP_NEW_RECOVERY_WORK,
 };
 
-static u8 get_nic_state(struct mlx5_core_dev *dev)
+u8 mlx5_get_nic_state(struct mlx5_core_dev *dev)
 {
        return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
 }
 
+void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state)
+{
+       u32 cur_cmdq_addr_l_sz;
+
+       cur_cmdq_addr_l_sz = ioread32be(&dev->iseg->cmdq_addr_l_sz);
+       iowrite32be((cur_cmdq_addr_l_sz & 0xFFFFF000) |
+                   state << MLX5_NIC_IFC_OFFSET,
+                   &dev->iseg->cmdq_addr_l_sz);
+}
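
mlx5_get_nic_state() and the new mlx5_set_nic_state() encode the NIC interface state in bits 9:8 of the big-endian cmdq_addr_l_sz word of the initialization segment; since the getter shifts by 8 and masks with 3, MLX5_NIC_IFC_OFFSET must be 8 and the field is two bits wide. A worked example of the bit arithmetic with an arbitrary register value:

        u32 word = 0x00000a12;                  /* example register contents */
        u8 state = (word >> 8) & 3;             /* 0x0a & 3 = 2, i.e. MLX5_NIC_IFC_NO_DRAM_NIC */

        /* write MLX5_NIC_IFC_DISABLED (1): clear the low bits, OR in state << 8 */
        word = (word & 0xFFFFF000) | (1 << 8);  /* word is now 0x00000100 */
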
+
 static void trigger_cmd_completions(struct mlx5_core_dev *dev)
 {
        unsigned long flags;
@@ -103,7 +106,7 @@ static int in_fatal(struct mlx5_core_dev *dev)
        struct mlx5_core_health *health = &dev->priv.health;
        struct health_buffer __iomem *h = health->health;
 
-       if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+       if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
                return 1;
 
        if (ioread32be(&h->fw_ver) == 0xffffffff)
@@ -133,7 +136,7 @@ unlock:
 
 static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
 {
-       u8 nic_interface = get_nic_state(dev);
+       u8 nic_interface = mlx5_get_nic_state(dev);
 
        switch (nic_interface) {
        case MLX5_NIC_IFC_FULL:
@@ -168,7 +171,7 @@ static void health_recover(struct work_struct *work)
        priv = container_of(health, struct mlx5_priv, health);
        dev = container_of(priv, struct mlx5_core_dev, priv);
 
-       nic_state = get_nic_state(dev);
+       nic_state = mlx5_get_nic_state(dev);
        if (nic_state == MLX5_NIC_IFC_INVALID) {
                dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n");
                return;
index a825ed093efd979c95b7fb9bfc99395a7efd856b..299e2a897f7ed0b9098adc15864cb8fb63ceeda2 100644
@@ -368,7 +368,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
        if (err)
                goto err_destroy_indirect_rqts;
 
-       err = mlx5e_create_indirect_tirs(priv);
+       err = mlx5e_create_indirect_tirs(priv, true);
        if (err)
                goto err_destroy_direct_rqts;
 
@@ -385,7 +385,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
 err_destroy_direct_tirs:
        mlx5e_destroy_direct_tirs(priv);
 err_destroy_indirect_tirs:
-       mlx5e_destroy_indirect_tirs(priv);
+       mlx5e_destroy_indirect_tirs(priv, true);
 err_destroy_direct_rqts:
        mlx5e_destroy_direct_rqts(priv);
 err_destroy_indirect_rqts:
@@ -401,7 +401,7 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
 {
        mlx5i_destroy_flow_steering(priv);
        mlx5e_destroy_direct_tirs(priv);
-       mlx5e_destroy_indirect_tirs(priv);
+       mlx5e_destroy_indirect_tirs(priv, true);
        mlx5e_destroy_direct_rqts(priv);
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
        mlx5e_close_drop_rq(&priv->drop_rq);
index b5e9f664fc66758d5642b18e2396503baf351415..28132c7dc05f252c6287a3fa6a8a37415de4872c 100644
@@ -1594,12 +1594,17 @@ static const struct pci_error_handlers mlx5_err_handler = {
 
 static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
 {
-       int ret;
+       bool fast_teardown = false, force_teardown = false;
+       int ret = 1;
+
+       fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
+       force_teardown = MLX5_CAP_GEN(dev, force_teardown);
+
+       mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
+       mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);
 
-       if (!MLX5_CAP_GEN(dev, force_teardown)) {
-               mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
+       if (!fast_teardown && !force_teardown)
                return -EOPNOTSUPP;
-       }
 
        if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
@@ -1612,13 +1617,19 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
        mlx5_drain_health_wq(dev);
        mlx5_stop_health_poll(dev, false);
 
+       ret = mlx5_cmd_fast_teardown_hca(dev);
+       if (!ret)
+               goto succeed;
+
        ret = mlx5_cmd_force_teardown_hca(dev);
-       if (ret) {
-               mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
-               mlx5_start_health_poll(dev);
-               return ret;
-       }
+       if (!ret)
+               goto succeed;
+
+       mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
+       mlx5_start_health_poll(dev);
+       return ret;
 
+succeed:
        mlx5_enter_error_state(dev, true);
 
        /* Some platforms require freeing the IRQs in the shutdown
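
Together with the fw.c hunks, this rewrites mlx5_try_fast_unload() as an ordered fallback chain: bail out early when neither capability is present, try the new fast teardown first, fall back to force teardown, and only if both fail restart health polling and return the error so the caller takes the normal teardown path. Reduced to its control flow (the helper names are placeholders, not kernel symbols):

        if (!fast_cap && !force_cap)
                return -EOPNOTSUPP;             /* neither teardown mode supported */

        ret = try_fast_teardown();              /* preferred path */
        if (ret)
                ret = try_force_teardown();     /* fallback */
        if (ret) {
                restart_health_poll();          /* give up; normal teardown follows */
                return ret;
        }
        enter_error_state();                    /* either teardown succeeded */
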
index b4134fa0bba36a3a4545dc65af2cec86b0984f0d..cc298527baf1687d3bafb8f5b345b9c6a8ad1aa9 100644
@@ -95,6 +95,8 @@ int mlx5_query_board_id(struct mlx5_core_dev *dev);
 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
 int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
 int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
+int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);
+
 void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
                     unsigned long param);
 void mlx5_core_page_fault(struct mlx5_core_dev *dev,
@@ -214,4 +216,14 @@ int mlx5_lag_allow(struct mlx5_core_dev *dev);
 int mlx5_lag_forbid(struct mlx5_core_dev *dev);
 
 void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
+
+enum {
+       MLX5_NIC_IFC_FULL               = 0,
+       MLX5_NIC_IFC_DISABLED           = 1,
+       MLX5_NIC_IFC_NO_DRAM_NIC        = 2,
+       MLX5_NIC_IFC_INVALID            = 3
+};
+
+u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
+void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
 #endif /* __MLX5_CORE_H__ */
index d2f76070ea7ca87bcc98c1382cd95504e0a60104..a1ee9a8a769e8a96e2c25f84454772159bb4bd16 100644
@@ -475,7 +475,8 @@ static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)
 
        for (i = 0; i < hp->num_channels; i++) {
                mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[i]);
-               mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+               if (!hp->peer_gone)
+                       mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
        }
 }
 
@@ -567,6 +568,8 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
                                       MLX5_RQC_STATE_RST, 0, 0);
 
        /* unset peer SQs */
+       if (hp->peer_gone)
+               return;
        for (i = 0; i < hp->num_channels; i++)
                mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
                                       MLX5_SQC_STATE_RST, 0, 0);
index b02af317c1255698b5ee09e49451006315b86f3c..cfbea66b48798ba495c510ed6470619e6c6ff253 100644
@@ -1201,3 +1201,12 @@ int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev)
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);
+
+u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
+{
+       if (!mdev->sys_image_guid)
+               mlx5_query_nic_vport_system_image_guid(mdev, &mdev->sys_image_guid);
+
+       return mdev->sys_image_guid;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
index 4d271fb3de3d2ba6f556f9869cb3720ce7a8f43c..5890fdfd62c377d9444d04589f0bf455d4ef6229 100644
@@ -718,14 +718,17 @@ static void mlxsw_pci_eq_tasklet(unsigned long data)
        memset(&active_cqns, 0, sizeof(active_cqns));
 
        while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
-               u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);
 
-               switch (event_type) {
-               case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
+               /* Command interface completion events are always received on
+                * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events
+                * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
+                */
+               switch (q->num) {
+               case MLXSW_PCI_EQ_ASYNC_NUM:
                        mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
                        q->u.eq.ev_cmd_count++;
                        break;
-               case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
+               case MLXSW_PCI_EQ_COMP_NUM:
                        cqn = mlxsw_pci_eqe_cqn_get(eqe);
                        set_bit(cqn, active_cqns);
                        cq_handle = true;
index 88c33a8474ebedf509c63d5bff6fb406376623dd..2b14fd0dcc4282966828143be4507f9de3165acb 100644
@@ -4855,6 +4855,8 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
                upper_dev = info->upper_dev;
                if (info->linking)
                        break;
+               if (is_vlan_dev(upper_dev))
+                       mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
                if (netif_is_macvlan(upper_dev))
                        mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
                break;
index 36c84625d54ef0c610005ce90bc87d6acb62d69a..bcec0587cf6123ecf68cf484ab41c3f55549beab 100644
@@ -23,6 +23,8 @@ config MSCC_OCELOT_SWITCH
 config MSCC_OCELOT_SWITCH_OCELOT
        tristate "Ocelot switch driver on Ocelot"
        depends on MSCC_OCELOT_SWITCH
+       depends on GENERIC_PHY
+       depends on OF_NET
        help
          This driver supports the Ocelot network switch device as present on
          the Ocelot SoCs.
index 1a4f2bb48ead712634ce5968e23144117d89b8d7..8f11fdba8d0edf9551432a3bfe136d1ffe2f6c86 100644
@@ -472,6 +472,7 @@ static int ocelot_port_open(struct net_device *dev)
 {
        struct ocelot_port *port = netdev_priv(dev);
        struct ocelot *ocelot = port->ocelot;
+       enum phy_mode phy_mode;
        int err;
 
        /* Enable receiving frames on the port, and activate auto-learning of
@@ -482,8 +483,21 @@ static int ocelot_port_open(struct net_device *dev)
                         ANA_PORT_PORT_CFG_PORTID_VAL(port->chip_port),
                         ANA_PORT_PORT_CFG, port->chip_port);
 
+       if (port->serdes) {
+               if (port->phy_mode == PHY_INTERFACE_MODE_SGMII)
+                       phy_mode = PHY_MODE_SGMII;
+               else
+                       phy_mode = PHY_MODE_QSGMII;
+
+               err = phy_set_mode(port->serdes, phy_mode);
+               if (err) {
+                       netdev_err(dev, "Could not set mode of SerDes\n");
+                       return err;
+               }
+       }
+
        err = phy_connect_direct(dev, port->phy, &ocelot_port_adjust_link,
-                                PHY_INTERFACE_MODE_NA);
+                                port->phy_mode);
        if (err) {
                netdev_err(dev, "Could not attach to PHY\n");
                return err;
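
Before attaching the copper PHY, ocelot_port_open() now programs the SerDes through the generic PHY framework. phy_set_mode() in this kernel takes an enum phy_mode, so the devicetree-level phy_interface_t has to be translated first; a sketch of that translation for the two modes the driver accepts (the helper name is illustrative):

        /* Sketch: map the DT phy-mode to a generic-PHY mode for phy_set_mode() */
        static enum phy_mode ocelot_iface_to_phy_mode(phy_interface_t iface)
        {
                return iface == PHY_INTERFACE_MODE_SGMII ? PHY_MODE_SGMII
                                                         : PHY_MODE_QSGMII;
        }
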
index 616bec30dfa3fe4b31a1295ef2239a57ac2cc36d..62c7c8eb00d93006881ae726902a171602369ef5 100644
 #include <linux/bitops.h>
 #include <linux/etherdevice.h>
 #include <linux/if_vlan.h>
+#include <linux/phy.h>
+#include <linux/phy/phy.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
 
 #include "ocelot_ana.h"
 #include "ocelot_dev.h"
-#include "ocelot_hsio.h"
 #include "ocelot_qsys.h"
 #include "ocelot_rew.h"
 #include "ocelot_sys.h"
@@ -333,79 +334,6 @@ enum ocelot_reg {
        SYS_CM_DATA_RD,
        SYS_CM_OP,
        SYS_CM_DATA,
-       HSIO_PLL5G_CFG0 = HSIO << TARGET_OFFSET,
-       HSIO_PLL5G_CFG1,
-       HSIO_PLL5G_CFG2,
-       HSIO_PLL5G_CFG3,
-       HSIO_PLL5G_CFG4,
-       HSIO_PLL5G_CFG5,
-       HSIO_PLL5G_CFG6,
-       HSIO_PLL5G_STATUS0,
-       HSIO_PLL5G_STATUS1,
-       HSIO_PLL5G_BIST_CFG0,
-       HSIO_PLL5G_BIST_CFG1,
-       HSIO_PLL5G_BIST_CFG2,
-       HSIO_PLL5G_BIST_STAT0,
-       HSIO_PLL5G_BIST_STAT1,
-       HSIO_RCOMP_CFG0,
-       HSIO_RCOMP_STATUS,
-       HSIO_SYNC_ETH_CFG,
-       HSIO_SYNC_ETH_PLL_CFG,
-       HSIO_S1G_DES_CFG,
-       HSIO_S1G_IB_CFG,
-       HSIO_S1G_OB_CFG,
-       HSIO_S1G_SER_CFG,
-       HSIO_S1G_COMMON_CFG,
-       HSIO_S1G_PLL_CFG,
-       HSIO_S1G_PLL_STATUS,
-       HSIO_S1G_DFT_CFG0,
-       HSIO_S1G_DFT_CFG1,
-       HSIO_S1G_DFT_CFG2,
-       HSIO_S1G_TP_CFG,
-       HSIO_S1G_RC_PLL_BIST_CFG,
-       HSIO_S1G_MISC_CFG,
-       HSIO_S1G_DFT_STATUS,
-       HSIO_S1G_MISC_STATUS,
-       HSIO_MCB_S1G_ADDR_CFG,
-       HSIO_S6G_DIG_CFG,
-       HSIO_S6G_DFT_CFG0,
-       HSIO_S6G_DFT_CFG1,
-       HSIO_S6G_DFT_CFG2,
-       HSIO_S6G_TP_CFG0,
-       HSIO_S6G_TP_CFG1,
-       HSIO_S6G_RC_PLL_BIST_CFG,
-       HSIO_S6G_MISC_CFG,
-       HSIO_S6G_OB_ANEG_CFG,
-       HSIO_S6G_DFT_STATUS,
-       HSIO_S6G_ERR_CNT,
-       HSIO_S6G_MISC_STATUS,
-       HSIO_S6G_DES_CFG,
-       HSIO_S6G_IB_CFG,
-       HSIO_S6G_IB_CFG1,
-       HSIO_S6G_IB_CFG2,
-       HSIO_S6G_IB_CFG3,
-       HSIO_S6G_IB_CFG4,
-       HSIO_S6G_IB_CFG5,
-       HSIO_S6G_OB_CFG,
-       HSIO_S6G_OB_CFG1,
-       HSIO_S6G_SER_CFG,
-       HSIO_S6G_COMMON_CFG,
-       HSIO_S6G_PLL_CFG,
-       HSIO_S6G_ACJTAG_CFG,
-       HSIO_S6G_GP_CFG,
-       HSIO_S6G_IB_STATUS0,
-       HSIO_S6G_IB_STATUS1,
-       HSIO_S6G_ACJTAG_STATUS,
-       HSIO_S6G_PLL_STATUS,
-       HSIO_S6G_REVID,
-       HSIO_MCB_S6G_ADDR_CFG,
-       HSIO_HW_CFG,
-       HSIO_HW_QSGMII_CFG,
-       HSIO_HW_QSGMII_STAT,
-       HSIO_CLK_CFG,
-       HSIO_TEMP_SENSOR_CTRL,
-       HSIO_TEMP_SENSOR_CFG,
-       HSIO_TEMP_SENSOR_STAT,
 };
 
 enum ocelot_regfield {
@@ -527,6 +455,9 @@ struct ocelot_port {
        u8 vlan_aware;
 
        u64 *stats;
+
+       phy_interface_t phy_mode;
+       struct phy *serdes;
 };
 
 u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
index 3cdf63e35b53bc1ea3e2f445e6a3746a56655bd3..0cf0b0935b3b00bc4ac5e5cf0dd218723ca63f06 100644
@@ -6,9 +6,11 @@
  */
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/of_net.h>
 #include <linux/netdevice.h>
 #include <linux/of_mdio.h>
 #include <linux/of_platform.h>
+#include <linux/mfd/syscon.h>
 #include <linux/skbuff.h>
 
 #include "ocelot.h"
@@ -168,6 +170,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
        struct device_node *np = pdev->dev.of_node;
        struct device_node *ports, *portnp;
        struct ocelot *ocelot;
+       struct regmap *hsio;
        u32 val;
 
        struct {
@@ -179,7 +182,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
                { QSYS, "qsys" },
                { ANA, "ana" },
                { QS, "qs" },
-               { HSIO, "hsio" },
        };
 
        if (!np && !pdev->dev.platform_data)
@@ -202,6 +204,14 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
                ocelot->targets[res[i].id] = target;
        }
 
+       hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio");
+       if (IS_ERR(hsio)) {
+               dev_err(&pdev->dev, "missing hsio syscon\n");
+               return PTR_ERR(hsio);
+       }
+
+       ocelot->targets[HSIO] = hsio;
+
        err = ocelot_chip_init(ocelot);
        if (err)
                return err;
@@ -244,18 +254,11 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
        INIT_LIST_HEAD(&ocelot->multicast);
        ocelot_init(ocelot);
 
-       ocelot_rmw(ocelot, HSIO_HW_CFG_DEV1G_4_MODE |
-                    HSIO_HW_CFG_DEV1G_6_MODE |
-                    HSIO_HW_CFG_DEV1G_9_MODE,
-                    HSIO_HW_CFG_DEV1G_4_MODE |
-                    HSIO_HW_CFG_DEV1G_6_MODE |
-                    HSIO_HW_CFG_DEV1G_9_MODE,
-                    HSIO_HW_CFG);
-
        for_each_available_child_of_node(ports, portnp) {
                struct device_node *phy_node;
                struct phy_device *phy;
                struct resource *res;
+               struct phy *serdes;
                void __iomem *regs;
                char res_name[8];
                u32 port;
@@ -280,10 +283,43 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
                        continue;
 
                err = ocelot_probe_port(ocelot, port, regs, phy);
-               if (err) {
-                       dev_err(&pdev->dev, "failed to probe ports\n");
+               if (err)
+                       return err;
+
+               err = of_get_phy_mode(portnp);
+               if (err < 0)
+                       ocelot->ports[port]->phy_mode = PHY_INTERFACE_MODE_NA;
+               else
+                       ocelot->ports[port]->phy_mode = err;
+
+               switch (ocelot->ports[port]->phy_mode) {
+               case PHY_INTERFACE_MODE_NA:
+                       continue;
+               case PHY_INTERFACE_MODE_SGMII:
+                       break;
+               case PHY_INTERFACE_MODE_QSGMII:
+                       break;
+               default:
+                       dev_err(ocelot->dev,
+                               "invalid phy mode for port%d, (Q)SGMII only\n",
+                               port);
+                       return -EINVAL;
+               }
+
+               serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
+               if (IS_ERR(serdes)) {
+                       err = PTR_ERR(serdes);
+                       if (err == -EPROBE_DEFER)
+                               dev_dbg(ocelot->dev, "deferring probe\n");
+                       else
+                               dev_err(ocelot->dev,
+                                       "missing SerDes phys for port%d\n",
+                                       port);
+
                        goto err_probe_ports;
                }
+
+               ocelot->ports[port]->serdes = serdes;
        }
 
        register_netdevice_notifier(&ocelot_netdevice_nb);
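
The probe changes make the HSIO register block a syscon regmap, shared with the new phy-ocelot-serdes driver, and fetch each port's SerDes PHY from devicetree, deferring probe quietly when the PHY provider has not bound yet. The acquisition pattern, condensed from the hunks above (declarations and error unwinding omitted):

        hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio");
        if (IS_ERR(hsio))
                return PTR_ERR(hsio);           /* the hsio syscon is mandatory */

        serdes = devm_of_phy_get(dev, portnp, NULL);
        if (IS_ERR(serdes)) {
                err = PTR_ERR(serdes);
                if (err == -EPROBE_DEFER)       /* provider not ready yet */
                        dev_dbg(dev, "deferring probe\n");
                return err;
        }
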
diff --git a/drivers/net/ethernet/mscc/ocelot_hsio.h b/drivers/net/ethernet/mscc/ocelot_hsio.h
deleted file mode 100644
index d93ddec..0000000
--- a/drivers/net/ethernet/mscc/ocelot_hsio.h
+++ /dev/null
@@ -1,785 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/*
- * Microsemi Ocelot Switch driver
- *
- * Copyright (c) 2017 Microsemi Corporation
- */
-
-#ifndef _MSCC_OCELOT_HSIO_H_
-#define _MSCC_OCELOT_HSIO_H_
-
-#define HSIO_PLL5G_CFG0_ENA_ROT                           BIT(31)
-#define HSIO_PLL5G_CFG0_ENA_LANE                          BIT(30)
-#define HSIO_PLL5G_CFG0_ENA_CLKTREE                       BIT(29)
-#define HSIO_PLL5G_CFG0_DIV4                              BIT(28)
-#define HSIO_PLL5G_CFG0_ENA_LOCK_FINE                     BIT(27)
-#define HSIO_PLL5G_CFG0_SELBGV820(x)                      (((x) << 23) & GENMASK(26, 23))
-#define HSIO_PLL5G_CFG0_SELBGV820_M                       GENMASK(26, 23)
-#define HSIO_PLL5G_CFG0_SELBGV820_X(x)                    (((x) & GENMASK(26, 23)) >> 23)
-#define HSIO_PLL5G_CFG0_LOOP_BW_RES(x)                    (((x) << 18) & GENMASK(22, 18))
-#define HSIO_PLL5G_CFG0_LOOP_BW_RES_M                     GENMASK(22, 18)
-#define HSIO_PLL5G_CFG0_LOOP_BW_RES_X(x)                  (((x) & GENMASK(22, 18)) >> 18)
-#define HSIO_PLL5G_CFG0_SELCPI(x)                         (((x) << 16) & GENMASK(17, 16))
-#define HSIO_PLL5G_CFG0_SELCPI_M                          GENMASK(17, 16)
-#define HSIO_PLL5G_CFG0_SELCPI_X(x)                       (((x) & GENMASK(17, 16)) >> 16)
-#define HSIO_PLL5G_CFG0_ENA_VCO_CONTRH                    BIT(15)
-#define HSIO_PLL5G_CFG0_ENA_CP1                           BIT(14)
-#define HSIO_PLL5G_CFG0_ENA_VCO_BUF                       BIT(13)
-#define HSIO_PLL5G_CFG0_ENA_BIAS                          BIT(12)
-#define HSIO_PLL5G_CFG0_CPU_CLK_DIV(x)                    (((x) << 6) & GENMASK(11, 6))
-#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_M                     GENMASK(11, 6)
-#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_X(x)                  (((x) & GENMASK(11, 6)) >> 6)
-#define HSIO_PLL5G_CFG0_CORE_CLK_DIV(x)                   ((x) & GENMASK(5, 0))
-#define HSIO_PLL5G_CFG0_CORE_CLK_DIV_M                    GENMASK(5, 0)
-
-#define HSIO_PLL5G_CFG1_ENA_DIRECT                        BIT(18)
-#define HSIO_PLL5G_CFG1_ROT_SPEED                         BIT(17)
-#define HSIO_PLL5G_CFG1_ROT_DIR                           BIT(16)
-#define HSIO_PLL5G_CFG1_READBACK_DATA_SEL                 BIT(15)
-#define HSIO_PLL5G_CFG1_RC_ENABLE                         BIT(14)
-#define HSIO_PLL5G_CFG1_RC_CTRL_DATA(x)                   (((x) << 6) & GENMASK(13, 6))
-#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_M                    GENMASK(13, 6)
-#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_X(x)                 (((x) & GENMASK(13, 6)) >> 6)
-#define HSIO_PLL5G_CFG1_QUARTER_RATE                      BIT(5)
-#define HSIO_PLL5G_CFG1_PWD_TX                            BIT(4)
-#define HSIO_PLL5G_CFG1_PWD_RX                            BIT(3)
-#define HSIO_PLL5G_CFG1_OUT_OF_RANGE_RECAL_ENA            BIT(2)
-#define HSIO_PLL5G_CFG1_HALF_RATE                         BIT(1)
-#define HSIO_PLL5G_CFG1_FORCE_SET_ENA                     BIT(0)
-
-#define HSIO_PLL5G_CFG2_ENA_TEST_MODE                     BIT(30)
-#define HSIO_PLL5G_CFG2_ENA_PFD_IN_FLIP                   BIT(29)
-#define HSIO_PLL5G_CFG2_ENA_VCO_NREF_TESTOUT              BIT(28)
-#define HSIO_PLL5G_CFG2_ENA_FBTESTOUT                     BIT(27)
-#define HSIO_PLL5G_CFG2_ENA_RCPLL                         BIT(26)
-#define HSIO_PLL5G_CFG2_ENA_CP2                           BIT(25)
-#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS1                   BIT(24)
-#define HSIO_PLL5G_CFG2_AMPC_SEL(x)                       (((x) << 16) & GENMASK(23, 16))
-#define HSIO_PLL5G_CFG2_AMPC_SEL_M                        GENMASK(23, 16)
-#define HSIO_PLL5G_CFG2_AMPC_SEL_X(x)                     (((x) & GENMASK(23, 16)) >> 16)
-#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS                    BIT(15)
-#define HSIO_PLL5G_CFG2_PWD_AMPCTRL_N                     BIT(14)
-#define HSIO_PLL5G_CFG2_ENA_AMPCTRL                       BIT(13)
-#define HSIO_PLL5G_CFG2_ENA_AMP_CTRL_FORCE                BIT(12)
-#define HSIO_PLL5G_CFG2_FRC_FSM_POR                       BIT(11)
-#define HSIO_PLL5G_CFG2_DISABLE_FSM_POR                   BIT(10)
-#define HSIO_PLL5G_CFG2_GAIN_TEST(x)                      (((x) << 5) & GENMASK(9, 5))
-#define HSIO_PLL5G_CFG2_GAIN_TEST_M                       GENMASK(9, 5)
-#define HSIO_PLL5G_CFG2_GAIN_TEST_X(x)                    (((x) & GENMASK(9, 5)) >> 5)
-#define HSIO_PLL5G_CFG2_EN_RESET_OVERRUN                  BIT(4)
-#define HSIO_PLL5G_CFG2_EN_RESET_LIM_DET                  BIT(3)
-#define HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET                  BIT(2)
-#define HSIO_PLL5G_CFG2_DISABLE_FSM                       BIT(1)
-#define HSIO_PLL5G_CFG2_ENA_GAIN_TEST                     BIT(0)
-
-#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL(x)               (((x) << 22) & GENMASK(23, 22))
-#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_M                GENMASK(23, 22)
-#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_X(x)             (((x) & GENMASK(23, 22)) >> 22)
-#define HSIO_PLL5G_CFG3_TESTOUT_SEL(x)                    (((x) << 19) & GENMASK(21, 19))
-#define HSIO_PLL5G_CFG3_TESTOUT_SEL_M                     GENMASK(21, 19)
-#define HSIO_PLL5G_CFG3_TESTOUT_SEL_X(x)                  (((x) & GENMASK(21, 19)) >> 19)
-#define HSIO_PLL5G_CFG3_ENA_ANA_TEST_OUT                  BIT(18)
-#define HSIO_PLL5G_CFG3_ENA_TEST_OUT                      BIT(17)
-#define HSIO_PLL5G_CFG3_SEL_FBDCLK                        BIT(16)
-#define HSIO_PLL5G_CFG3_SEL_CML_CMOS_PFD                  BIT(15)
-#define HSIO_PLL5G_CFG3_RST_FB_N                          BIT(14)
-#define HSIO_PLL5G_CFG3_FORCE_VCO_CONTRH                  BIT(13)
-#define HSIO_PLL5G_CFG3_FORCE_LO                          BIT(12)
-#define HSIO_PLL5G_CFG3_FORCE_HI                          BIT(11)
-#define HSIO_PLL5G_CFG3_FORCE_ENA                         BIT(10)
-#define HSIO_PLL5G_CFG3_FORCE_CP                          BIT(9)
-#define HSIO_PLL5G_CFG3_FBDIVSEL_TST_ENA                  BIT(8)
-#define HSIO_PLL5G_CFG3_FBDIVSEL(x)                       ((x) & GENMASK(7, 0))
-#define HSIO_PLL5G_CFG3_FBDIVSEL_M                        GENMASK(7, 0)
-
-#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL(x)                   (((x) << 16) & GENMASK(23, 16))
-#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_M                    GENMASK(23, 16)
-#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_X(x)                 (((x) & GENMASK(23, 16)) >> 16)
-#define HSIO_PLL5G_CFG4_IB_CTRL(x)                        ((x) & GENMASK(15, 0))
-#define HSIO_PLL5G_CFG4_IB_CTRL_M                         GENMASK(15, 0)
-
-#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL(x)                   (((x) << 16) & GENMASK(23, 16))
-#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_M                    GENMASK(23, 16)
-#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_X(x)                 (((x) & GENMASK(23, 16)) >> 16)
-#define HSIO_PLL5G_CFG5_OB_CTRL(x)                        ((x) & GENMASK(15, 0))
-#define HSIO_PLL5G_CFG5_OB_CTRL_M                         GENMASK(15, 0)
-
-#define HSIO_PLL5G_CFG6_REFCLK_SEL_SRC                    BIT(23)
-#define HSIO_PLL5G_CFG6_REFCLK_SEL(x)                     (((x) << 20) & GENMASK(22, 20))
-#define HSIO_PLL5G_CFG6_REFCLK_SEL_M                      GENMASK(22, 20)
-#define HSIO_PLL5G_CFG6_REFCLK_SEL_X(x)                   (((x) & GENMASK(22, 20)) >> 20)
-#define HSIO_PLL5G_CFG6_REFCLK_SRC                        BIT(19)
-#define HSIO_PLL5G_CFG6_POR_DEL_SEL(x)                    (((x) << 16) & GENMASK(17, 16))
-#define HSIO_PLL5G_CFG6_POR_DEL_SEL_M                     GENMASK(17, 16)
-#define HSIO_PLL5G_CFG6_POR_DEL_SEL_X(x)                  (((x) & GENMASK(17, 16)) >> 16)
-#define HSIO_PLL5G_CFG6_DIV125REF_SEL(x)                  (((x) << 8) & GENMASK(15, 8))
-#define HSIO_PLL5G_CFG6_DIV125REF_SEL_M                   GENMASK(15, 8)
-#define HSIO_PLL5G_CFG6_DIV125REF_SEL_X(x)                (((x) & GENMASK(15, 8)) >> 8)
-#define HSIO_PLL5G_CFG6_ENA_REFCLKC2                      BIT(7)
-#define HSIO_PLL5G_CFG6_ENA_FBCLKC2                       BIT(6)
-#define HSIO_PLL5G_CFG6_DDR_CLK_DIV(x)                    ((x) & GENMASK(5, 0))
-#define HSIO_PLL5G_CFG6_DDR_CLK_DIV_M                     GENMASK(5, 0)
-
-#define HSIO_PLL5G_STATUS0_RANGE_LIM                      BIT(12)
-#define HSIO_PLL5G_STATUS0_OUT_OF_RANGE_ERR               BIT(11)
-#define HSIO_PLL5G_STATUS0_CALIBRATION_ERR                BIT(10)
-#define HSIO_PLL5G_STATUS0_CALIBRATION_DONE               BIT(9)
-#define HSIO_PLL5G_STATUS0_READBACK_DATA(x)               (((x) << 1) & GENMASK(8, 1))
-#define HSIO_PLL5G_STATUS0_READBACK_DATA_M                GENMASK(8, 1)
-#define HSIO_PLL5G_STATUS0_READBACK_DATA_X(x)             (((x) & GENMASK(8, 1)) >> 1)
-#define HSIO_PLL5G_STATUS0_LOCK_STATUS                    BIT(0)
-
-#define HSIO_PLL5G_STATUS1_SIG_DEL(x)                     (((x) << 21) & GENMASK(28, 21))
-#define HSIO_PLL5G_STATUS1_SIG_DEL_M                      GENMASK(28, 21)
-#define HSIO_PLL5G_STATUS1_SIG_DEL_X(x)                   (((x) & GENMASK(28, 21)) >> 21)
-#define HSIO_PLL5G_STATUS1_GAIN_STAT(x)                   (((x) << 16) & GENMASK(20, 16))
-#define HSIO_PLL5G_STATUS1_GAIN_STAT_M                    GENMASK(20, 16)
-#define HSIO_PLL5G_STATUS1_GAIN_STAT_X(x)                 (((x) & GENMASK(20, 16)) >> 16)
-#define HSIO_PLL5G_STATUS1_FBCNT_DIF(x)                   (((x) << 4) & GENMASK(13, 4))
-#define HSIO_PLL5G_STATUS1_FBCNT_DIF_M                    GENMASK(13, 4)
-#define HSIO_PLL5G_STATUS1_FBCNT_DIF_X(x)                 (((x) & GENMASK(13, 4)) >> 4)
-#define HSIO_PLL5G_STATUS1_FSM_STAT(x)                    (((x) << 1) & GENMASK(3, 1))
-#define HSIO_PLL5G_STATUS1_FSM_STAT_M                     GENMASK(3, 1)
-#define HSIO_PLL5G_STATUS1_FSM_STAT_X(x)                  (((x) & GENMASK(3, 1)) >> 1)
-#define HSIO_PLL5G_STATUS1_FSM_LOCK                       BIT(0)
-
-#define HSIO_PLL5G_BIST_CFG0_PLLB_START_BIST              BIT(31)
-#define HSIO_PLL5G_BIST_CFG0_PLLB_MEAS_MODE               BIT(30)
-#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT(x)          (((x) << 20) & GENMASK(23, 20))
-#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_M           GENMASK(23, 20)
-#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_X(x)        (((x) & GENMASK(23, 20)) >> 20)
-#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT(x)          (((x) << 16) & GENMASK(19, 16))
-#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_M           GENMASK(19, 16)
-#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_X(x)        (((x) & GENMASK(19, 16)) >> 16)
-#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE(x)       ((x) & GENMASK(15, 0))
-#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE_M        GENMASK(15, 0)
-
-#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT(x)            (((x) << 4) & GENMASK(7, 4))
-#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_M             GENMASK(7, 4)
-#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_X(x)          (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_PLL5G_BIST_STAT0_PLLB_BUSY                   BIT(2)
-#define HSIO_PLL5G_BIST_STAT0_PLLB_DONE_N                 BIT(1)
-#define HSIO_PLL5G_BIST_STAT0_PLLB_FAIL                   BIT(0)
-
-#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT(x)             (((x) << 16) & GENMASK(31, 16))
-#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_M              GENMASK(31, 16)
-#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_X(x)           (((x) & GENMASK(31, 16)) >> 16)
-#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF(x)        ((x) & GENMASK(15, 0))
-#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF_M         GENMASK(15, 0)
-
-#define HSIO_RCOMP_CFG0_PWD_ENA                           BIT(13)
-#define HSIO_RCOMP_CFG0_RUN_CAL                           BIT(12)
-#define HSIO_RCOMP_CFG0_SPEED_SEL(x)                      (((x) << 10) & GENMASK(11, 10))
-#define HSIO_RCOMP_CFG0_SPEED_SEL_M                       GENMASK(11, 10)
-#define HSIO_RCOMP_CFG0_SPEED_SEL_X(x)                    (((x) & GENMASK(11, 10)) >> 10)
-#define HSIO_RCOMP_CFG0_MODE_SEL(x)                       (((x) << 8) & GENMASK(9, 8))
-#define HSIO_RCOMP_CFG0_MODE_SEL_M                        GENMASK(9, 8)
-#define HSIO_RCOMP_CFG0_MODE_SEL_X(x)                     (((x) & GENMASK(9, 8)) >> 8)
-#define HSIO_RCOMP_CFG0_FORCE_ENA                         BIT(4)
-#define HSIO_RCOMP_CFG0_RCOMP_VAL(x)                      ((x) & GENMASK(3, 0))
-#define HSIO_RCOMP_CFG0_RCOMP_VAL_M                       GENMASK(3, 0)
-
-#define HSIO_RCOMP_STATUS_BUSY                            BIT(12)
-#define HSIO_RCOMP_STATUS_DELTA_ALERT                     BIT(7)
-#define HSIO_RCOMP_STATUS_RCOMP(x)                        ((x) & GENMASK(3, 0))
-#define HSIO_RCOMP_STATUS_RCOMP_M                         GENMASK(3, 0)
-
-#define HSIO_SYNC_ETH_CFG_RSZ                             0x4
-
-#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC(x)             (((x) << 4) & GENMASK(7, 4))
-#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_M              GENMASK(7, 4)
-#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_X(x)           (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV(x)             (((x) << 1) & GENMASK(3, 1))
-#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_M              GENMASK(3, 1)
-#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_X(x)           (((x) & GENMASK(3, 1)) >> 1)
-#define HSIO_SYNC_ETH_CFG_RECO_CLK_ENA                    BIT(0)
-
-#define HSIO_SYNC_ETH_PLL_CFG_PLL_AUTO_SQUELCH_ENA        BIT(0)
-
-#define HSIO_S1G_DES_CFG_DES_PHS_CTRL(x)                  (((x) << 13) & GENMASK(16, 13))
-#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_M                   GENMASK(16, 13)
-#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_X(x)                (((x) & GENMASK(16, 13)) >> 13)
-#define HSIO_S1G_DES_CFG_DES_CPMD_SEL(x)                  (((x) << 11) & GENMASK(12, 11))
-#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_M                   GENMASK(12, 11)
-#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_X(x)                (((x) & GENMASK(12, 11)) >> 11)
-#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL(x)                 (((x) << 8) & GENMASK(10, 8))
-#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_M                  GENMASK(10, 8)
-#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_X(x)               (((x) & GENMASK(10, 8)) >> 8)
-#define HSIO_S1G_DES_CFG_DES_BW_ANA(x)                    (((x) << 5) & GENMASK(7, 5))
-#define HSIO_S1G_DES_CFG_DES_BW_ANA_M                     GENMASK(7, 5)
-#define HSIO_S1G_DES_CFG_DES_BW_ANA_X(x)                  (((x) & GENMASK(7, 5)) >> 5)
-#define HSIO_S1G_DES_CFG_DES_SWAP_ANA                     BIT(4)
-#define HSIO_S1G_DES_CFG_DES_BW_HYST(x)                   (((x) << 1) & GENMASK(3, 1))
-#define HSIO_S1G_DES_CFG_DES_BW_HYST_M                    GENMASK(3, 1)
-#define HSIO_S1G_DES_CFG_DES_BW_HYST_X(x)                 (((x) & GENMASK(3, 1)) >> 1)
-#define HSIO_S1G_DES_CFG_DES_SWAP_HYST                    BIT(0)
-
-#define HSIO_S1G_IB_CFG_IB_FX100_ENA                      BIT(27)
-#define HSIO_S1G_IB_CFG_ACJTAG_HYST(x)                    (((x) << 24) & GENMASK(26, 24))
-#define HSIO_S1G_IB_CFG_ACJTAG_HYST_M                     GENMASK(26, 24)
-#define HSIO_S1G_IB_CFG_ACJTAG_HYST_X(x)                  (((x) & GENMASK(26, 24)) >> 24)
-#define HSIO_S1G_IB_CFG_IB_DET_LEV(x)                     (((x) << 19) & GENMASK(21, 19))
-#define HSIO_S1G_IB_CFG_IB_DET_LEV_M                      GENMASK(21, 19)
-#define HSIO_S1G_IB_CFG_IB_DET_LEV_X(x)                   (((x) & GENMASK(21, 19)) >> 19)
-#define HSIO_S1G_IB_CFG_IB_HYST_LEV                       BIT(14)
-#define HSIO_S1G_IB_CFG_IB_ENA_CMV_TERM                   BIT(13)
-#define HSIO_S1G_IB_CFG_IB_ENA_DC_COUPLING                BIT(12)
-#define HSIO_S1G_IB_CFG_IB_ENA_DETLEV                     BIT(11)
-#define HSIO_S1G_IB_CFG_IB_ENA_HYST                       BIT(10)
-#define HSIO_S1G_IB_CFG_IB_ENA_OFFSET_COMP                BIT(9)
-#define HSIO_S1G_IB_CFG_IB_EQ_GAIN(x)                     (((x) << 6) & GENMASK(8, 6))
-#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_M                      GENMASK(8, 6)
-#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_X(x)                   (((x) & GENMASK(8, 6)) >> 6)
-#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ(x)             (((x) << 4) & GENMASK(5, 4))
-#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_M              GENMASK(5, 4)
-#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_X(x)           (((x) & GENMASK(5, 4)) >> 4)
-#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL(x)               ((x) & GENMASK(3, 0))
-#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL_M                GENMASK(3, 0)
-
-#define HSIO_S1G_OB_CFG_OB_SLP(x)                         (((x) << 17) & GENMASK(18, 17))
-#define HSIO_S1G_OB_CFG_OB_SLP_M                          GENMASK(18, 17)
-#define HSIO_S1G_OB_CFG_OB_SLP_X(x)                       (((x) & GENMASK(18, 17)) >> 17)
-#define HSIO_S1G_OB_CFG_OB_AMP_CTRL(x)                    (((x) << 13) & GENMASK(16, 13))
-#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_M                     GENMASK(16, 13)
-#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_X(x)                  (((x) & GENMASK(16, 13)) >> 13)
-#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL(x)               (((x) << 10) & GENMASK(12, 10))
-#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_M                GENMASK(12, 10)
-#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_X(x)             (((x) & GENMASK(12, 10)) >> 10)
-#define HSIO_S1G_OB_CFG_OB_DIS_VCM_CTRL                   BIT(9)
-#define HSIO_S1G_OB_CFG_OB_EN_MEAS_VREG                   BIT(8)
-#define HSIO_S1G_OB_CFG_OB_VCM_CTRL(x)                    (((x) << 4) & GENMASK(7, 4))
-#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_M                     GENMASK(7, 4)
-#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_X(x)                  (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL(x)               ((x) & GENMASK(3, 0))
-#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL_M                GENMASK(3, 0)
-
-#define HSIO_S1G_SER_CFG_SER_IDLE                         BIT(9)
-#define HSIO_S1G_SER_CFG_SER_DEEMPH                       BIT(8)
-#define HSIO_S1G_SER_CFG_SER_CPMD_SEL                     BIT(7)
-#define HSIO_S1G_SER_CFG_SER_SWAP_CPMD                    BIT(6)
-#define HSIO_S1G_SER_CFG_SER_ALISEL(x)                    (((x) << 4) & GENMASK(5, 4))
-#define HSIO_S1G_SER_CFG_SER_ALISEL_M                     GENMASK(5, 4)
-#define HSIO_S1G_SER_CFG_SER_ALISEL_X(x)                  (((x) & GENMASK(5, 4)) >> 4)
-#define HSIO_S1G_SER_CFG_SER_ENHYS                        BIT(3)
-#define HSIO_S1G_SER_CFG_SER_BIG_WIN                      BIT(2)
-#define HSIO_S1G_SER_CFG_SER_EN_WIN                       BIT(1)
-#define HSIO_S1G_SER_CFG_SER_ENALI                        BIT(0)
-
-#define HSIO_S1G_COMMON_CFG_SYS_RST                       BIT(31)
-#define HSIO_S1G_COMMON_CFG_SE_AUTO_SQUELCH_ENA           BIT(21)
-#define HSIO_S1G_COMMON_CFG_ENA_LANE                      BIT(18)
-#define HSIO_S1G_COMMON_CFG_PWD_RX                        BIT(17)
-#define HSIO_S1G_COMMON_CFG_PWD_TX                        BIT(16)
-#define HSIO_S1G_COMMON_CFG_LANE_CTRL(x)                  (((x) << 13) & GENMASK(15, 13))
-#define HSIO_S1G_COMMON_CFG_LANE_CTRL_M                   GENMASK(15, 13)
-#define HSIO_S1G_COMMON_CFG_LANE_CTRL_X(x)                (((x) & GENMASK(15, 13)) >> 13)
-#define HSIO_S1G_COMMON_CFG_ENA_DIRECT                    BIT(12)
-#define HSIO_S1G_COMMON_CFG_ENA_ELOOP                     BIT(11)
-#define HSIO_S1G_COMMON_CFG_ENA_FLOOP                     BIT(10)
-#define HSIO_S1G_COMMON_CFG_ENA_ILOOP                     BIT(9)
-#define HSIO_S1G_COMMON_CFG_ENA_PLOOP                     BIT(8)
-#define HSIO_S1G_COMMON_CFG_HRATE                         BIT(7)
-#define HSIO_S1G_COMMON_CFG_IF_MODE                       BIT(0)
-
-#define HSIO_S1G_PLL_CFG_PLL_ENA_FB_DIV2                  BIT(22)
-#define HSIO_S1G_PLL_CFG_PLL_ENA_RC_DIV2                  BIT(21)
-#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA(x)             (((x) << 8) & GENMASK(15, 8))
-#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_M              GENMASK(15, 8)
-#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x)           (((x) & GENMASK(15, 8)) >> 8)
-#define HSIO_S1G_PLL_CFG_PLL_FSM_ENA                      BIT(7)
-#define HSIO_S1G_PLL_CFG_PLL_FSM_FORCE_SET_ENA            BIT(6)
-#define HSIO_S1G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA            BIT(5)
-#define HSIO_S1G_PLL_CFG_PLL_RB_DATA_SEL                  BIT(3)
-
-#define HSIO_S1G_PLL_STATUS_PLL_CAL_NOT_DONE              BIT(12)
-#define HSIO_S1G_PLL_STATUS_PLL_CAL_ERR                   BIT(11)
-#define HSIO_S1G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR          BIT(10)
-#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA(x)                ((x) & GENMASK(7, 0))
-#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA_M                 GENMASK(7, 0)
-
-#define HSIO_S1G_DFT_CFG0_LAZYBIT                         BIT(31)
-#define HSIO_S1G_DFT_CFG0_INV_DIS                         BIT(23)
-#define HSIO_S1G_DFT_CFG0_PRBS_SEL(x)                     (((x) << 20) & GENMASK(21, 20))
-#define HSIO_S1G_DFT_CFG0_PRBS_SEL_M                      GENMASK(21, 20)
-#define HSIO_S1G_DFT_CFG0_PRBS_SEL_X(x)                   (((x) & GENMASK(21, 20)) >> 20)
-#define HSIO_S1G_DFT_CFG0_TEST_MODE(x)                    (((x) << 16) & GENMASK(18, 16))
-#define HSIO_S1G_DFT_CFG0_TEST_MODE_M                     GENMASK(18, 16)
-#define HSIO_S1G_DFT_CFG0_TEST_MODE_X(x)                  (((x) & GENMASK(18, 16)) >> 16)
-#define HSIO_S1G_DFT_CFG0_RX_PHS_CORR_DIS                 BIT(4)
-#define HSIO_S1G_DFT_CFG0_RX_PDSENS_ENA                   BIT(3)
-#define HSIO_S1G_DFT_CFG0_RX_DFT_ENA                      BIT(2)
-#define HSIO_S1G_DFT_CFG0_TX_DFT_ENA                      BIT(0)
-
-#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL(x)               (((x) << 8) & GENMASK(17, 8))
-#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_M                GENMASK(17, 8)
-#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_X(x)             (((x) & GENMASK(17, 8)) >> 8)
-#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ(x)                 (((x) << 4) & GENMASK(7, 4))
-#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_M                  GENMASK(7, 4)
-#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_X(x)               (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_S1G_DFT_CFG1_TX_JI_ENA                       BIT(3)
-#define HSIO_S1G_DFT_CFG1_TX_WAVEFORM_SEL                 BIT(2)
-#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_DIR                  BIT(1)
-#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_ENA                  BIT(0)
-
-#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL(x)               (((x) << 8) & GENMASK(17, 8))
-#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_M                GENMASK(17, 8)
-#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_X(x)             (((x) & GENMASK(17, 8)) >> 8)
-#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ(x)                 (((x) << 4) & GENMASK(7, 4))
-#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_M                  GENMASK(7, 4)
-#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_X(x)               (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_S1G_DFT_CFG2_RX_JI_ENA                       BIT(3)
-#define HSIO_S1G_DFT_CFG2_RX_WAVEFORM_SEL                 BIT(2)
-#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_DIR                  BIT(1)
-#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_ENA                  BIT(0)
-
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_ENA             BIT(20)
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x)     (((x) << 16) & GENMASK(17, 16))
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M      GENMASK(17, 16)
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x)   (((x) & GENMASK(17, 16)) >> 16)
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x)         (((x) << 8) & GENMASK(15, 8))
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M          GENMASK(15, 8)
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x)       (((x) & GENMASK(15, 8)) >> 8)
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x)          ((x) & GENMASK(7, 0))
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M           GENMASK(7, 0)
-
-#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE(x)          (((x) << 11) & GENMASK(12, 11))
-#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_M           GENMASK(12, 11)
-#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_X(x)        (((x) & GENMASK(12, 11)) >> 11)
-#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_SWAP             BIT(10)
-#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_MODE             BIT(9)
-#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_ENA              BIT(8)
-#define HSIO_S1G_MISC_CFG_RX_LPI_MODE_ENA                 BIT(5)
-#define HSIO_S1G_MISC_CFG_TX_LPI_MODE_ENA                 BIT(4)
-#define HSIO_S1G_MISC_CFG_RX_DATA_INV_ENA                 BIT(3)
-#define HSIO_S1G_MISC_CFG_TX_DATA_INV_ENA                 BIT(2)
-#define HSIO_S1G_MISC_CFG_LANE_RST                        BIT(0)
-
-#define HSIO_S1G_DFT_STATUS_PLL_BIST_NOT_DONE             BIT(7)
-#define HSIO_S1G_DFT_STATUS_PLL_BIST_FAILED               BIT(6)
-#define HSIO_S1G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR          BIT(5)
-#define HSIO_S1G_DFT_STATUS_BIST_ACTIVE                   BIT(3)
-#define HSIO_S1G_DFT_STATUS_BIST_NOSYNC                   BIT(2)
-#define HSIO_S1G_DFT_STATUS_BIST_COMPLETE_N               BIT(1)
-#define HSIO_S1G_DFT_STATUS_BIST_ERROR                    BIT(0)
-
-#define HSIO_S1G_MISC_STATUS_DES_100FX_PHASE_SEL          BIT(0)
-
-#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_WR_ONE_SHOT        BIT(31)
-#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_RD_ONE_SHOT        BIT(30)
-#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR(x)            ((x) & GENMASK(8, 0))
-#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR_M             GENMASK(8, 0)
-
-#define HSIO_S6G_DIG_CFG_GP(x)                            (((x) << 16) & GENMASK(18, 16))
-#define HSIO_S6G_DIG_CFG_GP_M                             GENMASK(18, 16)
-#define HSIO_S6G_DIG_CFG_GP_X(x)                          (((x) & GENMASK(18, 16)) >> 16)
-#define HSIO_S6G_DIG_CFG_TX_BIT_DOUBLING_MODE_ENA         BIT(7)
-#define HSIO_S6G_DIG_CFG_SIGDET_TESTMODE                  BIT(6)
-#define HSIO_S6G_DIG_CFG_SIGDET_AST(x)                    (((x) << 3) & GENMASK(5, 3))
-#define HSIO_S6G_DIG_CFG_SIGDET_AST_M                     GENMASK(5, 3)
-#define HSIO_S6G_DIG_CFG_SIGDET_AST_X(x)                  (((x) & GENMASK(5, 3)) >> 3)
-#define HSIO_S6G_DIG_CFG_SIGDET_DST(x)                    ((x) & GENMASK(2, 0))
-#define HSIO_S6G_DIG_CFG_SIGDET_DST_M                     GENMASK(2, 0)
-
-#define HSIO_S6G_DFT_CFG0_LAZYBIT                         BIT(31)
-#define HSIO_S6G_DFT_CFG0_INV_DIS                         BIT(23)
-#define HSIO_S6G_DFT_CFG0_PRBS_SEL(x)                     (((x) << 20) & GENMASK(21, 20))
-#define HSIO_S6G_DFT_CFG0_PRBS_SEL_M                      GENMASK(21, 20)
-#define HSIO_S6G_DFT_CFG0_PRBS_SEL_X(x)                   (((x) & GENMASK(21, 20)) >> 20)
-#define HSIO_S6G_DFT_CFG0_TEST_MODE(x)                    (((x) << 16) & GENMASK(18, 16))
-#define HSIO_S6G_DFT_CFG0_TEST_MODE_M                     GENMASK(18, 16)
-#define HSIO_S6G_DFT_CFG0_TEST_MODE_X(x)                  (((x) & GENMASK(18, 16)) >> 16)
-#define HSIO_S6G_DFT_CFG0_RX_PHS_CORR_DIS                 BIT(4)
-#define HSIO_S6G_DFT_CFG0_RX_PDSENS_ENA                   BIT(3)
-#define HSIO_S6G_DFT_CFG0_RX_DFT_ENA                      BIT(2)
-#define HSIO_S6G_DFT_CFG0_TX_DFT_ENA                      BIT(0)
-
-#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL(x)               (((x) << 8) & GENMASK(17, 8))
-#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_M                GENMASK(17, 8)
-#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_X(x)             (((x) & GENMASK(17, 8)) >> 8)
-#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ(x)                 (((x) << 4) & GENMASK(7, 4))
-#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_M                  GENMASK(7, 4)
-#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_X(x)               (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_S6G_DFT_CFG1_TX_JI_ENA                       BIT(3)
-#define HSIO_S6G_DFT_CFG1_TX_WAVEFORM_SEL                 BIT(2)
-#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_DIR                  BIT(1)
-#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_ENA                  BIT(0)
-
-#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL(x)               (((x) << 8) & GENMASK(17, 8))
-#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_M                GENMASK(17, 8)
-#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_X(x)             (((x) & GENMASK(17, 8)) >> 8)
-#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ(x)                 (((x) << 4) & GENMASK(7, 4))
-#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_M                  GENMASK(7, 4)
-#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_X(x)               (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_S6G_DFT_CFG2_RX_JI_ENA                       BIT(3)
-#define HSIO_S6G_DFT_CFG2_RX_WAVEFORM_SEL                 BIT(2)
-#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_DIR                  BIT(1)
-#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_ENA                  BIT(0)
-
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_ENA             BIT(20)
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x)     (((x) << 16) & GENMASK(19, 16))
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M      GENMASK(19, 16)
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x)   (((x) & GENMASK(19, 16)) >> 16)
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x)         (((x) << 8) & GENMASK(15, 8))
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M          GENMASK(15, 8)
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x)       (((x) & GENMASK(15, 8)) >> 8)
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x)          ((x) & GENMASK(7, 0))
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M           GENMASK(7, 0)
-
-#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK(x)                 (((x) << 13) & GENMASK(14, 13))
-#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_M                  GENMASK(14, 13)
-#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_X(x)               (((x) & GENMASK(14, 13)) >> 13)
-#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE(x)          (((x) << 11) & GENMASK(12, 11))
-#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_M           GENMASK(12, 11)
-#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_X(x)        (((x) & GENMASK(12, 11)) >> 11)
-#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_SWAP             BIT(10)
-#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_MODE             BIT(9)
-#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_ENA              BIT(8)
-#define HSIO_S6G_MISC_CFG_RX_BUS_FLIP_ENA                 BIT(7)
-#define HSIO_S6G_MISC_CFG_TX_BUS_FLIP_ENA                 BIT(6)
-#define HSIO_S6G_MISC_CFG_RX_LPI_MODE_ENA                 BIT(5)
-#define HSIO_S6G_MISC_CFG_TX_LPI_MODE_ENA                 BIT(4)
-#define HSIO_S6G_MISC_CFG_RX_DATA_INV_ENA                 BIT(3)
-#define HSIO_S6G_MISC_CFG_TX_DATA_INV_ENA                 BIT(2)
-#define HSIO_S6G_MISC_CFG_LANE_RST                        BIT(0)
-
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0(x)               (((x) << 23) & GENMASK(28, 23))
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_M                GENMASK(28, 23)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_X(x)             (((x) & GENMASK(28, 23)) >> 23)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1(x)               (((x) << 18) & GENMASK(22, 18))
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_M                GENMASK(22, 18)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_X(x)             (((x) & GENMASK(22, 18)) >> 18)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC(x)                (((x) << 13) & GENMASK(17, 13))
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_M                 GENMASK(17, 13)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_X(x)              (((x) & GENMASK(17, 13)) >> 13)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS(x)             (((x) << 6) & GENMASK(8, 6))
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_M              GENMASK(8, 6)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_X(x)           (((x) & GENMASK(8, 6)) >> 6)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV(x)                 ((x) & GENMASK(5, 0))
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV_M                  GENMASK(5, 0)
-
-#define HSIO_S6G_DFT_STATUS_PRBS_SYNC_STAT                BIT(8)
-#define HSIO_S6G_DFT_STATUS_PLL_BIST_NOT_DONE             BIT(7)
-#define HSIO_S6G_DFT_STATUS_PLL_BIST_FAILED               BIT(6)
-#define HSIO_S6G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR          BIT(5)
-#define HSIO_S6G_DFT_STATUS_BIST_ACTIVE                   BIT(3)
-#define HSIO_S6G_DFT_STATUS_BIST_NOSYNC                   BIT(2)
-#define HSIO_S6G_DFT_STATUS_BIST_COMPLETE_N               BIT(1)
-#define HSIO_S6G_DFT_STATUS_BIST_ERROR                    BIT(0)
-
-#define HSIO_S6G_MISC_STATUS_DES_100FX_PHASE_SEL          BIT(0)
-
-#define HSIO_S6G_DES_CFG_DES_PHS_CTRL(x)                  (((x) << 13) & GENMASK(16, 13))
-#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_M                   GENMASK(16, 13)
-#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_X(x)                (((x) & GENMASK(16, 13)) >> 13)
-#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL(x)                 (((x) << 10) & GENMASK(12, 10))
-#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_M                  GENMASK(12, 10)
-#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_X(x)               (((x) & GENMASK(12, 10)) >> 10)
-#define HSIO_S6G_DES_CFG_DES_CPMD_SEL(x)                  (((x) << 8) & GENMASK(9, 8))
-#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_M                   GENMASK(9, 8)
-#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_X(x)                (((x) & GENMASK(9, 8)) >> 8)
-#define HSIO_S6G_DES_CFG_DES_BW_HYST(x)                   (((x) << 5) & GENMASK(7, 5))
-#define HSIO_S6G_DES_CFG_DES_BW_HYST_M                    GENMASK(7, 5)
-#define HSIO_S6G_DES_CFG_DES_BW_HYST_X(x)                 (((x) & GENMASK(7, 5)) >> 5)
-#define HSIO_S6G_DES_CFG_DES_SWAP_HYST                    BIT(4)
-#define HSIO_S6G_DES_CFG_DES_BW_ANA(x)                    (((x) << 1) & GENMASK(3, 1))
-#define HSIO_S6G_DES_CFG_DES_BW_ANA_M                     GENMASK(3, 1)
-#define HSIO_S6G_DES_CFG_DES_BW_ANA_X(x)                  (((x) & GENMASK(3, 1)) >> 1)
-#define HSIO_S6G_DES_CFG_DES_SWAP_ANA                     BIT(0)
-
-#define HSIO_S6G_IB_CFG_IB_SOFSI(x)                       (((x) << 29) & GENMASK(30, 29))
-#define HSIO_S6G_IB_CFG_IB_SOFSI_M                        GENMASK(30, 29)
-#define HSIO_S6G_IB_CFG_IB_SOFSI_X(x)                     (((x) & GENMASK(30, 29)) >> 29)
-#define HSIO_S6G_IB_CFG_IB_VBULK_SEL                      BIT(28)
-#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ(x)                    (((x) << 24) & GENMASK(27, 24))
-#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_M                     GENMASK(27, 24)
-#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_X(x)                  (((x) & GENMASK(27, 24)) >> 24)
-#define HSIO_S6G_IB_CFG_IB_ICML_ADJ(x)                    (((x) << 20) & GENMASK(23, 20))
-#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_M                     GENMASK(23, 20)
-#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_X(x)                  (((x) & GENMASK(23, 20)) >> 20)
-#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL(x)               (((x) << 18) & GENMASK(19, 18))
-#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_M                GENMASK(19, 18)
-#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_X(x)             (((x) & GENMASK(19, 18)) >> 18)
-#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL(x)             (((x) << 15) & GENMASK(17, 15))
-#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_M              GENMASK(17, 15)
-#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_X(x)           (((x) & GENMASK(17, 15)) >> 15)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP(x)              (((x) << 13) & GENMASK(14, 13))
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_M               GENMASK(14, 13)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_X(x)            (((x) & GENMASK(14, 13)) >> 13)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID(x)             (((x) << 11) & GENMASK(12, 11))
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_M              GENMASK(12, 11)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_X(x)           (((x) & GENMASK(12, 11)) >> 11)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP(x)              (((x) << 9) & GENMASK(10, 9))
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_M               GENMASK(10, 9)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_X(x)            (((x) & GENMASK(10, 9)) >> 9)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET(x)          (((x) << 7) & GENMASK(8, 7))
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_M           GENMASK(8, 7)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_X(x)        (((x) & GENMASK(8, 7)) >> 7)
-#define HSIO_S6G_IB_CFG_IB_ANA_TEST_ENA                   BIT(6)
-#define HSIO_S6G_IB_CFG_IB_SIG_DET_ENA                    BIT(5)
-#define HSIO_S6G_IB_CFG_IB_CONCUR                         BIT(4)
-#define HSIO_S6G_IB_CFG_IB_CAL_ENA                        BIT(3)
-#define HSIO_S6G_IB_CFG_IB_SAM_ENA                        BIT(2)
-#define HSIO_S6G_IB_CFG_IB_EQZ_ENA                        BIT(1)
-#define HSIO_S6G_IB_CFG_IB_REG_ENA                        BIT(0)
-
-#define HSIO_S6G_IB_CFG1_IB_TJTAG(x)                      (((x) << 17) & GENMASK(21, 17))
-#define HSIO_S6G_IB_CFG1_IB_TJTAG_M                       GENMASK(21, 17)
-#define HSIO_S6G_IB_CFG1_IB_TJTAG_X(x)                    (((x) & GENMASK(21, 17)) >> 17)
-#define HSIO_S6G_IB_CFG1_IB_TSDET(x)                      (((x) << 12) & GENMASK(16, 12))
-#define HSIO_S6G_IB_CFG1_IB_TSDET_M                       GENMASK(16, 12)
-#define HSIO_S6G_IB_CFG1_IB_TSDET_X(x)                    (((x) & GENMASK(16, 12)) >> 12)
-#define HSIO_S6G_IB_CFG1_IB_SCALY(x)                      (((x) << 8) & GENMASK(11, 8))
-#define HSIO_S6G_IB_CFG1_IB_SCALY_M                       GENMASK(11, 8)
-#define HSIO_S6G_IB_CFG1_IB_SCALY_X(x)                    (((x) & GENMASK(11, 8)) >> 8)
-#define HSIO_S6G_IB_CFG1_IB_FILT_HP                       BIT(7)
-#define HSIO_S6G_IB_CFG1_IB_FILT_MID                      BIT(6)
-#define HSIO_S6G_IB_CFG1_IB_FILT_LP                       BIT(5)
-#define HSIO_S6G_IB_CFG1_IB_FILT_OFFSET                   BIT(4)
-#define HSIO_S6G_IB_CFG1_IB_FRC_HP                        BIT(3)
-#define HSIO_S6G_IB_CFG1_IB_FRC_MID                       BIT(2)
-#define HSIO_S6G_IB_CFG1_IB_FRC_LP                        BIT(1)
-#define HSIO_S6G_IB_CFG1_IB_FRC_OFFSET                    BIT(0)
-
-#define HSIO_S6G_IB_CFG2_IB_TINFV(x)                      (((x) << 27) & GENMASK(29, 27))
-#define HSIO_S6G_IB_CFG2_IB_TINFV_M                       GENMASK(29, 27)
-#define HSIO_S6G_IB_CFG2_IB_TINFV_X(x)                    (((x) & GENMASK(29, 27)) >> 27)
-#define HSIO_S6G_IB_CFG2_IB_OINFI(x)                      (((x) << 22) & GENMASK(26, 22))
-#define HSIO_S6G_IB_CFG2_IB_OINFI_M                       GENMASK(26, 22)
-#define HSIO_S6G_IB_CFG2_IB_OINFI_X(x)                    (((x) & GENMASK(26, 22)) >> 22)
-#define HSIO_S6G_IB_CFG2_IB_TAUX(x)                       (((x) << 19) & GENMASK(21, 19))
-#define HSIO_S6G_IB_CFG2_IB_TAUX_M                        GENMASK(21, 19)
-#define HSIO_S6G_IB_CFG2_IB_TAUX_X(x)                     (((x) & GENMASK(21, 19)) >> 19)
-#define HSIO_S6G_IB_CFG2_IB_OINFS(x)                      (((x) << 16) & GENMASK(18, 16))
-#define HSIO_S6G_IB_CFG2_IB_OINFS_M                       GENMASK(18, 16)
-#define HSIO_S6G_IB_CFG2_IB_OINFS_X(x)                    (((x) & GENMASK(18, 16)) >> 16)
-#define HSIO_S6G_IB_CFG2_IB_OCALS(x)                      (((x) << 10) & GENMASK(15, 10))
-#define HSIO_S6G_IB_CFG2_IB_OCALS_M                       GENMASK(15, 10)
-#define HSIO_S6G_IB_CFG2_IB_OCALS_X(x)                    (((x) & GENMASK(15, 10)) >> 10)
-#define HSIO_S6G_IB_CFG2_IB_TCALV(x)                      (((x) << 5) & GENMASK(9, 5))
-#define HSIO_S6G_IB_CFG2_IB_TCALV_M                       GENMASK(9, 5)
-#define HSIO_S6G_IB_CFG2_IB_TCALV_X(x)                    (((x) & GENMASK(9, 5)) >> 5)
-#define HSIO_S6G_IB_CFG2_IB_UMAX(x)                       (((x) << 3) & GENMASK(4, 3))
-#define HSIO_S6G_IB_CFG2_IB_UMAX_M                        GENMASK(4, 3)
-#define HSIO_S6G_IB_CFG2_IB_UMAX_X(x)                     (((x) & GENMASK(4, 3)) >> 3)
-#define HSIO_S6G_IB_CFG2_IB_UREG(x)                       ((x) & GENMASK(2, 0))
-#define HSIO_S6G_IB_CFG2_IB_UREG_M                        GENMASK(2, 0)
-
-#define HSIO_S6G_IB_CFG3_IB_INI_HP(x)                     (((x) << 18) & GENMASK(23, 18))
-#define HSIO_S6G_IB_CFG3_IB_INI_HP_M                      GENMASK(23, 18)
-#define HSIO_S6G_IB_CFG3_IB_INI_HP_X(x)                   (((x) & GENMASK(23, 18)) >> 18)
-#define HSIO_S6G_IB_CFG3_IB_INI_MID(x)                    (((x) << 12) & GENMASK(17, 12))
-#define HSIO_S6G_IB_CFG3_IB_INI_MID_M                     GENMASK(17, 12)
-#define HSIO_S6G_IB_CFG3_IB_INI_MID_X(x)                  (((x) & GENMASK(17, 12)) >> 12)
-#define HSIO_S6G_IB_CFG3_IB_INI_LP(x)                     (((x) << 6) & GENMASK(11, 6))
-#define HSIO_S6G_IB_CFG3_IB_INI_LP_M                      GENMASK(11, 6)
-#define HSIO_S6G_IB_CFG3_IB_INI_LP_X(x)                   (((x) & GENMASK(11, 6)) >> 6)
-#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET(x)                 ((x) & GENMASK(5, 0))
-#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET_M                  GENMASK(5, 0)
-
-#define HSIO_S6G_IB_CFG4_IB_MAX_HP(x)                     (((x) << 18) & GENMASK(23, 18))
-#define HSIO_S6G_IB_CFG4_IB_MAX_HP_M                      GENMASK(23, 18)
-#define HSIO_S6G_IB_CFG4_IB_MAX_HP_X(x)                   (((x) & GENMASK(23, 18)) >> 18)
-#define HSIO_S6G_IB_CFG4_IB_MAX_MID(x)                    (((x) << 12) & GENMASK(17, 12))
-#define HSIO_S6G_IB_CFG4_IB_MAX_MID_M                     GENMASK(17, 12)
-#define HSIO_S6G_IB_CFG4_IB_MAX_MID_X(x)                  (((x) & GENMASK(17, 12)) >> 12)
-#define HSIO_S6G_IB_CFG4_IB_MAX_LP(x)                     (((x) << 6) & GENMASK(11, 6))
-#define HSIO_S6G_IB_CFG4_IB_MAX_LP_M                      GENMASK(11, 6)
-#define HSIO_S6G_IB_CFG4_IB_MAX_LP_X(x)                   (((x) & GENMASK(11, 6)) >> 6)
-#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET(x)                 ((x) & GENMASK(5, 0))
-#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET_M                  GENMASK(5, 0)
-
-#define HSIO_S6G_IB_CFG5_IB_MIN_HP(x)                     (((x) << 18) & GENMASK(23, 18))
-#define HSIO_S6G_IB_CFG5_IB_MIN_HP_M                      GENMASK(23, 18)
-#define HSIO_S6G_IB_CFG5_IB_MIN_HP_X(x)                   (((x) & GENMASK(23, 18)) >> 18)
-#define HSIO_S6G_IB_CFG5_IB_MIN_MID(x)                    (((x) << 12) & GENMASK(17, 12))
-#define HSIO_S6G_IB_CFG5_IB_MIN_MID_M                     GENMASK(17, 12)
-#define HSIO_S6G_IB_CFG5_IB_MIN_MID_X(x)                  (((x) & GENMASK(17, 12)) >> 12)
-#define HSIO_S6G_IB_CFG5_IB_MIN_LP(x)                     (((x) << 6) & GENMASK(11, 6))
-#define HSIO_S6G_IB_CFG5_IB_MIN_LP_M                      GENMASK(11, 6)
-#define HSIO_S6G_IB_CFG5_IB_MIN_LP_X(x)                   (((x) & GENMASK(11, 6)) >> 6)
-#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET(x)                 ((x) & GENMASK(5, 0))
-#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET_M                  GENMASK(5, 0)
-
-#define HSIO_S6G_OB_CFG_OB_IDLE                           BIT(31)
-#define HSIO_S6G_OB_CFG_OB_ENA1V_MODE                     BIT(30)
-#define HSIO_S6G_OB_CFG_OB_POL                            BIT(29)
-#define HSIO_S6G_OB_CFG_OB_POST0(x)                       (((x) << 23) & GENMASK(28, 23))
-#define HSIO_S6G_OB_CFG_OB_POST0_M                        GENMASK(28, 23)
-#define HSIO_S6G_OB_CFG_OB_POST0_X(x)                     (((x) & GENMASK(28, 23)) >> 23)
-#define HSIO_S6G_OB_CFG_OB_PREC(x)                        (((x) << 18) & GENMASK(22, 18))
-#define HSIO_S6G_OB_CFG_OB_PREC_M                         GENMASK(22, 18)
-#define HSIO_S6G_OB_CFG_OB_PREC_X(x)                      (((x) & GENMASK(22, 18)) >> 18)
-#define HSIO_S6G_OB_CFG_OB_R_ADJ_MUX                      BIT(17)
-#define HSIO_S6G_OB_CFG_OB_R_ADJ_PDR                      BIT(16)
-#define HSIO_S6G_OB_CFG_OB_POST1(x)                       (((x) << 11) & GENMASK(15, 11))
-#define HSIO_S6G_OB_CFG_OB_POST1_M                        GENMASK(15, 11)
-#define HSIO_S6G_OB_CFG_OB_POST1_X(x)                     (((x) & GENMASK(15, 11)) >> 11)
-#define HSIO_S6G_OB_CFG_OB_R_COR                          BIT(10)
-#define HSIO_S6G_OB_CFG_OB_SEL_RCTRL                      BIT(9)
-#define HSIO_S6G_OB_CFG_OB_SR_H                           BIT(8)
-#define HSIO_S6G_OB_CFG_OB_SR(x)                          (((x) << 4) & GENMASK(7, 4))
-#define HSIO_S6G_OB_CFG_OB_SR_M                           GENMASK(7, 4)
-#define HSIO_S6G_OB_CFG_OB_SR_X(x)                        (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL(x)               ((x) & GENMASK(3, 0))
-#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL_M                GENMASK(3, 0)
-
-#define HSIO_S6G_OB_CFG1_OB_ENA_CAS(x)                    (((x) << 6) & GENMASK(8, 6))
-#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_M                     GENMASK(8, 6)
-#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_X(x)                  (((x) & GENMASK(8, 6)) >> 6)
-#define HSIO_S6G_OB_CFG1_OB_LEV(x)                        ((x) & GENMASK(5, 0))
-#define HSIO_S6G_OB_CFG1_OB_LEV_M                         GENMASK(5, 0)
-
-#define HSIO_S6G_SER_CFG_SER_4TAP_ENA                     BIT(8)
-#define HSIO_S6G_SER_CFG_SER_CPMD_SEL                     BIT(7)
-#define HSIO_S6G_SER_CFG_SER_SWAP_CPMD                    BIT(6)
-#define HSIO_S6G_SER_CFG_SER_ALISEL(x)                    (((x) << 4) & GENMASK(5, 4))
-#define HSIO_S6G_SER_CFG_SER_ALISEL_M                     GENMASK(5, 4)
-#define HSIO_S6G_SER_CFG_SER_ALISEL_X(x)                  (((x) & GENMASK(5, 4)) >> 4)
-#define HSIO_S6G_SER_CFG_SER_ENHYS                        BIT(3)
-#define HSIO_S6G_SER_CFG_SER_BIG_WIN                      BIT(2)
-#define HSIO_S6G_SER_CFG_SER_EN_WIN                       BIT(1)
-#define HSIO_S6G_SER_CFG_SER_ENALI                        BIT(0)
-
-#define HSIO_S6G_COMMON_CFG_SYS_RST                       BIT(17)
-#define HSIO_S6G_COMMON_CFG_SE_DIV2_ENA                   BIT(16)
-#define HSIO_S6G_COMMON_CFG_SE_AUTO_SQUELCH_ENA           BIT(15)
-#define HSIO_S6G_COMMON_CFG_ENA_LANE                      BIT(14)
-#define HSIO_S6G_COMMON_CFG_PWD_RX                        BIT(13)
-#define HSIO_S6G_COMMON_CFG_PWD_TX                        BIT(12)
-#define HSIO_S6G_COMMON_CFG_LANE_CTRL(x)                  (((x) << 9) & GENMASK(11, 9))
-#define HSIO_S6G_COMMON_CFG_LANE_CTRL_M                   GENMASK(11, 9)
-#define HSIO_S6G_COMMON_CFG_LANE_CTRL_X(x)                (((x) & GENMASK(11, 9)) >> 9)
-#define HSIO_S6G_COMMON_CFG_ENA_DIRECT                    BIT(8)
-#define HSIO_S6G_COMMON_CFG_ENA_ELOOP                     BIT(7)
-#define HSIO_S6G_COMMON_CFG_ENA_FLOOP                     BIT(6)
-#define HSIO_S6G_COMMON_CFG_ENA_ILOOP                     BIT(5)
-#define HSIO_S6G_COMMON_CFG_ENA_PLOOP                     BIT(4)
-#define HSIO_S6G_COMMON_CFG_HRATE                         BIT(3)
-#define HSIO_S6G_COMMON_CFG_QRATE                         BIT(2)
-#define HSIO_S6G_COMMON_CFG_IF_MODE(x)                    ((x) & GENMASK(1, 0))
-#define HSIO_S6G_COMMON_CFG_IF_MODE_M                     GENMASK(1, 0)
-
-#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS(x)                  (((x) << 16) & GENMASK(17, 16))
-#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_M                   GENMASK(17, 16)
-#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_X(x)                (((x) & GENMASK(17, 16)) >> 16)
-#define HSIO_S6G_PLL_CFG_PLL_DIV4                         BIT(15)
-#define HSIO_S6G_PLL_CFG_PLL_ENA_ROT                      BIT(14)
-#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA(x)             (((x) << 6) & GENMASK(13, 6))
-#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_M              GENMASK(13, 6)
-#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x)           (((x) & GENMASK(13, 6)) >> 6)
-#define HSIO_S6G_PLL_CFG_PLL_FSM_ENA                      BIT(5)
-#define HSIO_S6G_PLL_CFG_PLL_FSM_FORCE_SET_ENA            BIT(4)
-#define HSIO_S6G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA            BIT(3)
-#define HSIO_S6G_PLL_CFG_PLL_RB_DATA_SEL                  BIT(2)
-#define HSIO_S6G_PLL_CFG_PLL_ROT_DIR                      BIT(1)
-#define HSIO_S6G_PLL_CFG_PLL_ROT_FRQ                      BIT(0)
-
-#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_N            BIT(5)
-#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_P            BIT(4)
-#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_CLK               BIT(3)
-#define HSIO_S6G_ACJTAG_CFG_OB_DIRECT                     BIT(2)
-#define HSIO_S6G_ACJTAG_CFG_ACJTAG_ENA                    BIT(1)
-#define HSIO_S6G_ACJTAG_CFG_JTAG_CTRL_ENA                 BIT(0)
-
-#define HSIO_S6G_GP_CFG_GP_MSB(x)                         (((x) << 16) & GENMASK(31, 16))
-#define HSIO_S6G_GP_CFG_GP_MSB_M                          GENMASK(31, 16)
-#define HSIO_S6G_GP_CFG_GP_MSB_X(x)                       (((x) & GENMASK(31, 16)) >> 16)
-#define HSIO_S6G_GP_CFG_GP_LSB(x)                         ((x) & GENMASK(15, 0))
-#define HSIO_S6G_GP_CFG_GP_LSB_M                          GENMASK(15, 0)
-
-#define HSIO_S6G_IB_STATUS0_IB_CAL_DONE                   BIT(8)
-#define HSIO_S6G_IB_STATUS0_IB_HP_GAIN_ACT                BIT(7)
-#define HSIO_S6G_IB_STATUS0_IB_MID_GAIN_ACT               BIT(6)
-#define HSIO_S6G_IB_STATUS0_IB_LP_GAIN_ACT                BIT(5)
-#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ACT                 BIT(4)
-#define HSIO_S6G_IB_STATUS0_IB_OFFSET_VLD                 BIT(3)
-#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ERR                 BIT(2)
-#define HSIO_S6G_IB_STATUS0_IB_OFFSDIR                    BIT(1)
-#define HSIO_S6G_IB_STATUS0_IB_SIG_DET                    BIT(0)
-
-#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT(x)            (((x) << 18) & GENMASK(23, 18))
-#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_M             GENMASK(23, 18)
-#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_X(x)          (((x) & GENMASK(23, 18)) >> 18)
-#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT(x)           (((x) << 12) & GENMASK(17, 12))
-#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_M            GENMASK(17, 12)
-#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_X(x)         (((x) & GENMASK(17, 12)) >> 12)
-#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT(x)            (((x) << 6) & GENMASK(11, 6))
-#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_M             GENMASK(11, 6)
-#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_X(x)          (((x) & GENMASK(11, 6)) >> 6)
-#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT(x)             ((x) & GENMASK(5, 0))
-#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT_M              GENMASK(5, 0)
-
-#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_N         BIT(2)
-#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_P         BIT(1)
-#define HSIO_S6G_ACJTAG_STATUS_IB_DIRECT                  BIT(0)
-
-#define HSIO_S6G_PLL_STATUS_PLL_CAL_NOT_DONE              BIT(10)
-#define HSIO_S6G_PLL_STATUS_PLL_CAL_ERR                   BIT(9)
-#define HSIO_S6G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR          BIT(8)
-#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA(x)                ((x) & GENMASK(7, 0))
-#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA_M                 GENMASK(7, 0)
-
-#define HSIO_S6G_REVID_SERDES_REV(x)                      (((x) << 26) & GENMASK(31, 26))
-#define HSIO_S6G_REVID_SERDES_REV_M                       GENMASK(31, 26)
-#define HSIO_S6G_REVID_SERDES_REV_X(x)                    (((x) & GENMASK(31, 26)) >> 26)
-#define HSIO_S6G_REVID_RCPLL_REV(x)                       (((x) << 21) & GENMASK(25, 21))
-#define HSIO_S6G_REVID_RCPLL_REV_M                        GENMASK(25, 21)
-#define HSIO_S6G_REVID_RCPLL_REV_X(x)                     (((x) & GENMASK(25, 21)) >> 21)
-#define HSIO_S6G_REVID_SER_REV(x)                         (((x) << 16) & GENMASK(20, 16))
-#define HSIO_S6G_REVID_SER_REV_M                          GENMASK(20, 16)
-#define HSIO_S6G_REVID_SER_REV_X(x)                       (((x) & GENMASK(20, 16)) >> 16)
-#define HSIO_S6G_REVID_DES_REV(x)                         (((x) << 10) & GENMASK(15, 10))
-#define HSIO_S6G_REVID_DES_REV_M                          GENMASK(15, 10)
-#define HSIO_S6G_REVID_DES_REV_X(x)                       (((x) & GENMASK(15, 10)) >> 10)
-#define HSIO_S6G_REVID_OB_REV(x)                          (((x) << 5) & GENMASK(9, 5))
-#define HSIO_S6G_REVID_OB_REV_M                           GENMASK(9, 5)
-#define HSIO_S6G_REVID_OB_REV_X(x)                        (((x) & GENMASK(9, 5)) >> 5)
-#define HSIO_S6G_REVID_IB_REV(x)                          ((x) & GENMASK(4, 0))
-#define HSIO_S6G_REVID_IB_REV_M                           GENMASK(4, 0)
-
-#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_WR_ONE_SHOT        BIT(31)
-#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_RD_ONE_SHOT        BIT(30)
-#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR(x)            ((x) & GENMASK(24, 0))
-#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR_M             GENMASK(24, 0)
-
-#define HSIO_HW_CFG_DEV2G5_10_MODE                        BIT(6)
-#define HSIO_HW_CFG_DEV1G_9_MODE                          BIT(5)
-#define HSIO_HW_CFG_DEV1G_6_MODE                          BIT(4)
-#define HSIO_HW_CFG_DEV1G_5_MODE                          BIT(3)
-#define HSIO_HW_CFG_DEV1G_4_MODE                          BIT(2)
-#define HSIO_HW_CFG_PCIE_ENA                              BIT(1)
-#define HSIO_HW_CFG_QSGMII_ENA                            BIT(0)
-
-#define HSIO_HW_QSGMII_CFG_SHYST_DIS                      BIT(3)
-#define HSIO_HW_QSGMII_CFG_E_DET_ENA                      BIT(2)
-#define HSIO_HW_QSGMII_CFG_USE_I1_ENA                     BIT(1)
-#define HSIO_HW_QSGMII_CFG_FLIP_LANES                     BIT(0)
-
-#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS(x)           (((x) << 1) & GENMASK(6, 1))
-#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_M            GENMASK(6, 1)
-#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_X(x)         (((x) & GENMASK(6, 1)) >> 1)
-#define HSIO_HW_QSGMII_STAT_SYNC                          BIT(0)
-
-#define HSIO_CLK_CFG_CLKDIV_PHY(x)                        (((x) << 1) & GENMASK(8, 1))
-#define HSIO_CLK_CFG_CLKDIV_PHY_M                         GENMASK(8, 1)
-#define HSIO_CLK_CFG_CLKDIV_PHY_X(x)                      (((x) & GENMASK(8, 1)) >> 1)
-#define HSIO_CLK_CFG_CLKDIV_PHY_DIS                       BIT(0)
-
-#define HSIO_TEMP_SENSOR_CTRL_FORCE_TEMP_RD               BIT(5)
-#define HSIO_TEMP_SENSOR_CTRL_FORCE_RUN                   BIT(4)
-#define HSIO_TEMP_SENSOR_CTRL_FORCE_NO_RST                BIT(3)
-#define HSIO_TEMP_SENSOR_CTRL_FORCE_POWER_UP              BIT(2)
-#define HSIO_TEMP_SENSOR_CTRL_FORCE_CLK                   BIT(1)
-#define HSIO_TEMP_SENSOR_CTRL_SAMPLE_ENA                  BIT(0)
-
-#define HSIO_TEMP_SENSOR_CFG_RUN_WID(x)                   (((x) << 8) & GENMASK(15, 8))
-#define HSIO_TEMP_SENSOR_CFG_RUN_WID_M                    GENMASK(15, 8)
-#define HSIO_TEMP_SENSOR_CFG_RUN_WID_X(x)                 (((x) & GENMASK(15, 8)) >> 8)
-#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER(x)                ((x) & GENMASK(7, 0))
-#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER_M                 GENMASK(7, 0)
-
-#define HSIO_TEMP_SENSOR_STAT_TEMP_VALID                  BIT(8)
-#define HSIO_TEMP_SENSOR_STAT_TEMP(x)                     ((x) & GENMASK(7, 0))
-#define HSIO_TEMP_SENSOR_STAT_TEMP_M                      GENMASK(7, 0)
-
-#endif
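
Each multi-bit field in the register block removed above is described by a triplet: a setter macro that shifts a value into position and masks it, a _M constant naming the raw mask, and an _X extractor that masks and shifts back down. A minimal, self-contained C sketch of the round trip, using userspace stand-ins for the kernel's BIT()/GENMASK() helpers and a hypothetical field FOO_RTRM_ADJ occupying bits 27:24:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's BIT()/GENMASK() helpers. */
#define BIT(n)         (1u << (n))
#define GENMASK(h, l)  (((~0u) >> (31 - (h))) & (~0u << (l)))

/* Hypothetical 4-bit field, mirroring the setter/_M/_X triplet above. */
#define FOO_RTRM_ADJ(x)    (((x) << 24) & GENMASK(27, 24))
#define FOO_RTRM_ADJ_M     GENMASK(27, 24)
#define FOO_RTRM_ADJ_X(x)  (((x) & GENMASK(27, 24)) >> 24)

int main(void)
{
	uint32_t reg = 0xdeadbeef;

	/* Read-modify-write: clear the field, then insert a new value. */
	reg &= ~FOO_RTRM_ADJ_M;
	reg |= FOO_RTRM_ADJ(0x5);

	assert(FOO_RTRM_ADJ_X(reg) == 0x5);
	printf("reg = 0x%08x field = %u\n", reg, FOO_RTRM_ADJ_X(reg));
	return 0;
}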
index e334b406c40c98746e0b2fe6b47d8e0ec2e5fb7d..9271af18b93bdf86631a32ce1563838600cfba20 100644 (file)
@@ -5,6 +5,7 @@
  * Copyright (c) 2017 Microsemi Corporation
  */
 #include "ocelot.h"
+#include <soc/mscc/ocelot_hsio.h>
 
 static const u32 ocelot_ana_regmap[] = {
        REG(ANA_ADVLEARN,                  0x009000),
@@ -102,82 +103,6 @@ static const u32 ocelot_qs_regmap[] = {
        REG(QS_INH_DBG,                    0x000048),
 };
 
-static const u32 ocelot_hsio_regmap[] = {
-       REG(HSIO_PLL5G_CFG0,               0x000000),
-       REG(HSIO_PLL5G_CFG1,               0x000004),
-       REG(HSIO_PLL5G_CFG2,               0x000008),
-       REG(HSIO_PLL5G_CFG3,               0x00000c),
-       REG(HSIO_PLL5G_CFG4,               0x000010),
-       REG(HSIO_PLL5G_CFG5,               0x000014),
-       REG(HSIO_PLL5G_CFG6,               0x000018),
-       REG(HSIO_PLL5G_STATUS0,            0x00001c),
-       REG(HSIO_PLL5G_STATUS1,            0x000020),
-       REG(HSIO_PLL5G_BIST_CFG0,          0x000024),
-       REG(HSIO_PLL5G_BIST_CFG1,          0x000028),
-       REG(HSIO_PLL5G_BIST_CFG2,          0x00002c),
-       REG(HSIO_PLL5G_BIST_STAT0,         0x000030),
-       REG(HSIO_PLL5G_BIST_STAT1,         0x000034),
-       REG(HSIO_RCOMP_CFG0,               0x000038),
-       REG(HSIO_RCOMP_STATUS,             0x00003c),
-       REG(HSIO_SYNC_ETH_CFG,             0x000040),
-       REG(HSIO_SYNC_ETH_PLL_CFG,         0x000048),
-       REG(HSIO_S1G_DES_CFG,              0x00004c),
-       REG(HSIO_S1G_IB_CFG,               0x000050),
-       REG(HSIO_S1G_OB_CFG,               0x000054),
-       REG(HSIO_S1G_SER_CFG,              0x000058),
-       REG(HSIO_S1G_COMMON_CFG,           0x00005c),
-       REG(HSIO_S1G_PLL_CFG,              0x000060),
-       REG(HSIO_S1G_PLL_STATUS,           0x000064),
-       REG(HSIO_S1G_DFT_CFG0,             0x000068),
-       REG(HSIO_S1G_DFT_CFG1,             0x00006c),
-       REG(HSIO_S1G_DFT_CFG2,             0x000070),
-       REG(HSIO_S1G_TP_CFG,               0x000074),
-       REG(HSIO_S1G_RC_PLL_BIST_CFG,      0x000078),
-       REG(HSIO_S1G_MISC_CFG,             0x00007c),
-       REG(HSIO_S1G_DFT_STATUS,           0x000080),
-       REG(HSIO_S1G_MISC_STATUS,          0x000084),
-       REG(HSIO_MCB_S1G_ADDR_CFG,         0x000088),
-       REG(HSIO_S6G_DIG_CFG,              0x00008c),
-       REG(HSIO_S6G_DFT_CFG0,             0x000090),
-       REG(HSIO_S6G_DFT_CFG1,             0x000094),
-       REG(HSIO_S6G_DFT_CFG2,             0x000098),
-       REG(HSIO_S6G_TP_CFG0,              0x00009c),
-       REG(HSIO_S6G_TP_CFG1,              0x0000a0),
-       REG(HSIO_S6G_RC_PLL_BIST_CFG,      0x0000a4),
-       REG(HSIO_S6G_MISC_CFG,             0x0000a8),
-       REG(HSIO_S6G_OB_ANEG_CFG,          0x0000ac),
-       REG(HSIO_S6G_DFT_STATUS,           0x0000b0),
-       REG(HSIO_S6G_ERR_CNT,              0x0000b4),
-       REG(HSIO_S6G_MISC_STATUS,          0x0000b8),
-       REG(HSIO_S6G_DES_CFG,              0x0000bc),
-       REG(HSIO_S6G_IB_CFG,               0x0000c0),
-       REG(HSIO_S6G_IB_CFG1,              0x0000c4),
-       REG(HSIO_S6G_IB_CFG2,              0x0000c8),
-       REG(HSIO_S6G_IB_CFG3,              0x0000cc),
-       REG(HSIO_S6G_IB_CFG4,              0x0000d0),
-       REG(HSIO_S6G_IB_CFG5,              0x0000d4),
-       REG(HSIO_S6G_OB_CFG,               0x0000d8),
-       REG(HSIO_S6G_OB_CFG1,              0x0000dc),
-       REG(HSIO_S6G_SER_CFG,              0x0000e0),
-       REG(HSIO_S6G_COMMON_CFG,           0x0000e4),
-       REG(HSIO_S6G_PLL_CFG,              0x0000e8),
-       REG(HSIO_S6G_ACJTAG_CFG,           0x0000ec),
-       REG(HSIO_S6G_GP_CFG,               0x0000f0),
-       REG(HSIO_S6G_IB_STATUS0,           0x0000f4),
-       REG(HSIO_S6G_IB_STATUS1,           0x0000f8),
-       REG(HSIO_S6G_ACJTAG_STATUS,        0x0000fc),
-       REG(HSIO_S6G_PLL_STATUS,           0x000100),
-       REG(HSIO_S6G_REVID,                0x000104),
-       REG(HSIO_MCB_S6G_ADDR_CFG,         0x000108),
-       REG(HSIO_HW_CFG,                   0x00010c),
-       REG(HSIO_HW_QSGMII_CFG,            0x000110),
-       REG(HSIO_HW_QSGMII_STAT,           0x000114),
-       REG(HSIO_CLK_CFG,                  0x000118),
-       REG(HSIO_TEMP_SENSOR_CTRL,         0x00011c),
-       REG(HSIO_TEMP_SENSOR_CFG,          0x000120),
-       REG(HSIO_TEMP_SENSOR_STAT,         0x000124),
-};
-
 static const u32 ocelot_qsys_regmap[] = {
        REG(QSYS_PORT_MODE,                0x011200),
        REG(QSYS_SWITCH_PORT_MODE,         0x011234),
@@ -302,7 +227,6 @@ static const u32 ocelot_sys_regmap[] = {
 static const u32 *ocelot_regmap[] = {
        [ANA] = ocelot_ana_regmap,
        [QS] = ocelot_qs_regmap,
-       [HSIO] = ocelot_hsio_regmap,
        [QSYS] = ocelot_qsys_regmap,
        [REW] = ocelot_rew_regmap,
        [SYS] = ocelot_sys_regmap,
@@ -453,9 +377,11 @@ static void ocelot_pll5_init(struct ocelot *ocelot)
        /* Configure PLL5. This will need a proper CCF driver
         * The values are coming from the VTSS API for Ocelot
         */
-       ocelot_write(ocelot, HSIO_PLL5G_CFG4_IB_CTRL(0x7600) |
-                    HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8), HSIO_PLL5G_CFG4);
-       ocelot_write(ocelot, HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) |
+       regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG4,
+                    HSIO_PLL5G_CFG4_IB_CTRL(0x7600) |
+                    HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8));
+       regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG0,
+                    HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) |
                     HSIO_PLL5G_CFG0_CPU_CLK_DIV(2) |
                     HSIO_PLL5G_CFG0_ENA_BIAS |
                     HSIO_PLL5G_CFG0_ENA_VCO_BUF |
@@ -465,13 +391,14 @@ static void ocelot_pll5_init(struct ocelot *ocelot)
                     HSIO_PLL5G_CFG0_SELBGV820(4) |
                     HSIO_PLL5G_CFG0_DIV4 |
                     HSIO_PLL5G_CFG0_ENA_CLKTREE |
-                    HSIO_PLL5G_CFG0_ENA_LANE, HSIO_PLL5G_CFG0);
-       ocelot_write(ocelot, HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET |
+                    HSIO_PLL5G_CFG0_ENA_LANE);
+       regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG2,
+                    HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET |
                     HSIO_PLL5G_CFG2_EN_RESET_OVERRUN |
                     HSIO_PLL5G_CFG2_GAIN_TEST(0x8) |
                     HSIO_PLL5G_CFG2_ENA_AMPCTRL |
                     HSIO_PLL5G_CFG2_PWD_AMPCTRL_N |
-                    HSIO_PLL5G_CFG2_AMPC_SEL(0x10), HSIO_PLL5G_CFG2);
+                    HSIO_PLL5G_CFG2_AMPC_SEL(0x10));
 }
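
The PLL5 setup now writes through regmap_write() on the dedicated HSIO regmap target rather than through ocelot_write(), which resolved offsets via the driver's own HSIO regmap table (removed above); this lets the HSIO block be addressed independently of the switch core. A mock of the target-indexed write shape — types and names here are invented for the sketch, not the regmap API itself:

#include <stdint.h>
#include <stdio.h>

/* Mock of a regmap-style indirection: each block ("target") owns its
 * own register window, and writes go through the target rather than a
 * single driver-wide offset table. */
struct mock_regmap {
	const char *name;
	uint32_t regs[64];
};

static void mock_regmap_write(struct mock_regmap *map, unsigned int reg,
			      unsigned int val)
{
	map->regs[reg / 4] = val;
	printf("%s: reg 0x%03x <- 0x%08x\n", map->name, reg, val);
}

enum { HSIO, NUM_TARGETS };

int main(void)
{
	struct mock_regmap hsio = { .name = "hsio" };
	struct mock_regmap *targets[NUM_TARGETS] = { [HSIO] = &hsio };

	/* Same shape as regmap_write(ocelot->targets[HSIO], ...) above. */
	mock_regmap_write(targets[HSIO], 0x010, 0x00067600);
	return 0;
}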
 
 int ocelot_chip_init(struct ocelot *ocelot)
index db463e20a876cd1bb820f8e7335cbd7fbb24246e..4213fe42ac4de504c8d46e1bd030ec67d9859289 100644 (file)
@@ -177,7 +177,8 @@ static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
        return nfp_app_eswitch_mode_get(pf->app, mode);
 }
 
-static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+                                       struct netlink_ext_ack *extack)
 {
        struct nfp_pf *pf = devlink_priv(devlink);
        int ret;
index 8e8dc0db2493a64745e96479e4b8cae8ab456d97..7b91e77b2016654201b5fd23689beb7dba5204ce 100644 (file)
@@ -2077,14 +2077,17 @@ nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
        return true;
 }
 
-static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
 {
        struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
        struct nfp_net *nn = r_vec->nfp_net;
        struct nfp_net_dp *dp = &nn->dp;
+       unsigned int budget = 512;
 
-       while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring))
+       while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
                continue;
+
+       return budget;
 }
 
 static void nfp_ctrl_poll(unsigned long arg)
@@ -2096,9 +2099,13 @@ static void nfp_ctrl_poll(unsigned long arg)
        __nfp_ctrl_tx_queued(r_vec);
        spin_unlock(&r_vec->lock);
 
-       nfp_ctrl_rx(r_vec);
-
-       nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+       if (nfp_ctrl_rx(r_vec)) {
+               nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+       } else {
+               tasklet_schedule(&r_vec->tasklet);
+               nn_dp_warn(&r_vec->nfp_net->dp,
+                          "control message budget exceeded!\n");
+       }
 }
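
nfp_ctrl_rx() is now bounded: it processes at most 512 control messages per tasklet run and reports whether any budget remained, so the poll either re-arms the interrupt or reschedules itself. A simplified, self-contained model of the pattern (the kernel version folds the decrement into the loop condition; process_one() and the printouts are stand-ins):

#include <stdbool.h>
#include <stdio.h>

static int pending = 700;   /* simulated backlog */

static bool process_one(void)
{
	if (!pending)
		return false;
	pending--;
	return true;
}

static void poll(void)
{
	unsigned int budget = 512;

	/* Process until the ring drains or the budget runs out. */
	while (budget && process_one())
		budget--;

	if (budget)
		printf("done: re-enable interrupt\n");
	else
		printf("budget exhausted: reschedule poll\n");
}

int main(void)
{
	poll();  /* first pass stops at the budget */
	poll();  /* second pass drains the remaining 188 */
	return 0;
}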
 
 /* Setup and Configuration
index 1f9149bb2ae6b119b571d9e942c62a4d955cfa8b..2190836eaa1d0fd6e2c79c21f8417b1b1ff02a83 100644 (file)
@@ -113,6 +113,13 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
                                caps->mbox_len = length;
                        }
                        break;
+               case NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL0:
+               case NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL1:
+                       dev_warn(dev,
+                                "experimental TLV type:%u offset:%u len:%u\n",
+                                FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr),
+                                offset, length);
+                       break;
                default:
                        if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr))
                                break;
index 4c6fb9ecb72c22ffeefa2d3c2bde32bbe68004a9..863ca04fffbfbe11daf5d5d1da302983784aa112 100644 (file)
  * %NFP_NET_CFG_TLV_TYPE_MBOX:
  * Variable, mailbox area.  Overwrites the default location which is
  * %NFP_NET_CFG_MBOX_BASE and length %NFP_NET_CFG_MBOX_VAL_MAX_SZ.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL0:
+ * %NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL1:
+ * Variable, experimental IDs.  IDs designated for internal development and
+ * experiments before a stable TLV ID has been allocated to a feature.  Should
+ * never be present in production firmware.
  */
 #define NFP_NET_CFG_TLV_TYPE_UNKNOWN           0
 #define NFP_NET_CFG_TLV_TYPE_RESERVED          1
 #define NFP_NET_CFG_TLV_TYPE_END               2
 #define NFP_NET_CFG_TLV_TYPE_ME_FREQ           3
 #define NFP_NET_CFG_TLV_TYPE_MBOX              4
+#define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL0     5
+#define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL1     6
 
 struct device;
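
The new TLV types are handled in the capability-area walk, which unpacks a type and a length from each 32-bit header word with FIELD_GET(). A stand-alone sketch of such a walk — the field layout, masks, and FIELD_GET() stand-in here are invented for the sketch, not the NFP wire format:

#include <stdint.h>
#include <stdio.h>

#define TLV_HDR_TYPE 0xffff0000u
#define TLV_HDR_LEN  0x0000ffffu

/* Divide by the mask's lowest set bit: works for contiguous masks. */
#define FIELD_GET(mask, val) (((val) & (mask)) / ((mask) & -(mask)))

enum { TLV_TYPE_END = 2, TLV_TYPE_EXP0 = 5, TLV_TYPE_EXP1 = 6 };

int main(void)
{
	/* Type 5 ("experimental"), 4-byte payload, then an END marker. */
	uint32_t caps[] = { (5u << 16) | 4, 0xdeadbeef, (2u << 16) | 0 };
	unsigned int off = 0;

	for (;;) {
		uint32_t hdr = caps[off];
		unsigned int type = FIELD_GET(TLV_HDR_TYPE, hdr);
		unsigned int len = FIELD_GET(TLV_HDR_LEN, hdr);

		if (type == TLV_TYPE_END)
			break;
		if (type == TLV_TYPE_EXP0 || type == TLV_TYPE_EXP1)
			printf("experimental TLV type:%u len:%u\n",
			       type, len);
		off += 1 + len / 4;  /* header word + payload words */
	}
	return 0;
}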
 
index 74cf52e3fb094219128a1c98f18c215d7086bd00..0611f2335b4aaadacb86890ce3587d25b5209460 100644 (file)
@@ -127,8 +127,8 @@ struct nixge_hw_dma_bd {
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
 #define nixge_hw_dma_bd_set_addr(bd, field, addr) \
        do { \
-               (bd)->field##_lo = lower_32_bits(((u64)addr)); \
-               (bd)->field##_hi = upper_32_bits(((u64)addr)); \
+               (bd)->field##_lo = lower_32_bits((addr)); \
+               (bd)->field##_hi = upper_32_bits((addr)); \
        } while (0)
 #else
 #define nixge_hw_dma_bd_set_addr(bd, field, addr) \
@@ -251,7 +251,7 @@ static void nixge_hw_dma_bd_release(struct net_device *ndev)
                                 NIXGE_MAX_JUMBO_FRAME_SIZE,
                                 DMA_FROM_DEVICE);
 
-               skb = (struct sk_buff *)
+               skb = (struct sk_buff *)(uintptr_t)
                        nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
                                                 sw_id_offset);
                dev_kfree_skb(skb);
@@ -323,7 +323,7 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev)
                if (!skb)
                        goto out;
 
-               nixge_hw_dma_bd_set_offset(&priv->rx_bd_v[i], skb);
+               nixge_hw_dma_bd_set_offset(&priv->rx_bd_v[i], (uintptr_t)skb);
                phys = dma_map_single(ndev->dev.parent, skb->data,
                                      NIXGE_MAX_JUMBO_FRAME_SIZE,
                                      DMA_FROM_DEVICE);
@@ -601,8 +601,8 @@ static int nixge_recv(struct net_device *ndev, int budget)
                tail_p = priv->rx_bd_p + sizeof(*priv->rx_bd_v) *
                         priv->rx_bd_ci;
 
-               skb = (struct sk_buff *)nixge_hw_dma_bd_get_addr(cur_p,
-                                                                sw_id_offset);
+               skb = (struct sk_buff *)(uintptr_t)
+                       nixge_hw_dma_bd_get_addr(cur_p, sw_id_offset);
 
                length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
                if (length > NIXGE_MAX_JUMBO_FRAME_SIZE)
@@ -643,7 +643,7 @@ static int nixge_recv(struct net_device *ndev, int budget)
                nixge_hw_dma_bd_set_phys(cur_p, cur_phys);
                cur_p->cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
                cur_p->status = 0;
-               nixge_hw_dma_bd_set_offset(cur_p, new_skb);
+               nixge_hw_dma_bd_set_offset(cur_p, (uintptr_t)new_skb);
 
                ++priv->rx_bd_ci;
                priv->rx_bd_ci %= RX_BD_NUM;
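
The nixge changes route every pointer-to-integer conversion through uintptr_t: the buffer descriptor stashes an skb pointer in two 32-bit fields, and casting a pointer straight to u64 (or back from one) is only well-defined when it first goes through the pointer-sized integer type. A self-contained model of the round trip, with invented descriptor field names:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pointer stashed across two 32-bit descriptor words, as in the
 * nixge_hw_dma_bd helpers above. */
struct mock_bd {
	uint32_t sw_id_lo;
	uint32_t sw_id_hi;
};

static void bd_set(struct mock_bd *bd, uintptr_t addr)
{
	bd->sw_id_lo = (uint32_t)addr;
	bd->sw_id_hi = (uint32_t)((uint64_t)addr >> 32);
}

static uintptr_t bd_get(const struct mock_bd *bd)
{
	return (uintptr_t)(((uint64_t)bd->sw_id_hi << 32) | bd->sw_id_lo);
}

int main(void)
{
	int payload = 42;
	struct mock_bd bd;

	/* Casting through uintptr_t works on 32- and 64-bit builds. */
	bd_set(&bd, (uintptr_t)&payload);
	assert((int *)bd_get(&bd) == &payload);
	printf("round-tripped: %d\n", *(int *)bd_get(&bd));
	return 0;
}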
index 69aa7fc392c5e4ad1cbcd9025f56bffdf3aa92c7..59c70be22a84c11262388529cf0ddf09887cea96 100644 (file)
@@ -72,9 +72,6 @@ static void netxen_schedule_work(struct netxen_adapter *adapter,
                work_func_t func, int delay);
 static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
 static int netxen_nic_poll(struct napi_struct *napi, int budget);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netxen_nic_poll_controller(struct net_device *netdev);
-#endif
 
 static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
 static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
@@ -581,9 +578,6 @@ static const struct net_device_ops netxen_netdev_ops = {
        .ndo_tx_timeout    = netxen_tx_timeout,
        .ndo_fix_features = netxen_fix_features,
        .ndo_set_features = netxen_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = netxen_nic_poll_controller,
-#endif
 };
 
 static inline bool netxen_function_zero(struct pci_dev *pdev)
@@ -2402,23 +2396,6 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
        return work_done;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netxen_nic_poll_controller(struct net_device *netdev)
-{
-       int ring;
-       struct nx_host_sds_ring *sds_ring;
-       struct netxen_adapter *adapter = netdev_priv(netdev);
-       struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
-
-       disable_irq(adapter->irq);
-       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
-               sds_ring = &recv_ctx->sds_rings[ring];
-               netxen_intr(adapter->irq, sds_ring);
-       }
-       enable_irq(adapter->irq);
-}
-#endif
-
 static int
 nx_incr_dev_ref_cnt(struct netxen_adapter *adapter)
 {
index 0fbeafeef7a04afd6534e634359c279432d47eee..7ceb2b97538d25d767c3d8cc7e7ab79d8b03e760 100644 (file)
@@ -2679,6 +2679,9 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
                link->speed.forced_speed = 10000;
                break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_20G:
+               link->speed.forced_speed = 20000;
+               break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
                link->speed.forced_speed = 25000;
                break;
index d4d08383c75334c8991993871012c4e66debbe47..bf431ab86864c56188886ba5d326e8dd613fc952 100644 (file)
@@ -12102,6 +12102,7 @@ struct public_global {
        u32 running_bundle_id;
        s32 external_temperature;
        u32 mdump_reason;
+       u64 reserved;
        u32 data_ptr;
        u32 data_size;
 };
@@ -13154,6 +13155,7 @@ struct nvm_cfg1_port {
 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET         0
 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G             0x1
 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G            0x2
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G             0x4
 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G            0x8
 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G            0x10
 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G            0x20
@@ -13164,6 +13166,7 @@ struct nvm_cfg1_port {
 #define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG                   0x0
 #define NVM_CFG1_PORT_DRV_LINK_SPEED_1G                                0x1
 #define NVM_CFG1_PORT_DRV_LINK_SPEED_10G                       0x2
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G                        0x3
 #define NVM_CFG1_PORT_DRV_LINK_SPEED_25G                       0x4
 #define NVM_CFG1_PORT_DRV_LINK_SPEED_40G                       0x5
 #define NVM_CFG1_PORT_DRV_LINK_SPEED_50G                       0x6
index f99797a149a4229a6e6209436e30b818989e7ccf..beb8e5d6401a99e85667c2b12ad17e4a36968b8c 100644 (file)
@@ -1709,7 +1709,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
 
                cm_info->local_ip[0] = ntohl(iph->daddr);
                cm_info->remote_ip[0] = ntohl(iph->saddr);
-               cm_info->ip_version = TCP_IPV4;
+               cm_info->ip_version = QED_TCP_IPV4;
 
                ip_hlen = (iph->ihl) * sizeof(u32);
                *payload_len = ntohs(iph->tot_len) - ip_hlen;
@@ -1729,7 +1729,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
                        cm_info->remote_ip[i] =
                            ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
                }
-               cm_info->ip_version = TCP_IPV6;
+               cm_info->ip_version = QED_TCP_IPV6;
 
                ip_hlen = sizeof(*ip6h);
                *payload_len = ntohs(ip6h->payload_len);
index 14ac9cab265341b9a7d2d1c10fa037ea6e4dd20f..aa633381aa47ea3bc13de4829bde316448ee0d6b 100644 (file)
@@ -63,8 +63,8 @@
 #include "qed_sp.h"
 #include "qed_rdma.h"
 
-#define QED_LL2_RX_REGISTERED(ll2)     ((ll2)->rx_queue.b_cb_registred)
-#define QED_LL2_TX_REGISTERED(ll2)     ((ll2)->tx_queue.b_cb_registred)
+#define QED_LL2_RX_REGISTERED(ll2)     ((ll2)->rx_queue.b_cb_registered)
+#define QED_LL2_TX_REGISTERED(ll2)     ((ll2)->tx_queue.b_cb_registered)
 
 #define QED_LL2_TX_SIZE (256)
 #define QED_LL2_RX_SIZE (4096)
@@ -796,7 +796,18 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
                tx_pkt.vlan = p_buffer->vlan;
                tx_pkt.bd_flags = bd_flags;
                tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
-               tx_pkt.tx_dest = p_ll2_conn->tx_dest;
+               switch (p_ll2_conn->tx_dest) {
+               case CORE_TX_DEST_NW:
+                       tx_pkt.tx_dest = QED_LL2_TX_DEST_NW;
+                       break;
+               case CORE_TX_DEST_LB:
+                       tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
+                       break;
+               case CORE_TX_DEST_DROP:
+               default:
+                       tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
+                       break;
+               }
                tx_pkt.first_frag = first_frag;
                tx_pkt.first_frag_len = p_buffer->packet_length;
                tx_pkt.cookie = p_buffer;
@@ -1404,7 +1415,7 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
                                    &p_hwfn->p_ll2_info[i],
                                    &p_ll2_info->rx_queue.rx_sb_index,
                                    &p_ll2_info->rx_queue.p_fw_cons);
-               p_ll2_info->rx_queue.b_cb_registred = true;
+               p_ll2_info->rx_queue.b_cb_registered = true;
        }
 
        if (data->input.tx_num_desc) {
@@ -1413,7 +1424,7 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
                                    &p_hwfn->p_ll2_info[i],
                                    &p_ll2_info->tx_queue.tx_sb_index,
                                    &p_ll2_info->tx_queue.p_fw_cons);
-               p_ll2_info->tx_queue.b_cb_registred = true;
+               p_ll2_info->tx_queue.b_cb_registered = true;
        }
 
        *data->p_connection_handle = i;
@@ -1929,7 +1940,7 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
 
        /* Stop Tx & Rx of connection, if needed */
        if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
-               p_ll2_conn->tx_queue.b_cb_registred = false;
+               p_ll2_conn->tx_queue.b_cb_registered = false;
                smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
                rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
                if (rc)
@@ -1940,7 +1951,7 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
        }
 
        if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
-               p_ll2_conn->rx_queue.b_cb_registred = false;
+               p_ll2_conn->rx_queue.b_cb_registered = false;
                smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
                rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
                if (rc)
index f65817012e9722e5a47458d30a47ef79209075a7..1a5c1ae014745d2f3f8619007c90edaf2fe08a68 100644 (file)
@@ -79,7 +79,7 @@ struct qed_ll2_rx_queue {
        struct qed_chain rxq_chain;
        struct qed_chain rcq_chain;
        u8 rx_sb_index;
-       bool b_cb_registred;
+       bool b_cb_registered;
        __le16 *p_fw_cons;
        struct list_head active_descq;
        struct list_head free_descq;
@@ -93,7 +93,7 @@ struct qed_ll2_tx_queue {
        spinlock_t lock;
        struct qed_chain txq_chain;
        u8 tx_sb_index;
-       bool b_cb_registred;
+       bool b_cb_registered;
        __le16 *p_fw_cons;
        struct list_head active_descq;
        struct list_head free_descq;
index 2094d86a7a087dac2eed0fe77bd71f66e7d975c7..75d217aaf8cec142dbe572eb6f5abc101acabf31 100644 (file)
@@ -1337,6 +1337,9 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
                if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT)
                        link_params->speed.advertised_speeds |=
                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+               if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT)
+                       link_params->speed.advertised_speeds |=
+                               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
                if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT)
                        link_params->speed.advertised_speeds |=
                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
@@ -1502,6 +1505,9 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
        if (params.speed.advertised_speeds &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
                if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT;
+       if (params.speed.advertised_speeds &
+           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
+               if_link->advertised_caps |= QED_LM_20000baseKR2_Full_BIT;
        if (params.speed.advertised_speeds &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
                if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT;
@@ -1522,6 +1528,9 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
        if (link_caps.speed_capabilities &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
                if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT;
+       if (link_caps.speed_capabilities &
+           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
+               if_link->supported_caps |= QED_LM_20000baseKR2_Full_BIT;
        if (link_caps.speed_capabilities &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
                if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT;
@@ -1559,6 +1568,8 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
                if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
        if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
                if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
+       if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G)
+               if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT;
        if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
                if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
        if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
index 63931dfc77cf2236b868d9125c7d393327ffaad6..ffac4ac8739418f82f880c401fd205bc08130dad 100644 (file)
@@ -351,11 +351,9 @@ void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
                          struct qed_ooo_info *p_ooo_info,
                          u32 cid, u8 drop_isle, u8 drop_size)
 {
-       struct qed_ooo_archipelago *p_archipelago = NULL;
        struct qed_ooo_isle *p_isle = NULL;
        u8 isle_idx;
 
-       p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
        for (isle_idx = 0; isle_idx < drop_size; isle_idx++) {
                p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, drop_isle);
                if (!p_isle) {
@@ -460,7 +458,6 @@ void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
 void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
                        struct qed_ooo_info *p_ooo_info, u32 cid, u8 left_isle)
 {
-       struct qed_ooo_archipelago *p_archipelago = NULL;
        struct qed_ooo_isle *p_right_isle = NULL;
        struct qed_ooo_isle *p_left_isle = NULL;
 
@@ -473,7 +470,6 @@ void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
                return;
        }
 
-       p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
        list_del(&p_right_isle->list_entry);
        p_ooo_info->cur_isles_number--;
        if (left_isle) {
index be941cfaa2d4fdf9f50eedd6467033617bfcdba7..c71391b9c757a1b03f55f21cc641c4718bbce719 100644 (file)
@@ -228,7 +228,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
                                 num_cons, "Toggle");
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
-                          "Failed to allocate toogle bits, rc = %d\n", rc);
+                          "Failed to allocate toggle bits, rc = %d\n", rc);
                goto free_cq_map;
        }
 
index 7d7a64c55ff1fc2033e4ee273be0b40d10df8e74..f9167d1354bbef3ccf2e972e8c002e64bbc24cce 100644 (file)
@@ -140,23 +140,16 @@ static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
 
 static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
 {
-       enum roce_flavor flavor;
-
        switch (roce_mode) {
        case ROCE_V1:
-               flavor = PLAIN_ROCE;
-               break;
+               return PLAIN_ROCE;
        case ROCE_V2_IPV4:
-               flavor = RROCE_IPV4;
-               break;
+               return RROCE_IPV4;
        case ROCE_V2_IPV6:
-               flavor = ROCE_V2_IPV6;
-               break;
+               return RROCE_IPV6;
        default:
-               flavor = MAX_ROCE_MODE;
-               break;
+               return MAX_ROCE_FLAVOR;
        }
-       return flavor;
 }
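
Besides dropping the temporary, this rewrite fixes two mix-ups visible in the removed lines: the ROCE_V2_IPV6 case returned ROCE_V2_IPV6 — a roce_mode value, not a roce_flavor — and the default returned MAX_ROCE_MODE rather than MAX_ROCE_FLAVOR. C converts between enums silently, so returning the target enum directly from each case keeps the mapping auditable. A small sketch of the pattern, with invented enum names:

#include <stdio.h>

enum mode   { MODE_V1, MODE_V2_IPV4, MODE_V2_IPV6, MAX_MODE };
enum flavor { PLAIN, ROUTED_IPV4, ROUTED_IPV6, MAX_FLAVOR };

static enum flavor mode_to_flavor(enum mode m)
{
	switch (m) {
	case MODE_V1:
		return PLAIN;
	case MODE_V2_IPV4:
		return ROUTED_IPV4;
	case MODE_V2_IPV6:
		return ROUTED_IPV6;   /* easy to return MODE_V2_IPV6 by mistake */
	default:
		return MAX_FLAVOR;    /* sentinel from the *target* enum */
	}
}

int main(void)
{
	printf("%d\n", mode_to_flavor(MODE_V2_IPV6));
	return 0;
}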
 
 static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
index 8de644b4721efd63a7d3efa410139228d0b2f739..77b6248ad3b97d3a45caf27825faddabf9695a5b 100644 (file)
@@ -154,7 +154,7 @@ qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
 static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
                                  struct qed_tunnel_info *p_src)
 {
-       enum tunnel_clss type;
+       int type;
 
        p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
        p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
index 3d42696598202591794613afebea7ed42d51be6e..be118d057b92c5ad494690b7c80c98140dbb8e7a 100644 (file)
@@ -413,7 +413,6 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
        }
 
        if (!p_iov->b_pre_fp_hsi &&
-           ETH_HSI_VER_MINOR &&
            (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
                DP_INFO(p_hwfn,
                        "PF is using older fastpath HSI; %02x.%02x is configured\n",
@@ -572,7 +571,7 @@ free_p_iov:
 static void
 __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
                           struct qed_tunn_update_type *p_src,
-                          enum qed_tunn_clss mask, u8 *p_cls)
+                          enum qed_tunn_mode mask, u8 *p_cls)
 {
        if (p_src->b_update_mode) {
                p_req->tun_mode_update_mask |= BIT(mask);
@@ -587,7 +586,7 @@ __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
 static void
 qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
                         struct qed_tunn_update_type *p_src,
-                        enum qed_tunn_clss mask,
+                        enum qed_tunn_mode mask,
                         u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
                         u8 *p_update_port, u16 *p_udp_port)
 {
index 19652cd27ca78afd8de31214b59f5f7f310a8f1c..7ff50b4488f61ab66d3b8926766c187e85057493 100644 (file)
@@ -420,6 +420,7 @@ static const struct qede_link_mode_mapping qed_lm_map[] = {
        {QED_LM_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Half_BIT},
        {QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
        {QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
+       {QED_LM_20000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT},
        {QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
        {QED_LM_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
        {QED_LM_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT},
@@ -535,6 +536,14 @@ static int qede_set_link_ksettings(struct net_device *dev,
                        }
                        params.adv_speeds = QED_LM_10000baseKR_Full_BIT;
                        break;
+               case SPEED_20000:
+                       if (!(current_link.supported_caps &
+                             QED_LM_20000baseKR2_Full_BIT)) {
+                               DP_INFO(edev, "20G speed not supported\n");
+                               return -EINVAL;
+                       }
+                       params.adv_speeds = QED_LM_20000baseKR2_Full_BIT;
+                       break;
                case SPEED_25000:
                        if (!(current_link.supported_caps &
                              QED_LM_25000baseKR_Full_BIT)) {
index 81312924df1407092fd1dd43cc0555d16976160b..0c443ea98479ac0971a6e36c28bd8bde2f080bfa 100644 (file)
@@ -1800,7 +1800,8 @@ struct qlcnic_hardware_ops {
        int (*config_loopback) (struct qlcnic_adapter *, u8);
        int (*clear_loopback) (struct qlcnic_adapter *, u8);
        int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
-       void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
+       void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr,
+                                u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
        int (*get_board_info) (struct qlcnic_adapter *);
        void (*set_mac_filter_count) (struct qlcnic_adapter *);
        void (*free_mac_list) (struct qlcnic_adapter *);
@@ -2064,9 +2065,10 @@ static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
 }
 
 static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
-                                       u64 *addr, u16 id)
+                                       u64 *addr, u16 vlan,
+                                       struct qlcnic_host_tx_ring *tx_ring)
 {
-       adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
+       adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring);
 }
 
 static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
index 569d54ededeca2e6472a3f8502e91c45be8e5232..a79d84f9910229515acf900e8286f71b8a010ae1 100644 (file)
@@ -2135,7 +2135,8 @@ out:
 }
 
 void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
-                                 u16 vlan_id)
+                                 u16 vlan_id,
+                                 struct qlcnic_host_tx_ring *tx_ring)
 {
        u8 mac[ETH_ALEN];
        memcpy(&mac, addr, ETH_ALEN);
index b75a812468569de7728fd9c654b6f1c7e353729f..73fe2f64491de24408d893a3eb91ffb691fe4f03 100644 (file)
@@ -550,7 +550,8 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
 int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
 int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
 int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
-void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+                                 u16 vlan, struct qlcnic_host_tx_ring *ring);
 int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
 int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
 void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
index 4bb33af8e2b3a956db02847bfebfa6ef2362bb3b..56a3bd9e37dcd773e9d8b1d52366eaa971506fa6 100644 (file)
@@ -173,7 +173,8 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
                         struct net_device *netdev);
 void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
 void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
-                              u64 *uaddr, u16 vlan_id);
+                              u64 *uaddr, u16 vlan_id,
+                              struct qlcnic_host_tx_ring *tx_ring);
 int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
                                     struct ethtool_coalesce *);
 int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
index 84dd83031a1bfcc31c0f8a908fef0c1bb3e7d155..9647578cbe6a8fec82409c4eadf9aee02f6c7971 100644 (file)
@@ -268,13 +268,12 @@ static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
 }
 
 void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
-                              u16 vlan_id)
+                              u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
 {
        struct cmd_desc_type0 *hwdesc;
        struct qlcnic_nic_req *req;
        struct qlcnic_mac_req *mac_req;
        struct qlcnic_vlan_req *vlan_req;
-       struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
        u32 producer;
        u64 word;
 
@@ -301,7 +300,8 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
 
 static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
                               struct cmd_desc_type0 *first_desc,
-                              struct sk_buff *skb)
+                              struct sk_buff *skb,
+                              struct qlcnic_host_tx_ring *tx_ring)
 {
        struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
        struct ethhdr *phdr = (struct ethhdr *)(skb->data);
@@ -335,7 +335,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
                    tmp_fil->vlan_id == vlan_id) {
                        if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
                                qlcnic_change_filter(adapter, &src_addr,
-                                                    vlan_id);
+                                                    vlan_id, tx_ring);
                        tmp_fil->ftime = jiffies;
                        return;
                }
@@ -350,7 +350,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
        if (!fil)
                return;
 
-       qlcnic_change_filter(adapter, &src_addr, vlan_id);
+       qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
        fil->ftime = jiffies;
        fil->vlan_id = vlan_id;
        memcpy(fil->faddr, &src_addr, ETH_ALEN);
@@ -766,7 +766,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        }
 
        if (adapter->drv_mac_learn)
-               qlcnic_send_filter(adapter, first_desc, skb);
+               qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
 
        tx_ring->tx_stats.tx_bytes += skb->len;
        tx_ring->tx_stats.xmit_called++;
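
The signature change threads the transmit ring the caller is actually using down to the filter update, instead of letting the helper reach for adapter->tx_ring (ring 0); on multi-Tx-queue devices the filter-request descriptor must land on the caller's ring. A toy sketch of the refactored shape, with invented types:

#include <stdio.h>

struct ring { int id; int producer; };

struct adapter {
	struct ring rings[4];   /* multi-Tx-queue device */
};

/* After the refactor: the caller's ring is an explicit parameter. */
static void change_filter(struct ring *tx_ring)
{
	tx_ring->producer++;    /* descriptor lands on the right queue */
	printf("filter request queued on ring %d\n", tx_ring->id);
}

static void xmit(struct adapter *a, int queue)
{
	change_filter(&a->rings[queue]);
}

int main(void)
{
	struct adapter a = { .rings = { {0}, {1}, {2}, {3} } };

	xmit(&a, 2);   /* previously this would have used ring 0 */
	return 0;
}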
index 2d38d1ac2aae58fd210030c7b143011f76b921cc..dbd48012224f2467d27134eedc692a68b92b1a04 100644 (file)
@@ -59,9 +59,6 @@ static int qlcnic_close(struct net_device *netdev);
 static void qlcnic_tx_timeout(struct net_device *netdev);
 static void qlcnic_attach_work(struct work_struct *work);
 static void qlcnic_fwinit_work(struct work_struct *work);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void qlcnic_poll_controller(struct net_device *netdev);
-#endif
 
 static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
 static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
@@ -545,9 +542,6 @@ static const struct net_device_ops qlcnic_netdev_ops = {
        .ndo_udp_tunnel_add     = qlcnic_add_vxlan_port,
        .ndo_udp_tunnel_del     = qlcnic_del_vxlan_port,
        .ndo_features_check     = qlcnic_features_check,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = qlcnic_poll_controller,
-#endif
 #ifdef CONFIG_QLCNIC_SRIOV
        .ndo_set_vf_mac         = qlcnic_sriov_set_vf_mac,
        .ndo_set_vf_rate        = qlcnic_sriov_set_vf_tx_rate,
@@ -3200,45 +3194,6 @@ static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void qlcnic_poll_controller(struct net_device *netdev)
-{
-       struct qlcnic_adapter *adapter = netdev_priv(netdev);
-       struct qlcnic_host_sds_ring *sds_ring;
-       struct qlcnic_recv_context *recv_ctx;
-       struct qlcnic_host_tx_ring *tx_ring;
-       int ring;
-
-       if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
-               return;
-
-       recv_ctx = adapter->recv_ctx;
-
-       for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
-               sds_ring = &recv_ctx->sds_rings[ring];
-               qlcnic_disable_sds_intr(adapter, sds_ring);
-               napi_schedule(&sds_ring->napi);
-       }
-
-       if (adapter->flags & QLCNIC_MSIX_ENABLED) {
-               /* Only Multi-Tx queue capable devices need to
-                * schedule NAPI for TX rings
-                */
-               if ((qlcnic_83xx_check(adapter) &&
-                    (adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
-                   (qlcnic_82xx_check(adapter) &&
-                    !qlcnic_check_multi_tx(adapter)))
-                       return;
-
-               for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
-                       tx_ring = &adapter->tx_ring[ring];
-                       qlcnic_disable_tx_intr(adapter, tx_ring);
-                       napi_schedule(&tx_ring->napi);
-               }
-       }
-}
-#endif
-
 static void
 qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
 {
index 7fd86d40a3374df1fba991ece10e6ec48bc197e1..11167abe5934d3a2d2d71f6cb0f7674d665d9d5b 100644 (file)
@@ -113,7 +113,7 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
        struct sk_buff *skbn;
 
        if (skb->dev->type == ARPHRD_ETHER) {
-               if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_KERNEL)) {
+               if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
                        kfree_skb(skb);
                        return;
                }
@@ -147,7 +147,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
        }
 
        if (skb_headroom(skb) < required_headroom) {
-               if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
+               if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
                        return -ENOMEM;
        }
 
@@ -189,6 +189,9 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
        if (!skb)
                goto done;
 
+       if (skb->pkt_type == PACKET_LOOPBACK)
+               return RX_HANDLER_PASS;
+
        dev = skb->dev;
        port = rmnet_get_port(dev);
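Both allocation-flag changes above follow from calling context: rmnet_rx_handler() is an rx_handler invoked from NAPI softirq context, and the egress map handler sits on the transmit fast path, so neither may use a sleeping GFP_KERNEL allocation. The constraint, restated:

        /* fast-path skb reallocation must not sleep: */
        if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
                kfree_skb(skb);         /* no memory; drop the frame */
                return;
        }

The new PACKET_LOOPBACK check simply returns RX_HANDLER_PASS so looped-back frames go straight up the stack instead of through rmnet de-aggregation.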
 
index ed8ffd498c887975026af28f9192be78f8b21657..7d3f671e1bb367f4f206aae742cc1d865c37f4c3 100644 (file)
@@ -4059,13 +4059,12 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
 
        genphy_soft_reset(dev->phydev);
 
-       /* It was reported that chip version 33 ends up with 10MBit/Half on a
+       /* It was reported that several chips end up with 10MBit/Half on a
         * 1GBit link after resuming from S3. For whatever reason the PHY on
-        * this chip doesn't properly start a renegotiation when soft-reset.
+        * these chips doesn't properly start a renegotiation when soft-reset.
         * Explicitly requesting a renegotiation fixes this.
         */
-       if (tp->mac_version == RTL_GIGA_MAC_VER_33 &&
-           dev->phydev->autoneg == AUTONEG_ENABLE)
+       if (dev->phydev->autoneg == AUTONEG_ENABLE)
                phy_restart_aneg(dev->phydev);
 }
 
@@ -4523,9 +4522,14 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
 
 static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
 {
-       /* Set DMA burst size and Interframe Gap Time */
-       RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) |
-               (InterFrameGap << TxInterFrameGapShift));
+       u32 val = TX_DMA_BURST << TxDMAShift |
+                 InterFrameGap << TxInterFrameGapShift;
+
+       if (tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
+           tp->mac_version != RTL_GIGA_MAC_VER_39)
+               val |= TXCFG_AUTO_FIFO;
+
+       RTL_W32(tp, TxConfig, val);
 }
 
 static void rtl_set_rx_max_size(struct rtl8169_private *tp)
@@ -5020,7 +5024,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
 
        rtl_disable_clock_request(tp);
 
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
        RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
 
        /* Adjust EEE LED frequency */
@@ -5054,7 +5057,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
 
        rtl_disable_clock_request(tp);
 
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
        RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
        RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
        RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
@@ -5099,8 +5101,6 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
 
 static void rtl_hw_start_8168g(struct rtl8169_private *tp)
 {
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
        rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5198,8 +5198,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
        rtl_hw_aspm_clkreq_enable(tp, false);
        rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
 
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
        rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5282,8 +5280,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
 {
        rtl8168ep_stop_cmac(tp);
 
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
        rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC);
@@ -5605,7 +5601,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
        /* Force LAN exit from ASPM if Rx/Tx are not idle */
        RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
 
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
        RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
 
        rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
@@ -6856,8 +6851,10 @@ static int rtl8169_suspend(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
+       struct rtl8169_private *tp = netdev_priv(dev);
 
        rtl8169_net_suspend(dev);
+       clk_disable_unprepare(tp->clk);
 
        return 0;
 }
@@ -6885,6 +6882,9 @@ static int rtl8169_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
+       struct rtl8169_private *tp = netdev_priv(dev);
+
+       clk_prepare_enable(tp->clk);
 
        if (netif_running(dev))
                __rtl8169_resume(dev);
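The suspend/resume hooks now gate a clock across the sleep transition, which presumes tp->clk was obtained at probe time. A plausible probe-side counterpart, with the clock name "ether_clk" and the devm lookup being assumptions not visible in this hunk:

        /* sketch: optional external clock, absent on most boards */
        tp->clk = devm_clk_get(&pdev->dev, "ether_clk");
        if (IS_ERR(tp->clk))
                tp->clk = NULL;         /* treat as optional */
        else
                clk_prepare_enable(tp->clk);

Since the common clock API accepts a NULL clock as a no-op, the PM hooks can stay unconditional.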
index 330233286e785254f5f29c87f9557a305974f606..3d0dd39c289e05b8a7a6778363461ef5698dc62b 100644 (file)
@@ -2206,29 +2206,6 @@ static void efx_fini_napi(struct efx_nic *efx)
                efx_fini_napi_channel(channel);
 }
 
-/**************************************************************************
- *
- * Kernel netpoll interface
- *
- *************************************************************************/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-
-/* Although in the common case interrupts will be disabled, this is not
- * guaranteed. However, all our work happens inside the NAPI callback,
- * so no locking is required.
- */
-static void efx_netpoll(struct net_device *net_dev)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-       struct efx_channel *channel;
-
-       efx_for_each_channel(channel, efx)
-               efx_schedule_channel(channel);
-}
-
-#endif
-
 /**************************************************************************
  *
  * Kernel net device interface
@@ -2509,9 +2486,6 @@ static const struct net_device_ops efx_netdev_ops = {
 #endif
        .ndo_get_phys_port_id   = efx_get_phys_port_id,
        .ndo_get_phys_port_name = efx_get_phys_port_name,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = efx_netpoll,
-#endif
        .ndo_setup_tc           = efx_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = efx_filter_rfs,
index dd5530a4f8c8936868aed7171bd9481f93730d76..03e2455c502eacd9a4fd5c7fd320a9edcf265f77 100644 (file)
@@ -2052,29 +2052,6 @@ static void ef4_fini_napi(struct ef4_nic *efx)
                ef4_fini_napi_channel(channel);
 }
 
-/**************************************************************************
- *
- * Kernel netpoll interface
- *
- *************************************************************************/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-
-/* Although in the common case interrupts will be disabled, this is not
- * guaranteed. However, all our work happens inside the NAPI callback,
- * so no locking is required.
- */
-static void ef4_netpoll(struct net_device *net_dev)
-{
-       struct ef4_nic *efx = netdev_priv(net_dev);
-       struct ef4_channel *channel;
-
-       ef4_for_each_channel(channel, efx)
-               ef4_schedule_channel(channel);
-}
-
-#endif
-
 /**************************************************************************
  *
  * Kernel net device interface
@@ -2250,9 +2227,6 @@ static const struct net_device_ops ef4_netdev_ops = {
        .ndo_set_mac_address    = ef4_set_mac_address,
        .ndo_set_rx_mode        = ef4_set_rx_mode,
        .ndo_set_features       = ef4_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = ef4_netpoll,
-#endif
        .ndo_setup_tc           = ef4_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = ef4_filter_rfs,
index 2a156dcd45341214f74d0eb1c67490526f1d518f..6732f5cbde081052ce9e1c2417451118cadbff8f 100644 (file)
@@ -1116,11 +1116,8 @@ static void ave_phy_adjust_link(struct net_device *ndev)
                        rmt_adv |= LPA_PAUSE_CAP;
                if (phydev->asym_pause)
                        rmt_adv |= LPA_PAUSE_ASYM;
-               if (phydev->advertising & ADVERTISED_Pause)
-                       lcl_adv |= ADVERTISE_PAUSE_CAP;
-               if (phydev->advertising & ADVERTISED_Asym_Pause)
-                       lcl_adv |= ADVERTISE_PAUSE_ASYM;
 
+               lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
                cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
                if (cap & FLOW_CTRL_TX)
                        txcr |= AVE_TXCR_FLOCTR;
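ethtool_adv_to_lcl_adv_t() replaces the open-coded pause translation deleted above. Judging purely from the lines it replaces, the helper reduces to something like:

        static inline u16 ethtool_adv_to_lcl_adv_t(u32 advertising)
        {
                u16 lcl_adv = 0;

                if (advertising & ADVERTISED_Pause)
                        lcl_adv |= ADVERTISE_PAUSE_CAP;
                if (advertising & ADVERTISED_Asym_Pause)
                        lcl_adv |= ADVERTISE_PAUSE_ASYM;
                return lcl_adv;
        }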
index 6625fabe2c8870379565c03f99edabb2b857b66e..82eccc930c5ca01e0aa017e497c1cbe2e24c67f8 100644 (file)
@@ -1325,11 +1325,15 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
                info->key.tun_id = tunid;
        }
 
-       if (data[IFLA_GENEVE_TTL])
+       if (data[IFLA_GENEVE_TTL_INHERIT]) {
+               if (nla_get_u8(data[IFLA_GENEVE_TTL_INHERIT]))
+                       *ttl_inherit = true;
+               else
+                       *ttl_inherit = false;
+       } else if (data[IFLA_GENEVE_TTL]) {
                info->key.ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
-
-       if (data[IFLA_GENEVE_TTL_INHERIT])
-               *ttl_inherit = true;
+               *ttl_inherit = false;
+       }
 
        if (data[IFLA_GENEVE_TOS])
                info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
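The reordered geneve attribute handling gives IFLA_GENEVE_TTL_INHERIT priority over IFLA_GENEVE_TTL and, unlike the old code, honours an explicit zero to switch inheritance off. The resulting precedence, spelled out:

        /* TTL_INHERIT present: *ttl_inherit = !!nla_get_u8(attr)
         * else TTL present:    fixed TTL taken from the attribute,
         *                      *ttl_inherit forced to false
         * else:                both settings left unchanged
         */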
index 16ec7af6ab7b3f53bd1e7165819c76b99736bf86..ba9df430fca6e45ea10db1dc9d762ce700102121 100644 (file)
@@ -966,6 +966,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                                 sizeof(struct yamdrv_ioctl_mcs));
                if (IS_ERR(ym))
                        return PTR_ERR(ym);
+               if (ym->cmd != SIOCYAMSMCS)
+                       return -EINVAL;
                if (ym->bitrate > YAM_MAXBITRATE) {
                        kfree(ym);
                        return -EINVAL;
@@ -981,6 +983,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg)))
                         return -EFAULT;
 
+               if (yi.cmd != SIOCYAMSCFG)
+                       return -EINVAL;
                if ((yi.cfg.mask & YAM_IOBASE) && netif_running(dev))
                        return -EINVAL;         /* Cannot change this parameter when up */
                if ((yi.cfg.mask & YAM_IRQ) && netif_running(dev))
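The two new cmd checks reject ioctl payloads whose embedded command field does not match the ioctl being serviced. One catch: the SIOCYAMSMCS check returns while still holding ym, which the surrounding IS_ERR()/kfree() pattern shows was duplicated from user space, so as written that path leaks the buffer. A leak-free variant of the same check:

                if (ym->cmd != SIOCYAMSMCS) {
                        kfree(ym);      /* release the copied buffer */
                        return -EINVAL;
                }

The SIOCYAMSCFG path copies into a stack variable, so its early return needs no cleanup.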
index ec699741170b6488c2a19a23df3931fcee627d88..9bcaf204a7d4ce8846813ae5335944d65bc24e0b 100644 (file)
@@ -226,6 +226,7 @@ static inline void *init_ppi_data(struct rndis_message *msg,
 
        ppi->size = ppi_size;
        ppi->type = pkt_type;
+       ppi->internal = 0;
        ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
 
        rndis_pkt->per_pkt_info_len += ppi_size;
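Zeroing ppi->internal makes init_ppi_data() fill in the per-packet-info header explicitly instead of leaving that word at whatever the send buffer previously contained before the message is handed to the host:

        ppi->size       = ppi_size;
        ppi->type       = pkt_type;
        ppi->internal   = 0;    /* previously left uninitialized */
        ppi->ppi_offset = sizeof(struct rndis_per_packet_info);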
@@ -1564,26 +1565,6 @@ netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
        return -EOPNOTSUPP;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netvsc_poll_controller(struct net_device *dev)
-{
-       struct net_device_context *ndc = netdev_priv(dev);
-       struct netvsc_device *ndev;
-       int i;
-
-       rcu_read_lock();
-       ndev = rcu_dereference(ndc->nvdev);
-       if (ndev) {
-               for (i = 0; i < ndev->num_chn; i++) {
-                       struct netvsc_channel *nvchan = &ndev->chan_table[i];
-
-                       napi_schedule(&nvchan->napi);
-               }
-       }
-       rcu_read_unlock();
-}
-#endif
-
 static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
 {
        return NETVSC_HASH_KEYLEN;
@@ -1811,9 +1792,6 @@ static const struct net_device_ops device_ops = {
        .ndo_set_mac_address =          netvsc_set_mac_addr,
        .ndo_select_queue =             netvsc_select_queue,
        .ndo_get_stats64 =              netvsc_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller =          netvsc_poll_controller,
-#endif
 };
 
 /*
index 23a52b9293f35eaec1d71063305a029ba466d819..cd1d8faccca5fb36b488312d734d5e42cebb7b1a 100644 (file)
@@ -1308,8 +1308,7 @@ static int adf7242_remove(struct spi_device *spi)
 {
        struct adf7242_local *lp = spi_get_drvdata(spi);
 
-       if (!IS_ERR_OR_NULL(lp->debugfs_root))
-               debugfs_remove_recursive(lp->debugfs_root);
+       debugfs_remove_recursive(lp->debugfs_root);
 
        cancel_delayed_work_sync(&lp->work);
        destroy_workqueue(lp->wqueue);
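Dropping the IS_ERR_OR_NULL() guard is safe because debugfs_remove_recursive() makes the same check itself; the debugfs removal API is specified to accept NULL and error pointers, roughly:

        void debugfs_remove_recursive(struct dentry *dentry)
        {
                if (IS_ERR_OR_NULL(dentry))
                        return;         /* nothing to tear down */
                /* ... */
        }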
index 58299fb666ed4d84fb7ea01a76aabb86575ab939..0ff5a403a8dc356a359fb085be26379ca011b67b 100644 (file)
@@ -634,10 +634,9 @@ static int ca8210_test_int_driver_write(
        for (i = 0; i < len; i++)
                dev_dbg(&priv->spi->dev, "%#03x\n", buf[i]);
 
-       fifo_buffer = kmalloc(len, GFP_KERNEL);
+       fifo_buffer = kmemdup(buf, len, GFP_KERNEL);
        if (!fifo_buffer)
                return -ENOMEM;
-       memcpy(fifo_buffer, buf, len);
        kfifo_in(&test->up_fifo, &fifo_buffer, 4);
        wake_up_interruptible(&priv->test.readq);
 
@@ -3044,8 +3043,7 @@ static void ca8210_test_interface_clear(struct ca8210_priv *priv)
 {
        struct ca8210_test *test = &priv->test;
 
-       if (!IS_ERR(test->ca8210_dfs_spi_int))
-               debugfs_remove(test->ca8210_dfs_spi_int);
+       debugfs_remove(test->ca8210_dfs_spi_int);
        kfifo_free(&test->up_fifo);
        dev_info(&priv->spi->dev, "Test interface removed\n");
 }
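Same debugfs rationale as the adf7242 hunk above (debugfs_remove() also tolerates NULL and error pointers), plus a kmalloc()+memcpy() pair collapsed into kmemdup(), which is semantically:

        void *kmemdup(const void *src, size_t len, gfp_t gfp)
        {
                void *p = kmalloc(len, gfp);

                if (p)
                        memcpy(p, src, len);
                return p;
        }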
index bf70ab892e697865eeffd69a01674da634127885..51b5198d5943422bc61e2201e830176d733d006b 100644 (file)
@@ -37,8 +37,6 @@ MODULE_LICENSE("GPL");
 static LIST_HEAD(hwsim_phys);
 static DEFINE_MUTEX(hwsim_phys_lock);
 
-static LIST_HEAD(hwsim_ifup_phys);
-
 static struct platform_device *mac802154hwsim_dev;
 
 /* MAC802154_HWSIM netlink family */
@@ -85,7 +83,6 @@ struct hwsim_phy {
        struct list_head edges;
 
        struct list_head list;
-       struct list_head list_ifup;
 };
 
 static int hwsim_add_one(struct genl_info *info, struct device *dev,
@@ -159,9 +156,6 @@ static int hwsim_hw_start(struct ieee802154_hw *hw)
        struct hwsim_phy *phy = hw->priv;
 
        phy->suspended = false;
-       list_add_rcu(&phy->list_ifup, &hwsim_ifup_phys);
-       synchronize_rcu();
-
        return 0;
 }
 
@@ -170,8 +164,6 @@ static void hwsim_hw_stop(struct ieee802154_hw *hw)
        struct hwsim_phy *phy = hw->priv;
 
        phy->suspended = true;
-       list_del_rcu(&phy->list_ifup);
-       synchronize_rcu();
 }
 
 static int
index e428277781ac4422bec2e8f47fd35476a85a74f7..44de81e5f140fba3f6efd3c02932c2c8346f67b8 100644 (file)
@@ -132,11 +132,6 @@ static const struct reg_sequence mar20a_iar_overwrites[] = {
 };
 
 #define MCR20A_VALID_CHANNELS (0x07FFF800)
-
-struct mcr20a_platform_data {
-       int rst_gpio;
-};
-
 #define MCR20A_MAX_BUF         (127)
 
 #define printdev(X) (&X->spi->dev)
@@ -412,7 +407,6 @@ struct mcr20a_local {
        struct spi_device *spi;
 
        struct ieee802154_hw *hw;
-       struct mcr20a_platform_data *pdata;
        struct regmap *regmap_dar;
        struct regmap *regmap_iar;
 
@@ -903,19 +897,19 @@ mcr20a_irq_clean_complete(void *context)
 
        switch (seq_state) {
        /* TX IRQ, RX IRQ and SEQ IRQ */
-       case (0x03):
+       case (DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
                if (lp->is_tx) {
                        lp->is_tx = 0;
                        dev_dbg(printdev(lp), "TX is done. No ACK\n");
                        mcr20a_handle_tx_complete(lp);
                }
                break;
-       case (0x05):
+       case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ):
                        /* rx is starting */
                        dev_dbg(printdev(lp), "RX is starting\n");
                        mcr20a_handle_rx(lp);
                break;
-       case (0x07):
+       case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
                if (lp->is_tx) {
                        /* tx is done */
                        lp->is_tx = 0;
@@ -927,7 +921,7 @@ mcr20a_irq_clean_complete(void *context)
                        mcr20a_handle_rx(lp);
                }
                break;
-       case (0x01):
+       case (DAR_IRQSTS1_SEQIRQ):
                if (lp->is_tx) {
                        dev_dbg(printdev(lp), "TX is starting\n");
                        mcr20a_handle_tx(lp);
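Replacing the magic case labels also pins down the DAR_IRQSTS1 register layout, since the named bits must reproduce the old literals:

        /* implied by the replaced case values:
         *   DAR_IRQSTS1_SEQIRQ = BIT(0)   0x01 = SEQ
         *   DAR_IRQSTS1_TXIRQ  = BIT(1)   0x03 = TX | SEQ
         *   DAR_IRQSTS1_RXIRQ  = BIT(2)   0x05 = RX | SEQ, 0x07 = RX | TX | SEQ
         */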
@@ -976,20 +970,6 @@ static irqreturn_t mcr20a_irq_isr(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static int mcr20a_get_platform_data(struct spi_device *spi,
-                                   struct mcr20a_platform_data *pdata)
-{
-       int ret = 0;
-
-       if (!spi->dev.of_node)
-               return -EINVAL;
-
-       pdata->rst_gpio = of_get_named_gpio(spi->dev.of_node, "rst_b-gpio", 0);
-       dev_dbg(&spi->dev, "rst_b-gpio: %d\n", pdata->rst_gpio);
-
-       return ret;
-}
-
 static void mcr20a_hw_setup(struct mcr20a_local *lp)
 {
        u8 i;
@@ -1249,7 +1229,7 @@ mcr20a_probe(struct spi_device *spi)
 {
        struct ieee802154_hw *hw;
        struct mcr20a_local *lp;
-       struct mcr20a_platform_data *pdata;
+       struct gpio_desc *rst_b;
        int irq_type;
        int ret = -ENOMEM;
 
@@ -1260,48 +1240,32 @@ mcr20a_probe(struct spi_device *spi)
                return -EINVAL;
        }
 
-       pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
-       if (!pdata)
-               return -ENOMEM;
-
-       /* set mcr20a platform data */
-       ret = mcr20a_get_platform_data(spi, pdata);
-       if (ret < 0) {
-               dev_crit(&spi->dev, "mcr20a_get_platform_data failed.\n");
-               goto free_pdata;
-       }
-
-       /* init reset gpio */
-       if (gpio_is_valid(pdata->rst_gpio)) {
-               ret = devm_gpio_request_one(&spi->dev, pdata->rst_gpio,
-                                           GPIOF_OUT_INIT_HIGH, "reset");
-               if (ret)
-                       goto free_pdata;
+       rst_b = devm_gpiod_get(&spi->dev, "rst_b", GPIOD_OUT_HIGH);
+       if (IS_ERR(rst_b)) {
+               ret = PTR_ERR(rst_b);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(&spi->dev, "Failed to get 'rst_b' gpio: %d", ret);
+               return ret;
        }
 
        /* reset mcr20a */
-       if (gpio_is_valid(pdata->rst_gpio)) {
-               usleep_range(10, 20);
-               gpio_set_value_cansleep(pdata->rst_gpio, 0);
-               usleep_range(10, 20);
-               gpio_set_value_cansleep(pdata->rst_gpio, 1);
-               usleep_range(120, 240);
-       }
+       usleep_range(10, 20);
+       gpiod_set_value_cansleep(rst_b, 1);
+       usleep_range(10, 20);
+       gpiod_set_value_cansleep(rst_b, 0);
+       usleep_range(120, 240);
 
        /* allocate ieee802154_hw and private data */
        hw = ieee802154_alloc_hw(sizeof(*lp), &mcr20a_hw_ops);
        if (!hw) {
                dev_crit(&spi->dev, "ieee802154_alloc_hw failed\n");
-               ret = -ENOMEM;
-               goto free_pdata;
+               return ret;
        }
 
        /* init mcr20a local data */
        lp = hw->priv;
        lp->hw = hw;
        lp->spi = spi;
-       lp->spi->dev.platform_data = pdata;
-       lp->pdata = pdata;
 
        /* init ieee802154_hw */
        hw->parent = &spi->dev;
@@ -1370,8 +1334,6 @@ mcr20a_probe(struct spi_device *spi)
 
 free_dev:
        ieee802154_free_hw(lp->hw);
-free_pdata:
-       kfree(pdata);
 
        return ret;
 }
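The probe conversion swaps the legacy integer GPIO calls for gpiod, and the apparent polarity inversion is the point to read carefully: devm_gpiod_get() picks up the line's active level from the device tree, and gpiod_set_value_cansleep() takes logical values, so driving 1 then 0 means assert-then-release reset provided "rst_b" is described as active-low in DT (an assumption this hunk cannot confirm):

        gpiod_set_value_cansleep(rst_b, 1);     /* assert reset  */
        usleep_range(10, 20);
        gpiod_set_value_cansleep(rst_b, 0);     /* release reset */

GPIOD_OUT_HIGH at request time likewise means "start asserted" in logical terms. Minor nit in the new error print: the dev_err() format string lacks a trailing newline.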
index 319edc9c8ec7f04c533c1d61d3bb959ca2d445e9..632472cab3bbb4094187ba2d01d836b8fd2d8285 100644 (file)
@@ -115,7 +115,7 @@ static struct phy_driver aquantia_driver[] = {
        .phy_id         = PHY_ID_AQ1202,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Aquantia AQ1202",
-       .features       = PHY_AQUANTIA_FEATURES,
+       .features       = PHY_10GBIT_FULL_FEATURES,
        .flags          = PHY_HAS_INTERRUPT,
        .aneg_done      = genphy_c45_aneg_done,
        .config_aneg    = aquantia_config_aneg,
@@ -127,7 +127,7 @@ static struct phy_driver aquantia_driver[] = {
        .phy_id         = PHY_ID_AQ2104,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Aquantia AQ2104",
-       .features       = PHY_AQUANTIA_FEATURES,
+       .features       = PHY_10GBIT_FULL_FEATURES,
        .flags          = PHY_HAS_INTERRUPT,
        .aneg_done      = genphy_c45_aneg_done,
        .config_aneg    = aquantia_config_aneg,
@@ -139,7 +139,7 @@ static struct phy_driver aquantia_driver[] = {
        .phy_id         = PHY_ID_AQR105,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Aquantia AQR105",
-       .features       = PHY_AQUANTIA_FEATURES,
+       .features       = PHY_10GBIT_FULL_FEATURES,
        .flags          = PHY_HAS_INTERRUPT,
        .aneg_done      = genphy_c45_aneg_done,
        .config_aneg    = aquantia_config_aneg,
@@ -151,7 +151,7 @@ static struct phy_driver aquantia_driver[] = {
        .phy_id         = PHY_ID_AQR106,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Aquantia AQR106",
-       .features       = PHY_AQUANTIA_FEATURES,
+       .features       = PHY_10GBIT_FULL_FEATURES,
        .flags          = PHY_HAS_INTERRUPT,
        .aneg_done      = genphy_c45_aneg_done,
        .config_aneg    = aquantia_config_aneg,
@@ -163,7 +163,7 @@ static struct phy_driver aquantia_driver[] = {
        .phy_id         = PHY_ID_AQR107,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Aquantia AQR107",
-       .features       = PHY_AQUANTIA_FEATURES,
+       .features       = PHY_10GBIT_FULL_FEATURES,
        .flags          = PHY_HAS_INTERRUPT,
        .aneg_done      = genphy_c45_aneg_done,
        .config_aneg    = aquantia_config_aneg,
@@ -175,7 +175,7 @@ static struct phy_driver aquantia_driver[] = {
        .phy_id         = PHY_ID_AQR405,
        .phy_id_mask    = 0xfffffff0,
        .name           = "Aquantia AQR405",
-       .features       = PHY_AQUANTIA_FEATURES,
+       .features       = PHY_10GBIT_FULL_FEATURES,
        .flags          = PHY_HAS_INTERRUPT,
        .aneg_done      = genphy_c45_aneg_done,
        .config_aneg    = aquantia_config_aneg,
index 411cf1072bae57967034e51b543aa6e154fc5e06..e74a047a846e25b93feb31491d1e2e01d42fdd59 100644 (file)
@@ -357,7 +357,7 @@ static int at803x_aneg_done(struct phy_device *phydev)
 
        /* check if the SGMII link is OK. */
        if (!(phy_read(phydev, AT803X_PSSR) & AT803X_PSSR_MR_AN_COMPLETE)) {
-               pr_warn("803x_aneg_done: SGMII link is not ok\n");
+               phydev_warn(phydev, "803x_aneg_done: SGMII link is not ok\n");
                aneg_done = 0;
        }
        /* switch back to copper page */
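This is the first of many pr_* conversions in this batch (dp83640, marvell, marvell10g and microchip below follow the same pattern). The phydev_warn()/phydev_info() helpers tie each message to the PHY's device so logs on multi-PHY systems are attributable; they presumably reduce to thin dev_* wrappers along the lines of:

        #define phydev_warn(_phydev, format, args...)   \
                dev_warn(&(_phydev)->mdio.dev, format, ##args)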
index cf14613745c976068ab26323ab858ad95f37cdfe..d95bffdec4c14fe0e57a7920c21d673b13536eec 100644 (file)
@@ -42,6 +42,9 @@ static int bcm63xx_config_init(struct phy_device *phydev)
 {
        int reg, err;
 
+       /* ASYM_PAUSE bit is marked RO in datasheet, so don't cheat */
+       phydev->supported |= SUPPORTED_Pause;
+
        reg = phy_read(phydev, MII_BCM63XX_IR);
        if (reg < 0)
                return reg;
@@ -65,8 +68,7 @@ static struct phy_driver bcm63xx_driver[] = {
        .phy_id         = 0x00406000,
        .phy_id_mask    = 0xfffffc00,
        .name           = "Broadcom BCM63XX (1)",
-       /* ASYM_PAUSE bit is marked RO in datasheet, so don't cheat */
-       .features       = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
+       .features       = PHY_BASIC_FEATURES,
        .flags          = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
        .config_init    = bcm63xx_config_init,
        .ack_interrupt  = bcm_phy_ack_intr,
@@ -75,8 +77,7 @@ static struct phy_driver bcm63xx_driver[] = {
        /* same phy as above, with just a different OUI */
        .phy_id         = 0x002bdc00,
        .phy_id_mask    = 0xfffffc00,
-       .name           = "Broadcom BCM63XX (2)",
-       .features       = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
+       .features       = PHY_BASIC_FEATURES,
        .flags          = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
        .config_init    = bcm63xx_config_init,
        .ack_interrupt  = bcm_phy_ack_intr,
index 29aa8d772b0c1050b145a8d5fddcfa132bda9f97..edd4d44a386de2bc262a026e4124a591eeff58b2 100644 (file)
@@ -553,16 +553,17 @@ static void enable_status_frames(struct phy_device *phydev, bool on)
        mutex_unlock(&clock->extreg_lock);
 
        if (!phydev->attached_dev) {
-               pr_warn("expected to find an attached netdevice\n");
+               phydev_warn(phydev,
+                           "expected to find an attached netdevice\n");
                return;
        }
 
        if (on) {
                if (dev_mc_add(phydev->attached_dev, status_frame_dst))
-                       pr_warn("failed to add mc address\n");
+                       phydev_warn(phydev, "failed to add mc address\n");
        } else {
                if (dev_mc_del(phydev->attached_dev, status_frame_dst))
-                       pr_warn("failed to delete mc address\n");
+                       phydev_warn(phydev, "failed to delete mc address\n");
        }
 }
 
@@ -686,9 +687,9 @@ static void recalibrate(struct dp83640_clock *clock)
         * read out and correct offsets
         */
        val = ext_read(master, PAGE4, PTP_STS);
-       pr_info("master PTP_STS  0x%04hx\n", val);
+       phydev_info(master, "master PTP_STS  0x%04hx\n", val);
        val = ext_read(master, PAGE4, PTP_ESTS);
-       pr_info("master PTP_ESTS 0x%04hx\n", val);
+       phydev_info(master, "master PTP_ESTS 0x%04hx\n", val);
        event_ts.ns_lo  = ext_read(master, PAGE4, PTP_EDATA);
        event_ts.ns_hi  = ext_read(master, PAGE4, PTP_EDATA);
        event_ts.sec_lo = ext_read(master, PAGE4, PTP_EDATA);
@@ -698,15 +699,16 @@ static void recalibrate(struct dp83640_clock *clock)
        list_for_each(this, &clock->phylist) {
                tmp = list_entry(this, struct dp83640_private, list);
                val = ext_read(tmp->phydev, PAGE4, PTP_STS);
-               pr_info("slave  PTP_STS  0x%04hx\n", val);
+               phydev_info(tmp->phydev, "slave  PTP_STS  0x%04hx\n", val);
                val = ext_read(tmp->phydev, PAGE4, PTP_ESTS);
-               pr_info("slave  PTP_ESTS 0x%04hx\n", val);
+               phydev_info(tmp->phydev, "slave  PTP_ESTS 0x%04hx\n", val);
                event_ts.ns_lo  = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
                event_ts.ns_hi  = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
                event_ts.sec_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
                event_ts.sec_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
                diff = now - (s64) phy2txts(&event_ts);
-               pr_info("slave offset %lld nanoseconds\n", diff);
+               phydev_info(tmp->phydev, "slave offset %lld nanoseconds\n",
+                           diff);
                diff += ADJTIME_FIX;
                ts = ns_to_timespec64(diff);
                tdr_write(0, tmp->phydev, &ts, PTP_STEP_CLK);
index 24fc4a73c3004af849532583d091d38c588f8884..cbec296107bdd72b677ff60b8800650489087312 100644 (file)
@@ -638,7 +638,7 @@ static void marvell_config_led(struct phy_device *phydev)
        err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE, MII_PHY_LED_CTRL,
                              def_config);
        if (err < 0)
-               pr_warn("Fail to config marvell phy LED.\n");
+               phydev_warn(phydev, "Fail to config marvell phy LED.\n");
 }
 
 static int marvell_config_init(struct phy_device *phydev)
@@ -2201,7 +2201,7 @@ static struct phy_driver marvell_drivers[] = {
                .phy_id = MARVELL_PHY_ID_88E1510,
                .phy_id_mask = MARVELL_PHY_ID_MASK,
                .name = "Marvell 88E1510",
-               .features = PHY_GBIT_FEATURES | SUPPORTED_FIBRE,
+               .features = PHY_GBIT_FIBRE_FEATURES,
                .flags = PHY_HAS_INTERRUPT,
                .probe = &m88e1510_probe,
                .config_init = &m88e1510_config_init,
index f77a2d9e7f9d85b9be5c78c961632119c018c26c..1c9d039eec63d97b9dc1bd05209e8d225703e166 100644 (file)
@@ -337,9 +337,9 @@ static int mv3310_config_init(struct phy_device *phydev)
        }
 
        if (!ethtool_convert_link_mode_to_legacy_u32(&mask, supported))
-               dev_warn(&phydev->mdio.dev,
-                        "PHY supports (%*pb) more modes than phylib supports, some modes not supported.\n",
-                        __ETHTOOL_LINK_MODE_MASK_NBITS, supported);
+               phydev_warn(phydev,
+                           "PHY supports (%*pb) more modes than phylib supports, some modes not supported.\n",
+                           __ETHTOOL_LINK_MODE_MASK_NBITS, supported);
 
        phydev->supported &= mask;
        phydev->advertising &= phydev->supported;
@@ -535,16 +535,7 @@ static struct phy_driver mv3310_drivers[] = {
                .phy_id         = 0x002b09aa,
                .phy_id_mask    = MARVELL_PHY_ID_MASK,
                .name           = "mv88x3310",
-               .features       = SUPPORTED_10baseT_Full |
-                                 SUPPORTED_10baseT_Half |
-                                 SUPPORTED_100baseT_Full |
-                                 SUPPORTED_100baseT_Half |
-                                 SUPPORTED_1000baseT_Full |
-                                 SUPPORTED_Autoneg |
-                                 SUPPORTED_TP |
-                                 SUPPORTED_FIBRE |
-                                 SUPPORTED_10000baseT_Full |
-                                 SUPPORTED_Backplane,
+               .features       = PHY_10GBIT_FEATURES,
                .soft_reset     = gen10g_no_soft_reset,
                .config_init    = mv3310_config_init,
                .probe          = mv3310_probe,
index 80b9583eaa952a7fc16a0e00d57e5a423cc04163..df75efa96a7d95cecd6e4678f5dd5dfbaa9f4095 100644 (file)
@@ -318,7 +318,7 @@ static int unimac_mdio_remove(struct platform_device *pdev)
        return 0;
 }
 
-static int unimac_mdio_suspend(struct device *d)
+static int __maybe_unused unimac_mdio_suspend(struct device *d)
 {
        struct unimac_mdio_priv *priv = dev_get_drvdata(d);
 
@@ -327,7 +327,7 @@ static int unimac_mdio_suspend(struct device *d)
        return 0;
 }
 
-static int unimac_mdio_resume(struct device *d)
+static int __maybe_unused unimac_mdio_resume(struct device *d)
 {
        struct unimac_mdio_priv *priv = dev_get_drvdata(d);
        int ret;
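Tagging the PM callbacks __maybe_unused lets any #ifdef CONFIG_PM guards go away: assuming the driver registers them through SIMPLE_DEV_PM_OPS (not visible in this hunk), the initializer drops all references to them when CONFIG_PM_SLEEP is off, and an untagged static function would then trigger a defined-but-not-used warning:

        /* references vanish when CONFIG_PM_SLEEP is disabled: */
        static SIMPLE_DEV_PM_OPS(unimac_mdio_pm_ops,
                                 unimac_mdio_suspend, unimac_mdio_resume);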
index 564616968cad4ab86e6fe13ab99fbc4b1dedeb84..1546f63988319ea8857b6e9b56185ab97d4d3d1d 100644 (file)
@@ -73,8 +73,8 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
                err = of_address_to_resource(node, 0, &r);
                if (err) {
                        dev_err(&pdev->dev,
-                               "Couldn't translate address for \"%s\"\n",
-                               node->name);
+                               "Couldn't translate address for \"%pOFn\"\n",
+                               node);
                        break;
                }
 
index 2d67937866a35a1a786ce21c55ad7e03592ce023..04b12e34da586b34c7bef25299704fb3cf60e7ba 100644 (file)
@@ -88,7 +88,7 @@ static int lan88xx_TR_reg_set(struct phy_device *phydev, u16 regaddr,
        /* Save current page */
        save_page = phy_save_page(phydev);
        if (save_page < 0) {
-               pr_warn("Failed to get current page\n");
+               phydev_warn(phydev, "Failed to get current page\n");
                goto err;
        }
 
@@ -98,14 +98,14 @@ static int lan88xx_TR_reg_set(struct phy_device *phydev, u16 regaddr,
        ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_LOW_DATA,
                          (data & 0xFFFF));
        if (ret < 0) {
-               pr_warn("Failed to write TR low data\n");
+               phydev_warn(phydev, "Failed to write TR low data\n");
                goto err;
        }
 
        ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_HIGH_DATA,
                          (data & 0x00FF0000) >> 16);
        if (ret < 0) {
-               pr_warn("Failed to write TR high data\n");
+               phydev_warn(phydev, "Failed to write TR high data\n");
                goto err;
        }
 
@@ -115,14 +115,15 @@ static int lan88xx_TR_reg_set(struct phy_device *phydev, u16 regaddr,
 
        ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_CR, buf);
        if (ret < 0) {
-               pr_warn("Failed to write data in reg\n");
+               phydev_warn(phydev, "Failed to write data in reg\n");
                goto err;
        }
 
        usleep_range(1000, 2000);/* Wait for Data to be written */
        val = __phy_read(phydev, LAN88XX_EXT_PAGE_TR_CR);
        if (!(val & 0x8000))
-               pr_warn("TR Register[0x%X] configuration failed\n", regaddr);
+               phydev_warn(phydev, "TR Register[0x%X] configuration failed\n",
+                           regaddr);
 err:
        return phy_restore_page(phydev, save_page, ret);
 }
@@ -137,7 +138,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev)
         */
        err = lan88xx_TR_reg_set(phydev, 0x0F82, 0x12B00A);
        if (err < 0)
-               pr_warn("Failed to Set Register[0x0F82]\n");
+               phydev_warn(phydev, "Failed to Set Register[0x0F82]\n");
 
        /* Get access to Channel b'10, Node b'1101, Register 0x06.
         * Write 24-bit value 0xD2C46F to register. Setting SSTrKf1000Slv,
@@ -145,7 +146,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev)
         */
        err = lan88xx_TR_reg_set(phydev, 0x168C, 0xD2C46F);
        if (err < 0)
-               pr_warn("Failed to Set Register[0x168C]\n");
+               phydev_warn(phydev, "Failed to Set Register[0x168C]\n");
 
        /* Get access to Channel b'10, Node b'1111, Register 0x11.
         * Write 24-bit value 0x620 to register. Setting rem_upd_done_thresh
@@ -153,7 +154,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev)
         */
        err = lan88xx_TR_reg_set(phydev, 0x17A2, 0x620);
        if (err < 0)
-               pr_warn("Failed to Set Register[0x17A2]\n");
+               phydev_warn(phydev, "Failed to Set Register[0x17A2]\n");
 
        /* Get access to Channel b'10, Node b'1101, Register 0x10.
         * Write 24-bit value 0xEEFFDD to register. Setting
@@ -162,7 +163,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev)
         */
        err = lan88xx_TR_reg_set(phydev, 0x16A0, 0xEEFFDD);
        if (err < 0)
-               pr_warn("Failed to Set Register[0x16A0]\n");
+               phydev_warn(phydev, "Failed to Set Register[0x16A0]\n");
 
        /* Get access to Channel b'10, Node b'1101, Register 0x13.
         * Write 24-bit value 0x071448 to register. Setting
@@ -170,7 +171,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev)
         */
        err = lan88xx_TR_reg_set(phydev, 0x16A6, 0x071448);
        if (err < 0)
-               pr_warn("Failed to Set Register[0x16A6]\n");
+               phydev_warn(phydev, "Failed to Set Register[0x16A6]\n");
 
        /* Get access to Channel b'10, Node b'1101, Register 0x12.
         * Write 24-bit value 0x13132F to register. Setting
@@ -178,7 +179,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev)
         */
        err = lan88xx_TR_reg_set(phydev, 0x16A4, 0x13132F);
        if (err < 0)
-               pr_warn("Failed to Set Register[0x16A4]\n");
+               phydev_warn(phydev, "Failed to Set Register[0x16A4]\n");
 
        /* Get access to Channel b'10, Node b'1101, Register 0x14.
         * Write 24-bit value 0x0 to register. Setting eee_3level_delay,
@@ -186,7 +187,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev)
         */
        err = lan88xx_TR_reg_set(phydev, 0x16A8, 0x0);
        if (err < 0)
-               pr_warn("Failed to Set Register[0x16A8]\n");
+               phydev_warn(phydev, "Failed to Set Register[0x16A8]\n");
 
        /* Get access to Channel b'01, Node b'1111, Register 0x34.
         * Write 24-bit value 0x91B06C to register. Setting
@@ -195,7 +196,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev)
         */
        err = lan88xx_TR_reg_set(phydev, 0x0FE8, 0x91B06C);
        if (err < 0)
-               pr_warn("Failed to Set Register[0x0FE8]\n");
+               phydev_warn(phydev, "Failed to Set Register[0x0FE8]\n");
 
        /* Get access to Channel b'01, Node b'1111, Register 0x3E.
         * Write 24-bit value 0xC0A028 to register. Setting
@@ -204,7 +205,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev)
         */
        err = lan88xx_TR_reg_set(phydev, 0x0FFC, 0xC0A028);
        if (err < 0)
-               pr_warn("Failed to Set Register[0x0FFC]\n");
+               phydev_warn(phydev, "Failed to Set Register[0x0FFC]\n");
 
        /* Get access to Channel b'01, Node b'1111, Register 0x35.
         * Write 24-bit value 0x041600 to register. Setting
@@ -213,14 +214,14 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev)
         */
        err = lan88xx_TR_reg_set(phydev, 0x0FEA, 0x041600);
        if (err < 0)
-               pr_warn("Failed to Set Register[0x0FEA]\n");
+               phydev_warn(phydev, "Failed to Set Register[0x0FEA]\n");
 
        /* Get access to Channel b'10, Node b'1101, Register 0x03.
         * Write 24-bit value 0x000004 to register. Setting TrFreeze bits.
         */
        err = lan88xx_TR_reg_set(phydev, 0x1686, 0x000004);
        if (err < 0)
-               pr_warn("Failed to Set Register[0x1686]\n");
+               phydev_warn(phydev, "Failed to Set Register[0x1686]\n");
 }
 
 static int lan88xx_probe(struct phy_device *phydev)
index b1917dd1978a12e3568e296ab099058160c74c11..c600a8509d606258a9daa6efc216fbaccaa4b3bb 100644 (file)
@@ -46,7 +46,7 @@ static struct phy_driver microchip_t1_phy_driver[] = {
                .phy_id_mask    = 0xfffffff0,
                .name           = "Microchip LAN87xx T1",
 
-               .features       = SUPPORTED_100baseT_Full,
+               .features       = PHY_BASIC_T1_FEATURES,
                .flags          = PHY_HAS_INTERRUPT,
 
                .config_init    = genphy_config_init,
index 2d9676d78d3f3f0ce06934d31c6bbfd6fff478dc..bffe077dc75f0bf8a6e5817970a671463d3258a1 100644 (file)
@@ -6,6 +6,8 @@
  * Copyright (c) 2016 Microsemi Corporation
  */
 
+#include <linux/firmware.h>
+#include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mdio.h>
@@ -32,6 +34,15 @@ enum rgmii_rx_clock_delay {
 #define DISABLE_HP_AUTO_MDIX_MASK        0x0080
 #define DISABLE_PAIR_SWAP_CORR_MASK      0x0020
 #define DISABLE_POLARITY_CORR_MASK       0x0010
+#define PARALLEL_DET_IGNORE_ADVERTISED    0x0008
+
+#define MSCC_PHY_EXT_CNTL_STATUS          22
+#define SMI_BROADCAST_WR_EN              0x0001
+
+#define MSCC_PHY_ERR_RX_CNT              19
+#define MSCC_PHY_ERR_FALSE_CARRIER_CNT   20
+#define MSCC_PHY_ERR_LINK_DISCONNECT_CNT  21
+#define ERR_CNT_MASK                     GENMASK(7, 0)
 
 #define MSCC_PHY_EXT_PHY_CNTL_1           23
 #define MAC_IF_SELECTION_MASK             0x1800
@@ -39,7 +50,22 @@ enum rgmii_rx_clock_delay {
 #define MAC_IF_SELECTION_RMII             1
 #define MAC_IF_SELECTION_RGMII            2
 #define MAC_IF_SELECTION_POS              11
+#define VSC8584_MAC_IF_SELECTION_MASK     0x1000
+#define VSC8584_MAC_IF_SELECTION_SGMII    0
+#define VSC8584_MAC_IF_SELECTION_1000BASEX 1
+#define VSC8584_MAC_IF_SELECTION_POS      12
 #define FAR_END_LOOPBACK_MODE_MASK        0x0008
+#define MEDIA_OP_MODE_MASK               0x0700
+#define MEDIA_OP_MODE_COPPER             0
+#define MEDIA_OP_MODE_SERDES             1
+#define MEDIA_OP_MODE_1000BASEX                  2
+#define MEDIA_OP_MODE_100BASEFX                  3
+#define MEDIA_OP_MODE_AMS_COPPER_SERDES          5
+#define MEDIA_OP_MODE_AMS_COPPER_1000BASEX     6
+#define MEDIA_OP_MODE_AMS_COPPER_100BASEFX     7
+#define MEDIA_OP_MODE_POS                8
+
+#define MSCC_PHY_EXT_PHY_CNTL_2                  24
 
 #define MII_VSC85XX_INT_MASK             25
 #define MII_VSC85XX_INT_MASK_MASK        0xa000
@@ -62,19 +88,40 @@ enum rgmii_rx_clock_delay {
 #define MSCC_PHY_PAGE_STANDARD           0x0000 /* Standard registers */
 #define MSCC_PHY_PAGE_EXTENDED           0x0001 /* Extended registers */
 #define MSCC_PHY_PAGE_EXTENDED_2         0x0002 /* Extended reg - page 2 */
+#define MSCC_PHY_PAGE_EXTENDED_3         0x0003 /* Extended reg - page 3 */
+#define MSCC_PHY_PAGE_EXTENDED_4         0x0004 /* Extended reg - page 4 */
+/* Extended reg - GPIO; this is a bank of registers that are shared by all PHYs
+ * in the same package.
+ */
+#define MSCC_PHY_PAGE_EXTENDED_GPIO      0x0010 /* Extended reg - GPIO */
+#define MSCC_PHY_PAGE_TEST               0x2a30 /* Test reg */
+#define MSCC_PHY_PAGE_TR                 0x52b5 /* Token ring registers */
 
 /* Extended Page 1 Registers */
+#define MSCC_PHY_CU_MEDIA_CRC_VALID_CNT          18
+#define VALID_CRC_CNT_CRC_MASK           GENMASK(13, 0)
+
 #define MSCC_PHY_EXT_MODE_CNTL           19
 #define FORCE_MDI_CROSSOVER_MASK         0x000C
 #define FORCE_MDI_CROSSOVER_MDIX         0x000C
 #define FORCE_MDI_CROSSOVER_MDI                  0x0008
 
 #define MSCC_PHY_ACTIPHY_CNTL            20
+#define PHY_ADDR_REVERSED                0x0200
 #define DOWNSHIFT_CNTL_MASK              0x001C
 #define DOWNSHIFT_EN                     0x0010
 #define DOWNSHIFT_CNTL_POS               2
 
+#define MSCC_PHY_EXT_PHY_CNTL_4                  23
+#define PHY_CNTL_4_ADDR_POS              11
+
+#define MSCC_PHY_VERIPHY_CNTL_2                  25
+
+#define MSCC_PHY_VERIPHY_CNTL_3                  26
+
 /* Extended Page 2 Registers */
+#define MSCC_PHY_CU_PMD_TX_CNTL                  16
+
 #define MSCC_PHY_RGMII_CNTL              20
 #define RGMII_RX_CLK_DELAY_MASK                  0x0070
 #define RGMII_RX_CLK_DELAY_POS           4
@@ -90,11 +137,90 @@ enum rgmii_rx_clock_delay {
 #define SECURE_ON_ENABLE                 0x8000
 #define SECURE_ON_PASSWD_LEN_4           0x4000
 
+/* Extended Page 3 Registers */
+#define MSCC_PHY_SERDES_TX_VALID_CNT     21
+#define MSCC_PHY_SERDES_TX_CRC_ERR_CNT   22
+#define MSCC_PHY_SERDES_RX_VALID_CNT     28
+#define MSCC_PHY_SERDES_RX_CRC_ERR_CNT   29
+
+/* Extended page GPIO Registers */
+#define MSCC_DW8051_CNTL_STATUS                  0
+#define MICRO_NSOFT_RESET                0x8000
+#define RUN_FROM_INT_ROM                 0x4000
+#define AUTOINC_ADDR                     0x2000
+#define PATCH_RAM_CLK                    0x1000
+#define MICRO_PATCH_EN                   0x0080
+#define DW8051_CLK_EN                    0x0010
+#define MICRO_CLK_EN                     0x0008
+#define MICRO_CLK_DIVIDE(x)              ((x) >> 1)
+#define MSCC_DW8051_VLD_MASK             0xf1ff
+
+/* x Address in range 1-4 */
+#define MSCC_TRAP_ROM_ADDR(x)            ((x) * 2 + 1)
+#define MSCC_PATCH_RAM_ADDR(x)           (((x) + 1) * 2)
+#define MSCC_INT_MEM_ADDR                11
+
+#define MSCC_INT_MEM_CNTL                12
+#define READ_SFR                         0x6000
+#define READ_PRAM                        0x4000
+#define READ_ROM                         0x2000
+#define READ_RAM                         0x0000
+#define INT_MEM_WRITE_EN                 0x1000
+#define EN_PATCH_RAM_TRAP_ADDR(x)        (0x0100 << ((x) - 1))
+#define INT_MEM_DATA_M                   0x00ff
+#define INT_MEM_DATA(x)                          (INT_MEM_DATA_M & (x))
+
+#define MSCC_PHY_PROC_CMD                18
+#define PROC_CMD_NCOMPLETED              0x8000
+#define PROC_CMD_FAILED                          0x4000
+#define PROC_CMD_SGMII_PORT(x)           ((x) << 8)
+#define PROC_CMD_FIBER_PORT(x)           (0x0100 << (x) % 4)
+#define PROC_CMD_QSGMII_PORT             0x0c00
+#define PROC_CMD_RST_CONF_PORT           0x0080
+#define PROC_CMD_RECONF_PORT             0x0000
+#define PROC_CMD_READ_MOD_WRITE_PORT     0x0040
+#define PROC_CMD_WRITE                   0x0040
+#define PROC_CMD_READ                    0x0000
+#define PROC_CMD_FIBER_DISABLE           0x0020
+#define PROC_CMD_FIBER_100BASE_FX        0x0010
+#define PROC_CMD_FIBER_1000BASE_X        0x0000
+#define PROC_CMD_SGMII_MAC               0x0030
+#define PROC_CMD_QSGMII_MAC              0x0020
+#define PROC_CMD_NO_MAC_CONF             0x0000
+#define PROC_CMD_1588_DEFAULT_INIT       0x0010
+#define PROC_CMD_NOP                     0x000f
+#define PROC_CMD_PHY_INIT                0x000a
+#define PROC_CMD_CRC16                   0x0008
+#define PROC_CMD_FIBER_MEDIA_CONF        0x0001
+#define PROC_CMD_MCB_ACCESS_MAC_CONF     0x0000
+#define PROC_CMD_NCOMPLETED_TIMEOUT_MS    500
+
+#define MSCC_PHY_MAC_CFG_FASTLINK        19
+#define MAC_CFG_MASK                     0xc000
+#define MAC_CFG_SGMII                    0x0000
+#define MAC_CFG_QSGMII                   0x4000
+
+/* Test page Registers */
+#define MSCC_PHY_TEST_PAGE_5             5
+#define MSCC_PHY_TEST_PAGE_8             8
+#define MSCC_PHY_TEST_PAGE_9             9
+#define MSCC_PHY_TEST_PAGE_20            20
+#define MSCC_PHY_TEST_PAGE_24            24
+
+/* Token ring page Registers */
+#define MSCC_PHY_TR_CNTL                 16
+#define TR_WRITE                         0x8000
+#define TR_ADDR(x)                       (0x7fff & (x))
+#define MSCC_PHY_TR_LSB                          17
+#define MSCC_PHY_TR_MSB                          18
+
 /* Microsemi PHY ID's */
 #define PHY_ID_VSC8530                   0x00070560
 #define PHY_ID_VSC8531                   0x00070570
 #define PHY_ID_VSC8540                   0x00070760
 #define PHY_ID_VSC8541                   0x00070770
+#define PHY_ID_VSC8574                   0x000704a0
+#define PHY_ID_VSC8584                   0x000707c0
 
 #define MSCC_VDDMAC_1500                 1500
 #define MSCC_VDDMAC_1800                 1800
@@ -104,6 +230,24 @@ enum rgmii_rx_clock_delay {
 #define DOWNSHIFT_COUNT_MAX              5
 
 #define MAX_LEDS                         4
+
+#define VSC8584_SUPP_LED_MODES (BIT(VSC8531_LINK_ACTIVITY) | \
+                               BIT(VSC8531_LINK_1000_ACTIVITY) | \
+                               BIT(VSC8531_LINK_100_ACTIVITY) | \
+                               BIT(VSC8531_LINK_10_ACTIVITY) | \
+                               BIT(VSC8531_LINK_100_1000_ACTIVITY) | \
+                               BIT(VSC8531_LINK_10_1000_ACTIVITY) | \
+                               BIT(VSC8531_LINK_10_100_ACTIVITY) | \
+                               BIT(VSC8584_LINK_100FX_1000X_ACTIVITY) | \
+                               BIT(VSC8531_DUPLEX_COLLISION) | \
+                               BIT(VSC8531_COLLISION) | \
+                               BIT(VSC8531_ACTIVITY) | \
+                               BIT(VSC8584_100FX_1000X_ACTIVITY) | \
+                               BIT(VSC8531_AUTONEG_FAULT) | \
+                               BIT(VSC8531_SERIAL_MODE) | \
+                               BIT(VSC8531_FORCE_LED_OFF) | \
+                               BIT(VSC8531_FORCE_LED_ON))
+
 #define VSC85XX_SUPP_LED_MODES (BIT(VSC8531_LINK_ACTIVITY) | \
                                BIT(VSC8531_LINK_1000_ACTIVITY) | \
                                BIT(VSC8531_LINK_100_ACTIVITY) | \
@@ -119,11 +263,120 @@ enum rgmii_rx_clock_delay {
                                BIT(VSC8531_FORCE_LED_OFF) | \
                                BIT(VSC8531_FORCE_LED_ON))
 
+#define MSCC_VSC8584_REVB_INT8051_FW           "mscc_vsc8584_revb_int8051_fb48.bin"
+#define MSCC_VSC8584_REVB_INT8051_FW_START_ADDR        0xe800
+#define MSCC_VSC8584_REVB_INT8051_FW_CRC       0xfb48
+
+#define MSCC_VSC8574_REVB_INT8051_FW           "mscc_vsc8574_revb_int8051_29e8.bin"
+#define MSCC_VSC8574_REVB_INT8051_FW_START_ADDR        0x4000
+#define MSCC_VSC8574_REVB_INT8051_FW_CRC       0x29e8
+
+#define VSC8584_REVB                           0x0001
+#define MSCC_DEV_REV_MASK                      GENMASK(3, 0)
+
+struct reg_val {
+       u16     reg;
+       u32     val;
+};
+
+struct vsc85xx_hw_stat {
+       const char *string;
+       u8 reg;
+       u16 page;
+       u16 mask;
+};
+
+static const struct vsc85xx_hw_stat vsc85xx_hw_stats[] = {
+       {
+               .string = "phy_receive_errors",
+               .reg    = MSCC_PHY_ERR_RX_CNT,
+               .page   = MSCC_PHY_PAGE_STANDARD,
+               .mask   = ERR_CNT_MASK,
+       }, {
+               .string = "phy_false_carrier",
+               .reg    = MSCC_PHY_ERR_FALSE_CARRIER_CNT,
+               .page   = MSCC_PHY_PAGE_STANDARD,
+               .mask   = ERR_CNT_MASK,
+       }, {
+               .string = "phy_cu_media_link_disconnect",
+               .reg    = MSCC_PHY_ERR_LINK_DISCONNECT_CNT,
+               .page   = MSCC_PHY_PAGE_STANDARD,
+               .mask   = ERR_CNT_MASK,
+       }, {
+               .string = "phy_cu_media_crc_good_count",
+               .reg    = MSCC_PHY_CU_MEDIA_CRC_VALID_CNT,
+               .page   = MSCC_PHY_PAGE_EXTENDED,
+               .mask   = VALID_CRC_CNT_CRC_MASK,
+       }, {
+               .string = "phy_cu_media_crc_error_count",
+               .reg    = MSCC_PHY_EXT_PHY_CNTL_4,
+               .page   = MSCC_PHY_PAGE_EXTENDED,
+               .mask   = ERR_CNT_MASK,
+       },
+};
+
+static const struct vsc85xx_hw_stat vsc8584_hw_stats[] = {
+       {
+               .string = "phy_receive_errors",
+               .reg    = MSCC_PHY_ERR_RX_CNT,
+               .page   = MSCC_PHY_PAGE_STANDARD,
+               .mask   = ERR_CNT_MASK,
+       }, {
+               .string = "phy_false_carrier",
+               .reg    = MSCC_PHY_ERR_FALSE_CARRIER_CNT,
+               .page   = MSCC_PHY_PAGE_STANDARD,
+               .mask   = ERR_CNT_MASK,
+       }, {
+               .string = "phy_cu_media_link_disconnect",
+               .reg    = MSCC_PHY_ERR_LINK_DISCONNECT_CNT,
+               .page   = MSCC_PHY_PAGE_STANDARD,
+               .mask   = ERR_CNT_MASK,
+       }, {
+               .string = "phy_cu_media_crc_good_count",
+               .reg    = MSCC_PHY_CU_MEDIA_CRC_VALID_CNT,
+               .page   = MSCC_PHY_PAGE_EXTENDED,
+               .mask   = VALID_CRC_CNT_CRC_MASK,
+       }, {
+               .string = "phy_cu_media_crc_error_count",
+               .reg    = MSCC_PHY_EXT_PHY_CNTL_4,
+               .page   = MSCC_PHY_PAGE_EXTENDED,
+               .mask   = ERR_CNT_MASK,
+       }, {
+               .string = "phy_serdes_tx_good_pkt_count",
+               .reg    = MSCC_PHY_SERDES_TX_VALID_CNT,
+               .page   = MSCC_PHY_PAGE_EXTENDED_3,
+               .mask   = VALID_CRC_CNT_CRC_MASK,
+       }, {
+               .string = "phy_serdes_tx_bad_crc_count",
+               .reg    = MSCC_PHY_SERDES_TX_CRC_ERR_CNT,
+               .page   = MSCC_PHY_PAGE_EXTENDED_3,
+               .mask   = ERR_CNT_MASK,
+       }, {
+               .string = "phy_serdes_rx_good_pkt_count",
+               .reg    = MSCC_PHY_SERDES_RX_VALID_CNT,
+               .page   = MSCC_PHY_PAGE_EXTENDED_3,
+               .mask   = VALID_CRC_CNT_CRC_MASK,
+       }, {
+               .string = "phy_serdes_rx_bad_crc_count",
+               .reg    = MSCC_PHY_SERDES_RX_CRC_ERR_CNT,
+               .page   = MSCC_PHY_PAGE_EXTENDED_3,
+               .mask   = ERR_CNT_MASK,
+       },
+};
+
 struct vsc8531_private {
        int rate_magic;
        u16 supp_led_modes;
        u32 leds_mode[MAX_LEDS];
        u8 nleds;
+       const struct vsc85xx_hw_stat *hw_stats;
+       u64 *stats;
+       int nstats;
+       bool pkg_init;
+       /* For multiple port PHYs; the MDIO address of the base PHY in the
+        * package.
+        */
+       unsigned int base_addr;
 };
 
 #ifdef CONFIG_OF_MDIO
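The statistics tables above plug into phylib's ethtool hooks through vsc85xx_get_sset_count(), vsc85xx_get_strings() and vsc85xx_get_stats() below. The masking and per-read accumulation in vsc85xx_get_stat() suggest narrow counters that reset as they are read, which is an inference from the code rather than anything stated in the patch:

        val &= priv->hw_stats[i].mask;  /* counters are 8 or 14 bits wide */
        priv->stats[i] += val;          /* accumulate across reads */
        return priv->stats[i];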
@@ -140,12 +393,66 @@ static const struct vsc8531_edge_rate_table edge_table[] = {
 };
 #endif /* CONFIG_OF_MDIO */
 
-static int vsc85xx_phy_page_set(struct phy_device *phydev, u16 page)
+static int vsc85xx_phy_read_page(struct phy_device *phydev)
 {
-       int rc;
+       return __phy_read(phydev, MSCC_EXT_PAGE_ACCESS);
+}
 
-       rc = phy_write(phydev, MSCC_EXT_PAGE_ACCESS, page);
-       return rc;
+static int vsc85xx_phy_write_page(struct phy_device *phydev, int page)
+{
+       return __phy_write(phydev, MSCC_EXT_PAGE_ACCESS, page);
+}
+
+static int vsc85xx_get_sset_count(struct phy_device *phydev)
+{
+       struct vsc8531_private *priv = phydev->priv;
+
+       if (!priv)
+               return 0;
+
+       return priv->nstats;
+}
+
+static void vsc85xx_get_strings(struct phy_device *phydev, u8 *data)
+{
+       struct vsc8531_private *priv = phydev->priv;
+       int i;
+
+       if (!priv)
+               return;
+
+       for (i = 0; i < priv->nstats; i++)
+               strlcpy(data + i * ETH_GSTRING_LEN, priv->hw_stats[i].string,
+                       ETH_GSTRING_LEN);
+}
+
+static u64 vsc85xx_get_stat(struct phy_device *phydev, int i)
+{
+       struct vsc8531_private *priv = phydev->priv;
+       int val;
+
+       val = phy_read_paged(phydev, priv->hw_stats[i].page,
+                            priv->hw_stats[i].reg);
+       if (val < 0)
+               return U64_MAX;
+
+       val = val & priv->hw_stats[i].mask;
+       priv->stats[i] += val;
+
+       return priv->stats[i];
+}
+
+static void vsc85xx_get_stats(struct phy_device *phydev,
+                             struct ethtool_stats *stats, u64 *data)
+{
+       struct vsc8531_private *priv = phydev->priv;
+       int i;
+
+       if (!priv)
+               return;
+
+       for (i = 0; i < priv->nstats; i++)
+               data[i] = vsc85xx_get_stat(phydev, i);
 }
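vsc85xx_phy_page_set() is gone in favour of .read_page/.write_page callbacks, which hand page selection, locking and restoration to phylib's generic paged-register helpers (phy_read_paged(), phy_modify_paged(), phy_select_page()/phy_restore_page()). The wiring this implies in the driver's phy_driver entries, which sit outside this hunk, would be:

        .read_page      = vsc85xx_phy_read_page,
        .write_page     = vsc85xx_phy_write_page,

With those hooks in place, a guarded read-modify-write on another page collapses into a single call, as the mdix and downshift rewrites below show:

        rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED,
                              MSCC_PHY_EXT_MODE_CNTL,
                              FORCE_MDI_CROSSOVER_MASK, reg_val);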
 
 static int vsc85xx_led_cntl_set(struct phy_device *phydev,
@@ -184,7 +491,7 @@ static int vsc85xx_mdix_set(struct phy_device *phydev, u8 mdix)
        u16 reg_val;
 
        reg_val = phy_read(phydev, MSCC_PHY_BYPASS_CONTROL);
-       if ((mdix == ETH_TP_MDI) || (mdix == ETH_TP_MDI_X)) {
+       if (mdix == ETH_TP_MDI || mdix == ETH_TP_MDI_X) {
                reg_val |= (DISABLE_PAIR_SWAP_CORR_MASK |
                            DISABLE_POLARITY_CORR_MASK  |
                            DISABLE_HP_AUTO_MDIX_MASK);
@@ -194,25 +501,20 @@ static int vsc85xx_mdix_set(struct phy_device *phydev, u8 mdix)
                             DISABLE_HP_AUTO_MDIX_MASK);
        }
        rc = phy_write(phydev, MSCC_PHY_BYPASS_CONTROL, reg_val);
-       if (rc != 0)
+       if (rc)
                return rc;
 
-       rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED);
-       if (rc != 0)
-               return rc;
+       reg_val = 0;
 
-       reg_val = phy_read(phydev, MSCC_PHY_EXT_MODE_CNTL);
-       reg_val &= ~(FORCE_MDI_CROSSOVER_MASK);
        if (mdix == ETH_TP_MDI)
-               reg_val |= FORCE_MDI_CROSSOVER_MDI;
+               reg_val = FORCE_MDI_CROSSOVER_MDI;
        else if (mdix == ETH_TP_MDI_X)
-               reg_val |= FORCE_MDI_CROSSOVER_MDIX;
-       rc = phy_write(phydev, MSCC_PHY_EXT_MODE_CNTL, reg_val);
-       if (rc != 0)
-               return rc;
+               reg_val = FORCE_MDI_CROSSOVER_MDIX;
 
-       rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
-       if (rc != 0)
+       rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED,
+                             MSCC_PHY_EXT_MODE_CNTL, FORCE_MDI_CROSSOVER_MASK,
+                             reg_val);
+       if (rc < 0)
                return rc;
 
        return genphy_restart_aneg(phydev);
@@ -220,30 +522,24 @@ static int vsc85xx_mdix_set(struct phy_device *phydev, u8 mdix)
 
 static int vsc85xx_downshift_get(struct phy_device *phydev, u8 *count)
 {
-       int rc;
        u16 reg_val;
 
-       rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED);
-       if (rc != 0)
-               goto out;
+       reg_val = phy_read_paged(phydev, MSCC_PHY_PAGE_EXTENDED,
+                                MSCC_PHY_ACTIPHY_CNTL);
+       if (reg_val < 0)
+               return reg_val;
 
-       reg_val = phy_read(phydev, MSCC_PHY_ACTIPHY_CNTL);
        reg_val &= DOWNSHIFT_CNTL_MASK;
        if (!(reg_val & DOWNSHIFT_EN))
                *count = DOWNSHIFT_DEV_DISABLE;
        else
                *count = ((reg_val & ~DOWNSHIFT_EN) >> DOWNSHIFT_CNTL_POS) + 2;
-       rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
 
-out:
-       return rc;
+       return 0;
 }
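One wrinkle in the converted vsc85xx_downshift_get(): reg_val is still the u16 declared in the unchanged context line above, so the new test reg_val < 0 can never be true and a phy_read_paged() failure is silently treated as register contents. Keeping the error check meaningful requires a signed local:

        int reg_val;    /* phy_read_paged() returns -errno on failure */

        reg_val = phy_read_paged(phydev, MSCC_PHY_PAGE_EXTENDED,
                                 MSCC_PHY_ACTIPHY_CNTL);
        if (reg_val < 0)
                return reg_val;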
 
 static int vsc85xx_downshift_set(struct phy_device *phydev, u8 count)
 {
-       int rc;
-       u16 reg_val;
-
        if (count == DOWNSHIFT_DEV_DEFAULT_COUNT) {
                /* Default downshift count 3 (i.e. Bit3:2 = 0b01) */
                count = ((1 << DOWNSHIFT_CNTL_POS) | DOWNSHIFT_EN);
@@ -255,21 +551,9 @@ static int vsc85xx_downshift_set(struct phy_device *phydev, u8 count)
                count = (((count - 2) << DOWNSHIFT_CNTL_POS) | DOWNSHIFT_EN);
        }
 
-       rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED);
-       if (rc != 0)
-               goto out;
-
-       reg_val = phy_read(phydev, MSCC_PHY_ACTIPHY_CNTL);
-       reg_val &= ~(DOWNSHIFT_CNTL_MASK);
-       reg_val |= count;
-       rc = phy_write(phydev, MSCC_PHY_ACTIPHY_CNTL, reg_val);
-       if (rc != 0)
-               goto out;
-
-       rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
-
-out:
-       return rc;
+       return phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED,
+                               MSCC_PHY_ACTIPHY_CNTL, DOWNSHIFT_CNTL_MASK,
+                               count);
 }
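
Both conversions above funnel through phy_modify_paged(), which collapses the
old "set page, read, mask, write, restore page" dance into one call. A sketch
of the read-modify-write such a helper is assumed to perform:

static int example_modify_paged(struct phy_device *phydev, int page,
                                u32 regnum, u16 mask, u16 set)
{
        int ret = 0, oldpage;

        oldpage = phy_select_page(phydev, page);
        if (oldpage >= 0) {
                ret = __phy_read(phydev, regnum);
                if (ret >= 0)
                        ret = __phy_write(phydev, regnum,
                                          (ret & ~mask) | set);
        }

        return phy_restore_page(phydev, oldpage, ret);
}
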
 
 static int vsc85xx_wol_set(struct phy_device *phydev,
@@ -283,46 +567,48 @@ static int vsc85xx_wol_set(struct phy_device *phydev,
        u8 *mac_addr = phydev->attached_dev->dev_addr;
 
        mutex_lock(&phydev->lock);
-       rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
-       if (rc != 0)
+       rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
+       if (rc < 0) {
+               rc = phy_restore_page(phydev, rc, rc);
                goto out_unlock;
+       }
 
        if (wol->wolopts & WAKE_MAGIC) {
                /* Store the device address for the magic packet */
                for (i = 0; i < ARRAY_SIZE(pwd); i++)
                        pwd[i] = mac_addr[5 - (i * 2 + 1)] << 8 |
                                 mac_addr[5 - i * 2];
-               phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, pwd[0]);
-               phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, pwd[1]);
-               phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, pwd[2]);
+               __phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, pwd[0]);
+               __phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, pwd[1]);
+               __phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, pwd[2]);
        } else {
-               phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, 0);
-               phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, 0);
-               phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, 0);
+               __phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, 0);
+               __phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, 0);
+               __phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, 0);
        }
 
        if (wol_conf->wolopts & WAKE_MAGICSECURE) {
                for (i = 0; i < ARRAY_SIZE(pwd); i++)
                        pwd[i] = wol_conf->sopass[5 - (i * 2 + 1)] << 8 |
                                 wol_conf->sopass[5 - i * 2];
-               phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, pwd[0]);
-               phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, pwd[1]);
-               phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, pwd[2]);
+               __phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, pwd[0]);
+               __phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, pwd[1]);
+               __phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, pwd[2]);
        } else {
-               phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, 0);
-               phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, 0);
-               phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, 0);
+               __phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, 0);
+               __phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, 0);
+               __phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, 0);
        }
 
-       reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
+       reg_val = __phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
        if (wol_conf->wolopts & WAKE_MAGICSECURE)
                reg_val |= SECURE_ON_ENABLE;
        else
                reg_val &= ~SECURE_ON_ENABLE;
-       phy_write(phydev, MSCC_PHY_WOL_MAC_CONTROL, reg_val);
+       __phy_write(phydev, MSCC_PHY_WOL_MAC_CONTROL, reg_val);
 
-       rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
-       if (rc != 0)
+       rc = phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
+       if (rc < 0)
                goto out_unlock;
 
        if (wol->wolopts & WAKE_MAGIC) {
@@ -330,14 +616,14 @@ static int vsc85xx_wol_set(struct phy_device *phydev,
                reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK);
                reg_val |= MII_VSC85XX_INT_MASK_WOL;
                rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val);
-               if (rc != 0)
+               if (rc)
                        goto out_unlock;
        } else {
                /* Disable the WOL interrupt */
                reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK);
                reg_val &= (~MII_VSC85XX_INT_MASK_WOL);
                rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val);
-               if (rc != 0)
+               if (rc)
                        goto out_unlock;
        }
        /* Clear WOL interrupt status */
@@ -359,17 +645,17 @@ static void vsc85xx_wol_get(struct phy_device *phydev,
        struct ethtool_wolinfo *wol_conf = wol;
 
        mutex_lock(&phydev->lock);
-       rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
-       if (rc != 0)
+       rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
+       if (rc < 0)
                goto out_unlock;
 
-       reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
+       reg_val = __phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
        if (reg_val & SECURE_ON_ENABLE)
                wol_conf->wolopts |= WAKE_MAGICSECURE;
        if (wol_conf->wolopts & WAKE_MAGICSECURE) {
-               pwd[0] = phy_read(phydev, MSCC_PHY_WOL_LOWER_PASSWD);
-               pwd[1] = phy_read(phydev, MSCC_PHY_WOL_MID_PASSWD);
-               pwd[2] = phy_read(phydev, MSCC_PHY_WOL_UPPER_PASSWD);
+               pwd[0] = __phy_read(phydev, MSCC_PHY_WOL_LOWER_PASSWD);
+               pwd[1] = __phy_read(phydev, MSCC_PHY_WOL_MID_PASSWD);
+               pwd[2] = __phy_read(phydev, MSCC_PHY_WOL_UPPER_PASSWD);
                for (i = 0; i < ARRAY_SIZE(pwd); i++) {
                        wol_conf->sopass[5 - i * 2] = pwd[i] & 0x00ff;
                        wol_conf->sopass[5 - (i * 2 + 1)] = (pwd[i] & 0xff00)
@@ -377,9 +663,8 @@ static void vsc85xx_wol_get(struct phy_device *phydev,
                }
        }
 
-       rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
-
 out_unlock:
+       phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
        mutex_unlock(&phydev->lock);
 }
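
Note the idiom used in both WoL paths: phy_restore_page() must be called even
when phy_select_page() failed, because selection takes the MDIO bus lock and
restore is what releases it; the "rc > 0 ? 0 : rc" expression maps a positive
old-page number onto success before merging it with any restore error. A
condensed sketch of the pattern, assuming that locking behaviour:

static int example_paged_write(struct phy_device *phydev, u32 reg, u16 val)
{
        int rc, oldpage;

        oldpage = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
        if (oldpage < 0)
                /* still required: releases the bus lock taken by select */
                return phy_restore_page(phydev, oldpage, oldpage);

        rc = __phy_write(phydev, reg, val);

        return phy_restore_page(phydev, oldpage, rc);
}
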
 
@@ -387,7 +672,7 @@ out_unlock:
 static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev)
 {
        u32 vdd, sd;
-       int rc, i, j;
+       int i, j;
        struct device *dev = &phydev->mdio.dev;
        struct device_node *of_node = dev->of_node;
        u8 sd_array_size = ARRAY_SIZE(edge_table[0].slowdown);
@@ -395,12 +680,10 @@ static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev)
        if (!of_node)
                return -ENODEV;
 
-       rc = of_property_read_u32(of_node, "vsc8531,vddmac", &vdd);
-       if (rc != 0)
+       if (of_property_read_u32(of_node, "vsc8531,vddmac", &vdd))
                vdd = MSCC_VDDMAC_3300;
 
-       rc = of_property_read_u32(of_node, "vsc8531,edge-slowdown", &sd);
-       if (rc != 0)
+       if (of_property_read_u32(of_node, "vsc8531,edge-slowdown", &sd))
                sd = 0;
 
        for (i = 0; i < ARRAY_SIZE(edge_table); i++)
@@ -453,7 +736,7 @@ static int vsc85xx_dt_led_modes_get(struct phy_device *phydev,
                                    u32 *default_mode)
 {
        struct vsc8531_private *priv = phydev->priv;
-       char led_dt_prop[19];
+       char led_dt_prop[28];
        int i, ret;
 
        for (i = 0; i < priv->nleds; i++) {
@@ -474,21 +757,11 @@ static int vsc85xx_dt_led_modes_get(struct phy_device *phydev,
 static int vsc85xx_edge_rate_cntl_set(struct phy_device *phydev, u8 edge_rate)
 {
        int rc;
-       u16 reg_val;
 
        mutex_lock(&phydev->lock);
-       rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
-       if (rc != 0)
-               goto out_unlock;
-       reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
-       reg_val &= ~(EDGE_RATE_CNTL_MASK);
-       reg_val |= (edge_rate << EDGE_RATE_CNTL_POS);
-       rc = phy_write(phydev, MSCC_PHY_WOL_MAC_CONTROL, reg_val);
-       if (rc != 0)
-               goto out_unlock;
-       rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
-
-out_unlock:
+       rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
+                             MSCC_PHY_WOL_MAC_CONTROL, EDGE_RATE_CNTL_MASK,
+                             edge_rate << EDGE_RATE_CNTL_POS);
        mutex_unlock(&phydev->lock);
 
        return rc;
@@ -519,7 +792,7 @@ static int vsc85xx_mac_if_set(struct phy_device *phydev,
                goto out_unlock;
        }
        rc = phy_write(phydev, MSCC_PHY_EXT_PHY_CNTL_1, reg_val);
-       if (rc != 0)
+       if (rc)
                goto out_unlock;
 
        rc = genphy_soft_reset(phydev);
@@ -537,17 +810,17 @@ static int vsc85xx_default_config(struct phy_device *phydev)
 
        phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
        mutex_lock(&phydev->lock);
-       rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
-       if (rc != 0)
+       rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
+       if (rc < 0)
                goto out_unlock;
 
        reg_val = phy_read(phydev, MSCC_PHY_RGMII_CNTL);
        reg_val &= ~(RGMII_RX_CLK_DELAY_MASK);
        reg_val |= (RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS);
        phy_write(phydev, MSCC_PHY_RGMII_CNTL, reg_val);
-       rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
 
 out_unlock:
+       rc = phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
        mutex_unlock(&phydev->lock);
 
        return rc;
@@ -576,6 +849,809 @@ static int vsc85xx_set_tunable(struct phy_device *phydev,
        }
 }
 
+/* mdiobus lock should be held when using this function */
+static void vsc85xx_tr_write(struct phy_device *phydev, u16 addr, u32 val)
+{
+       __phy_write(phydev, MSCC_PHY_TR_MSB, val >> 16);
+       __phy_write(phydev, MSCC_PHY_TR_LSB, val & GENMASK(15, 0));
+       __phy_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(addr));
+}
+
+static int vsc85xx_eee_init_seq_set(struct phy_device *phydev)
+{
+       const struct reg_val init_eee[] = {
+               {0x0f82, 0x0012b00a},
+               {0x1686, 0x00000004},
+               {0x168c, 0x00d2c46f},
+               {0x17a2, 0x00000620},
+               {0x16a0, 0x00eeffdd},
+               {0x16a6, 0x00071448},
+               {0x16a4, 0x0013132f},
+               {0x16a8, 0x00000000},
+               {0x0ffc, 0x00c0a028},
+               {0x0fe8, 0x0091b06c},
+               {0x0fea, 0x00041600},
+               {0x0f80, 0x00000af4},
+               {0x0fec, 0x00901809},
+               {0x0fee, 0x0000a6a1},
+               {0x0ffe, 0x00b01007},
+               {0x16b0, 0x00eeff00},
+               {0x16b2, 0x00007000},
+               {0x16b4, 0x00000814},
+       };
+       unsigned int i;
+       int oldpage;
+
+       mutex_lock(&phydev->lock);
+       oldpage = phy_select_page(phydev, MSCC_PHY_PAGE_TR);
+       if (oldpage < 0)
+               goto out_unlock;
+
+       for (i = 0; i < ARRAY_SIZE(init_eee); i++)
+               vsc85xx_tr_write(phydev, init_eee[i].reg, init_eee[i].val);
+
+out_unlock:
+       oldpage = phy_restore_page(phydev, oldpage, oldpage);
+       mutex_unlock(&phydev->lock);
+
+       return oldpage;
+}
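
struct reg_val is not defined in any visible hunk; from the init tables it is
presumably a simple register/value pair along these lines (an assumption):

struct reg_val {
        u16 reg;
        u32 val;
};
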
+
+/* phydev->bus->mdio_lock should be held when using this function */
+static int phy_base_write(struct phy_device *phydev, u32 regnum, u16 val)
+{
+       struct vsc8531_private *priv = phydev->priv;
+
+       if (unlikely(!mutex_is_locked(&phydev->mdio.bus->mdio_lock))) {
+               dev_err(&phydev->mdio.dev, "MDIO bus lock not held!\n");
+               dump_stack();
+       }
+
+       return __mdiobus_write(phydev->mdio.bus, priv->base_addr, regnum, val);
+}
+
+/* phydev->bus->mdio_lock should be held when using this function */
+static int phy_base_read(struct phy_device *phydev, u32 regnum)
+{
+       struct vsc8531_private *priv = phydev->priv;
+
+       if (unlikely(!mutex_is_locked(&phydev->mdio.bus->mdio_lock))) {
+               dev_err(&phydev->mdio.dev, "MDIO bus lock not held!\n");
+               dump_stack();
+       }
+
+       return __mdiobus_read(phydev->mdio.bus, priv->base_addr, regnum);
+}
+
+/* bus->mdio_lock should be held when using this function */
+static void vsc8584_csr_write(struct phy_device *phydev, u16 addr, u32 val)
+{
+       phy_base_write(phydev, MSCC_PHY_TR_MSB, val >> 16);
+       phy_base_write(phydev, MSCC_PHY_TR_LSB, val & GENMASK(15, 0));
+       phy_base_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(addr));
+}
+
+/* bus->mdio_lock should be held when using this function */
+static int vsc8584_cmd(struct phy_device *phydev, u16 val)
+{
+       unsigned long deadline;
+       u16 reg_val;
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+                      MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+       phy_base_write(phydev, MSCC_PHY_PROC_CMD, PROC_CMD_NCOMPLETED | val);
+
+       deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+       do {
+               reg_val = phy_base_read(phydev, MSCC_PHY_PROC_CMD);
+       } while (time_before(jiffies, deadline) &&
+                (reg_val & PROC_CMD_NCOMPLETED) &&
+                !(reg_val & PROC_CMD_FAILED));
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+       if (reg_val & PROC_CMD_FAILED)
+               return -EIO;
+
+       if (reg_val & PROC_CMD_NCOMPLETED)
+               return -ETIMEDOUT;
+
+       return 0;
+}
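
The completion poll above is a recurring shape: re-read a status register
until the busy bit clears, a failure bit sets, or a deadline passes. The same
logic factored into a generic helper (illustrative only, reusing the
phy_base_read() defined above):

static int example_poll_cmd(struct phy_device *phydev, u32 status_reg,
                            u16 busy_bit, u16 fail_bit, unsigned int ms)
{
        unsigned long deadline = jiffies + msecs_to_jiffies(ms);
        u16 val;

        do {
                val = phy_base_read(phydev, status_reg);
                if (val & fail_bit)
                        return -EIO;
                if (!(val & busy_bit))
                        return 0;
        } while (time_before(jiffies, deadline));

        return -ETIMEDOUT;
}
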
+
+/* bus->mdio_lock should be held when using this function */
+static int vsc8584_micro_deassert_reset(struct phy_device *phydev,
+                                       bool patch_en)
+{
+       u32 enable, release;
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+                      MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+       enable = RUN_FROM_INT_ROM | MICRO_CLK_EN | DW8051_CLK_EN;
+       release = MICRO_NSOFT_RESET | RUN_FROM_INT_ROM | DW8051_CLK_EN |
+               MICRO_CLK_EN;
+
+       if (patch_en) {
+               enable |= MICRO_PATCH_EN;
+               release |= MICRO_PATCH_EN;
+
+               /* Clear all patches */
+               phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_RAM);
+       }
+
+       /* Enable 8051 Micro clock; CLEAR/SET patch present; disable PRAM clock
+        * override and addr. auto-incr; operate at 125 MHz
+        */
+       phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, enable);
+       /* Release 8051 Micro SW reset */
+       phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, release);
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+       return 0;
+}
+
+/* bus->mdio_lock should be held when using this function */
+static int vsc8584_micro_assert_reset(struct phy_device *phydev)
+{
+       int ret;
+       u16 reg;
+
+       ret = vsc8584_cmd(phydev, PROC_CMD_NOP);
+       if (ret)
+               return ret;
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+                      MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+       reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
+       reg &= ~EN_PATCH_RAM_TRAP_ADDR(4);
+       phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg);
+
+       phy_base_write(phydev, MSCC_TRAP_ROM_ADDR(4), 0x005b);
+       phy_base_write(phydev, MSCC_PATCH_RAM_ADDR(4), 0x005b);
+
+       reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
+       reg |= EN_PATCH_RAM_TRAP_ADDR(4);
+       phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg);
+
+       phy_base_write(phydev, MSCC_PHY_PROC_CMD, PROC_CMD_NOP);
+
+       reg = phy_base_read(phydev, MSCC_DW8051_CNTL_STATUS);
+       reg &= ~MICRO_NSOFT_RESET;
+       phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, reg);
+
+       phy_base_write(phydev, MSCC_PHY_PROC_CMD, PROC_CMD_MCB_ACCESS_MAC_CONF |
+                      PROC_CMD_SGMII_PORT(0) | PROC_CMD_NO_MAC_CONF |
+                      PROC_CMD_READ);
+
+       reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
+       reg &= ~EN_PATCH_RAM_TRAP_ADDR(4);
+       phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg);
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+       return 0;
+}
+
+/* bus->mdio_lock should be held when using this function */
+static int vsc8584_get_fw_crc(struct phy_device *phydev, u16 start, u16 size,
+                             u16 *crc)
+{
+       int ret;
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED);
+
+       phy_base_write(phydev, MSCC_PHY_VERIPHY_CNTL_2, start);
+       phy_base_write(phydev, MSCC_PHY_VERIPHY_CNTL_3, size);
+
+       /* Start Micro command */
+       ret = vsc8584_cmd(phydev, PROC_CMD_CRC16);
+       if (ret)
+               goto out;
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED);
+
+       *crc = phy_base_read(phydev, MSCC_PHY_VERIPHY_CNTL_2);
+
+out:
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+       return ret;
+}
+
+/* bus->mdio_lock should be held when using this function */
+static int vsc8584_patch_fw(struct phy_device *phydev,
+                           const struct firmware *fw)
+{
+       int i, ret;
+
+       ret = vsc8584_micro_assert_reset(phydev);
+       if (ret) {
+               dev_err(&phydev->mdio.dev,
+                       "%s: failed to assert reset of micro\n", __func__);
+               return ret;
+       }
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+                      MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+       /* Hold the 8051 micro in SW reset, enable auto-increment address and
+        * patch clock, and disable the 8051 micro clock.
+        */
+       phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, RUN_FROM_INT_ROM |
+                      AUTOINC_ADDR | PATCH_RAM_CLK | MICRO_CLK_EN |
+                      MICRO_CLK_DIVIDE(2));
+       phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_PRAM | INT_MEM_WRITE_EN |
+                      INT_MEM_DATA(2));
+       phy_base_write(phydev, MSCC_INT_MEM_ADDR, 0x0000);
+
+       for (i = 0; i < fw->size; i++)
+               phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_PRAM |
+                              INT_MEM_WRITE_EN | fw->data[i]);
+
+       /* Clear internal memory access */
+       phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_RAM);
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+       return 0;
+}
+
+/* bus->mdio_lock should be held when using this function */
+static bool vsc8574_is_serdes_init(struct phy_device *phydev)
+{
+       u16 reg;
+       bool ret;
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+                      MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+       reg = phy_base_read(phydev, MSCC_TRAP_ROM_ADDR(1));
+       if (reg != 0x3eb7) {
+               ret = false;
+               goto out;
+       }
+
+       reg = phy_base_read(phydev, MSCC_PATCH_RAM_ADDR(1));
+       if (reg != 0x4012) {
+               ret = false;
+               goto out;
+       }
+
+       reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
+       if (reg != EN_PATCH_RAM_TRAP_ADDR(1)) {
+               ret = false;
+               goto out;
+       }
+
+       reg = phy_base_read(phydev, MSCC_DW8051_CNTL_STATUS);
+       if ((MICRO_NSOFT_RESET | RUN_FROM_INT_ROM | DW8051_CLK_EN |
+            MICRO_CLK_EN) != (reg & MSCC_DW8051_VLD_MASK)) {
+               ret = false;
+               goto out;
+       }
+
+       ret = true;
+out:
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+       return ret;
+}
+
+/* bus->mdio_lock should be held when using this function */
+static int vsc8574_config_pre_init(struct phy_device *phydev)
+{
+       const struct reg_val pre_init1[] = {
+               {0x0fae, 0x000401bd},
+               {0x0fac, 0x000f000f},
+               {0x17a0, 0x00a0f147},
+               {0x0fe4, 0x00052f54},
+               {0x1792, 0x0027303d},
+               {0x07fe, 0x00000704},
+               {0x0fe0, 0x00060150},
+               {0x0f82, 0x0012b00a},
+               {0x0f80, 0x00000d74},
+               {0x02e0, 0x00000012},
+               {0x03a2, 0x00050208},
+               {0x03b2, 0x00009186},
+               {0x0fb0, 0x000e3700},
+               {0x1688, 0x00049f81},
+               {0x0fd2, 0x0000ffff},
+               {0x168a, 0x00039fa2},
+               {0x1690, 0x0020640b},
+               {0x0258, 0x00002220},
+               {0x025a, 0x00002a20},
+               {0x025c, 0x00003060},
+               {0x025e, 0x00003fa0},
+               {0x03a6, 0x0000e0f0},
+               {0x0f92, 0x00001489},
+               {0x16a2, 0x00007000},
+               {0x16a6, 0x00071448},
+               {0x16a0, 0x00eeffdd},
+               {0x0fe8, 0x0091b06c},
+               {0x0fea, 0x00041600},
+               {0x16b0, 0x00eeff00},
+               {0x16b2, 0x00007000},
+               {0x16b4, 0x00000814},
+               {0x0f90, 0x00688980},
+               {0x03a4, 0x0000d8f0},
+               {0x0fc0, 0x00000400},
+               {0x07fa, 0x0050100f},
+               {0x0796, 0x00000003},
+               {0x07f8, 0x00c3ff98},
+               {0x0fa4, 0x0018292a},
+               {0x168c, 0x00d2c46f},
+               {0x17a2, 0x00000620},
+               {0x16a4, 0x0013132f},
+               {0x16a8, 0x00000000},
+               {0x0ffc, 0x00c0a028},
+               {0x0fec, 0x00901c09},
+               {0x0fee, 0x0004a6a1},
+               {0x0ffe, 0x00b01807},
+       };
+       const struct reg_val pre_init2[] = {
+               {0x0486, 0x0008a518},
+               {0x0488, 0x006dc696},
+               {0x048a, 0x00000912},
+               {0x048e, 0x00000db6},
+               {0x049c, 0x00596596},
+               {0x049e, 0x00000514},
+               {0x04a2, 0x00410280},
+               {0x04a4, 0x00000000},
+               {0x04a6, 0x00000000},
+               {0x04a8, 0x00000000},
+               {0x04aa, 0x00000000},
+               {0x04ae, 0x007df7dd},
+               {0x04b0, 0x006d95d4},
+               {0x04b2, 0x00492410},
+       };
+       struct device *dev = &phydev->mdio.dev;
+       const struct firmware *fw;
+       unsigned int i;
+       u16 crc, reg;
+       bool serdes_init;
+       int ret;
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+       /* all writes below are broadcast to all PHYs in the same package */
+       reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS);
+       reg |= SMI_BROADCAST_WR_EN;
+       phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
+
+       phy_base_write(phydev, MII_VSC85XX_INT_MASK, 0);
+
+       /* The register writes below tweak analog and electrical configuration
+        * values that were determined through characterization by PHY
+        * engineers. They don't mean anything more than "these are the best
+        * values".
+        */
+       phy_base_write(phydev, MSCC_PHY_EXT_PHY_CNTL_2, 0x0040);
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
+
+       phy_base_write(phydev, MSCC_PHY_TEST_PAGE_20, 0x4320);
+       phy_base_write(phydev, MSCC_PHY_TEST_PAGE_24, 0x0c00);
+       phy_base_write(phydev, MSCC_PHY_TEST_PAGE_9, 0x18ca);
+       phy_base_write(phydev, MSCC_PHY_TEST_PAGE_5, 0x1b20);
+
+       reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
+       reg |= 0x8000;
+       phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR);
+
+       for (i = 0; i < ARRAY_SIZE(pre_init1); i++)
+               vsc8584_csr_write(phydev, pre_init1[i].reg, pre_init1[i].val);
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_2);
+
+       phy_base_write(phydev, MSCC_PHY_CU_PMD_TX_CNTL, 0x028e);
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR);
+
+       for (i = 0; i < ARRAY_SIZE(pre_init2); i++)
+               vsc8584_csr_write(phydev, pre_init2[i].reg, pre_init2[i].val);
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
+
+       reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
+       reg &= ~0x8000;
+       phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+       /* end of write broadcasting */
+       reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS);
+       reg &= ~SMI_BROADCAST_WR_EN;
+       phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
+
+       ret = request_firmware(&fw, MSCC_VSC8574_REVB_INT8051_FW, dev);
+       if (ret) {
+               dev_err(dev, "failed to load firmware %s, ret: %d\n",
+                       MSCC_VSC8574_REVB_INT8051_FW, ret);
+               return ret;
+       }
+
+       /* Add one byte to size for the one added by the patch_fw function */
+       ret = vsc8584_get_fw_crc(phydev,
+                                MSCC_VSC8574_REVB_INT8051_FW_START_ADDR,
+                                fw->size + 1, &crc);
+       if (ret)
+               goto out;
+
+       if (crc == MSCC_VSC8574_REVB_INT8051_FW_CRC) {
+               serdes_init = vsc8574_is_serdes_init(phydev);
+
+               if (!serdes_init) {
+                       ret = vsc8584_micro_assert_reset(phydev);
+                       if (ret) {
+                               dev_err(dev,
+                                       "%s: failed to assert reset of micro\n",
+                                       __func__);
+                               return ret;
+                       }
+               }
+       } else {
+               dev_dbg(dev, "FW CRC is not the expected one, patching FW\n");
+
+               serdes_init = false;
+
+               if (vsc8584_patch_fw(phydev, fw))
+                       dev_warn(dev,
+                                "failed to patch FW, expect non-optimal device\n");
+       }
+
+       if (!serdes_init) {
+               phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+                              MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+               phy_base_write(phydev, MSCC_TRAP_ROM_ADDR(1), 0x3eb7);
+               phy_base_write(phydev, MSCC_PATCH_RAM_ADDR(1), 0x4012);
+               phy_base_write(phydev, MSCC_INT_MEM_CNTL,
+                              EN_PATCH_RAM_TRAP_ADDR(1));
+
+               vsc8584_micro_deassert_reset(phydev, false);
+
+               /* Add one byte to size for the one added by the patch_fw
+                * function
+                */
+               ret = vsc8584_get_fw_crc(phydev,
+                                        MSCC_VSC8574_REVB_INT8051_FW_START_ADDR,
+                                        fw->size + 1, &crc);
+               if (ret)
+                       goto out;
+
+               if (crc != MSCC_VSC8574_REVB_INT8051_FW_CRC)
+                       dev_warn(dev,
+                                "FW CRC after patching is not the expected one, expect non-optimal device\n");
+       }
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+                      MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+       ret = vsc8584_cmd(phydev, PROC_CMD_1588_DEFAULT_INIT |
+                         PROC_CMD_PHY_INIT);
+
+out:
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+       release_firmware(fw);
+
+       return ret;
+}
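
Distilled, the firmware handling above is a check-then-patch flow: compute
the CRC over the expected image size (plus the extra byte patch_fw injects),
patch only on mismatch, then re-verify. In outline, with hypothetical
EXAMPLE_FW_* constants standing in for the per-chip values:

static int example_fw_flow(struct phy_device *phydev,
                           const struct firmware *fw)
{
        u16 crc;
        int ret;

        ret = vsc8584_get_fw_crc(phydev, EXAMPLE_FW_START_ADDR,
                                 fw->size + 1, &crc);
        if (ret)
                return ret;

        if (crc != EXAMPLE_FW_CRC && vsc8584_patch_fw(phydev, fw))
                return -EIO;    /* the driver only warns; simplified here */

        return 0;
}
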
+
+/* bus->mdio_lock should be held when using this function */
+static int vsc8584_config_pre_init(struct phy_device *phydev)
+{
+       const struct reg_val pre_init1[] = {
+               {0x07fa, 0x0050100f},
+               {0x1688, 0x00049f81},
+               {0x0f90, 0x00688980},
+               {0x03a4, 0x0000d8f0},
+               {0x0fc0, 0x00000400},
+               {0x0f82, 0x0012b002},
+               {0x1686, 0x00000004},
+               {0x168c, 0x00d2c46f},
+               {0x17a2, 0x00000620},
+               {0x16a0, 0x00eeffdd},
+               {0x16a6, 0x00071448},
+               {0x16a4, 0x0013132f},
+               {0x16a8, 0x00000000},
+               {0x0ffc, 0x00c0a028},
+               {0x0fe8, 0x0091b06c},
+               {0x0fea, 0x00041600},
+               {0x0f80, 0x00fffaff},
+               {0x0fec, 0x00901809},
+               {0x0ffe, 0x00b01007},
+               {0x16b0, 0x00eeff00},
+               {0x16b2, 0x00007000},
+               {0x16b4, 0x00000814},
+       };
+       const struct reg_val pre_init2[] = {
+               {0x0486, 0x0008a518},
+               {0x0488, 0x006dc696},
+               {0x048a, 0x00000912},
+       };
+       const struct firmware *fw;
+       struct device *dev = &phydev->mdio.dev;
+       unsigned int i;
+       u16 crc, reg;
+       int ret;
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+       /* all writes below are broadcast to all PHYs in the same package */
+       reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS);
+       reg |= SMI_BROADCAST_WR_EN;
+       phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
+
+       phy_base_write(phydev, MII_VSC85XX_INT_MASK, 0);
+
+       reg = phy_base_read(phydev, MSCC_PHY_BYPASS_CONTROL);
+       reg |= PARALLEL_DET_IGNORE_ADVERTISED;
+       phy_base_write(phydev, MSCC_PHY_BYPASS_CONTROL, reg);
+
+       /* The register writes below tweak analog and electrical configuration
+        * values that were determined through characterization by PHY
+        * engineers. They don't mean anything more than "these are the best
+        * values".
+        */
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_3);
+
+       phy_base_write(phydev, MSCC_PHY_SERDES_TX_CRC_ERR_CNT, 0x2000);
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
+
+       phy_base_write(phydev, MSCC_PHY_TEST_PAGE_5, 0x1f20);
+
+       reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
+       reg |= 0x8000;
+       phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR);
+
+       phy_base_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(0x2fa4));
+
+       reg = phy_base_read(phydev, MSCC_PHY_TR_MSB);
+       reg &= ~0x007f;
+       reg |= 0x0019;
+       phy_base_write(phydev, MSCC_PHY_TR_MSB, reg);
+
+       phy_base_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(0x0fa4));
+
+       for (i = 0; i < ARRAY_SIZE(pre_init1); i++)
+               vsc8584_csr_write(phydev, pre_init1[i].reg, pre_init1[i].val);
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_2);
+
+       phy_base_write(phydev, MSCC_PHY_CU_PMD_TX_CNTL, 0x028e);
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR);
+
+       for (i = 0; i < ARRAY_SIZE(pre_init2); i++)
+               vsc8584_csr_write(phydev, pre_init2[i].reg, pre_init2[i].val);
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
+
+       reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
+       reg &= ~0x8000;
+       phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+       /* end of write broadcasting */
+       reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS);
+       reg &= ~SMI_BROADCAST_WR_EN;
+       phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
+
+       ret = request_firmware(&fw, MSCC_VSC8584_REVB_INT8051_FW, dev);
+       if (ret) {
+               dev_err(dev, "failed to load firmware %s, ret: %d\n",
+                       MSCC_VSC8584_REVB_INT8051_FW, ret);
+               return ret;
+       }
+
+       /* Add one byte to size for the one added by the patch_fw function */
+       ret = vsc8584_get_fw_crc(phydev,
+                                MSCC_VSC8584_REVB_INT8051_FW_START_ADDR,
+                                fw->size + 1, &crc);
+       if (ret)
+               goto out;
+
+       if (crc != MSCC_VSC8584_REVB_INT8051_FW_CRC) {
+               dev_dbg(dev, "FW CRC is not the expected one, patching FW\n");
+               if (vsc8584_patch_fw(phydev, fw))
+                       dev_warn(dev,
+                                "failed to patch FW, expect non-optimal device\n");
+       }
+
+       vsc8584_micro_deassert_reset(phydev, false);
+
+       /* Add one byte to size for the one added by the patch_fw function */
+       ret = vsc8584_get_fw_crc(phydev,
+                                MSCC_VSC8584_REVB_INT8051_FW_START_ADDR,
+                                fw->size + 1, &crc);
+       if (ret)
+               goto out;
+
+       if (crc != MSCC_VSC8584_REVB_INT8051_FW_CRC)
+               dev_warn(dev,
+                        "FW CRC after patching is not the expected one, expect non-optimal device\n");
+
+       ret = vsc8584_micro_assert_reset(phydev);
+       if (ret)
+               goto out;
+
+       vsc8584_micro_deassert_reset(phydev, true);
+
+out:
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+       release_firmware(fw);
+
+       return ret;
+}
+
+/* Check whether one PHY has already initialized the parts common to all PHYs
+ * in the quad PHY package.
+ */
+static bool vsc8584_is_pkg_init(struct phy_device *phydev, bool reversed)
+{
+       struct mdio_device **map = phydev->mdio.bus->mdio_map;
+       struct vsc8531_private *vsc8531;
+       struct phy_device *phy;
+       int i, addr;
+
+       /* VSC8584 is a Quad PHY */
+       for (i = 0; i < 4; i++) {
+               vsc8531 = phydev->priv;
+
+               if (reversed)
+                       addr = vsc8531->base_addr - i;
+               else
+                       addr = vsc8531->base_addr + i;
+
+               phy = container_of(map[addr], struct phy_device, mdio);
+
+               if ((phy->phy_id & phydev->drv->phy_id_mask) !=
+                   (phydev->drv->phy_id & phydev->drv->phy_id_mask))
+                       continue;
+
+               vsc8531 = phy->priv;
+
+               if (vsc8531 && vsc8531->pkg_init)
+                       return true;
+       }
+
+       return false;
+}
+
+static int vsc8584_config_init(struct phy_device *phydev)
+{
+       struct vsc8531_private *vsc8531 = phydev->priv;
+       u16 addr, val;
+       int ret, i;
+
+       phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+       mutex_lock(&phydev->mdio.bus->mdio_lock);
+
+       __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr,
+                       MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED);
+       addr = __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
+                             MSCC_PHY_EXT_PHY_CNTL_4);
+       addr >>= PHY_CNTL_4_ADDR_POS;
+
+       val = __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
+                            MSCC_PHY_ACTIPHY_CNTL);
+       if (val & PHY_ADDR_REVERSED)
+               vsc8531->base_addr = phydev->mdio.addr + addr;
+       else
+               vsc8531->base_addr = phydev->mdio.addr - addr;
+
+       /* Some parts of the init sequence are identical for every PHY in the
+        * package. Some modify the GPIO register bank, a set of registers
+        * affecting all PHYs; a few reset the microprocessor common to all
+        * PHYs. The CRC check responsible for verifying the firmware within
+        * the 8051 microprocessor can only be run via the PHY whose internal
+        * address in the package is 0. Every PHY's interrupt mask register
+        * has to be zeroed before enabling any PHY's interrupt in this
+        * register. For all these reasons, the init sequence must run once
+        * and only once, from whichever PHY in the package happens to be
+        * initialized first, and this pre-init function must perform the
+        * package-critical parts of the sequence on behalf of all PHYs.
+        */
+       if (!vsc8584_is_pkg_init(phydev, val & PHY_ADDR_REVERSED ? 1 : 0)) {
+               if ((phydev->phy_id & phydev->drv->phy_id_mask) ==
+                   (PHY_ID_VSC8574 & phydev->drv->phy_id_mask))
+                       ret = vsc8574_config_pre_init(phydev);
+               else if ((phydev->phy_id & phydev->drv->phy_id_mask) ==
+                        (PHY_ID_VSC8584 & phydev->drv->phy_id_mask))
+                       ret = vsc8584_config_pre_init(phydev);
+               else
+                       ret = -EINVAL;
+
+               if (ret)
+                       goto err;
+       }
+
+       vsc8531->pkg_init = true;
+
+       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+                      MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+       val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
+       val &= ~MAC_CFG_MASK;
+       if (phydev->interface == PHY_INTERFACE_MODE_QSGMII)
+               val |= MAC_CFG_QSGMII;
+       else
+               val |= MAC_CFG_SGMII;
+
+       ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val);
+       if (ret)
+               goto err;
+
+       val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT |
+               PROC_CMD_READ_MOD_WRITE_PORT;
+       if (phydev->interface == PHY_INTERFACE_MODE_QSGMII)
+               val |= PROC_CMD_QSGMII_MAC;
+       else
+               val |= PROC_CMD_SGMII_MAC;
+
+       ret = vsc8584_cmd(phydev, val);
+       if (ret)
+               goto err;
+
+       usleep_range(10000, 20000);
+
+       /* Disable SerDes for 100Base-FX */
+       ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
+                         PROC_CMD_FIBER_PORT(addr) | PROC_CMD_FIBER_DISABLE |
+                         PROC_CMD_READ_MOD_WRITE_PORT |
+                         PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_100BASE_FX);
+       if (ret)
+               goto err;
+
+       /* Disable SerDes for 1000Base-X */
+       ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
+                         PROC_CMD_FIBER_PORT(addr) | PROC_CMD_FIBER_DISABLE |
+                         PROC_CMD_READ_MOD_WRITE_PORT |
+                         PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_1000BASE_X);
+       if (ret)
+               goto err;
+
+       mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+       phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+       val = phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_1);
+       val &= ~(MEDIA_OP_MODE_MASK | VSC8584_MAC_IF_SELECTION_MASK);
+       val |= MEDIA_OP_MODE_COPPER | (VSC8584_MAC_IF_SELECTION_SGMII <<
+                                      VSC8584_MAC_IF_SELECTION_POS);
+       ret = phy_write(phydev, MSCC_PHY_EXT_PHY_CNTL_1, val);
+       if (ret)
+               return ret;
+
+       ret = genphy_soft_reset(phydev);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < vsc8531->nleds; i++) {
+               ret = vsc85xx_led_cntl_set(phydev, i, vsc8531->leds_mode[i]);
+               if (ret)
+                       return ret;
+       }
+
+       return genphy_config_init(phydev);
+
+err:
+       mutex_unlock(&phydev->mdio.bus->mdio_lock);
+       return ret;
+}
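
To make the base-address arithmetic above concrete with hypothetical numbers:
a PHY at bus address 6 whose MSCC_PHY_EXT_PHY_CNTL_4 field reports internal
package address 2 gives base_addr = 4 normally, or 8 with PHY_ADDR_REVERSED:

static int example_base_addr(int bus_addr, u16 pkg_addr, bool reversed)
{
        /* bus_addr 6, pkg_addr 2: returns 8 when reversed, else 4 */
        return reversed ? bus_addr + pkg_addr : bus_addr - pkg_addr;
}
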
+
 static int vsc85xx_config_init(struct phy_device *phydev)
 {
        int rc, i;
@@ -593,15 +1669,27 @@ static int vsc85xx_config_init(struct phy_device *phydev)
        if (rc)
                return rc;
 
+       rc = vsc85xx_eee_init_seq_set(phydev);
+       if (rc)
+               return rc;
+
        for (i = 0; i < vsc8531->nleds; i++) {
                rc = vsc85xx_led_cntl_set(phydev, i, vsc8531->leds_mode[i]);
                if (rc)
                        return rc;
        }
 
-       rc = genphy_config_init(phydev);
+       return genphy_config_init(phydev);
+}
 
-       return rc;
+static int vsc8584_did_interrupt(struct phy_device *phydev)
+{
+       int rc = 0;
+
+       if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+               rc = phy_read(phydev, MII_VSC85XX_INT_STATUS);
+
+       return (rc < 0) ? 0 : rc & MII_VSC85XX_INT_MASK_MASK;
 }
 
 static int vsc85xx_ack_interrupt(struct phy_device *phydev)
@@ -653,6 +1741,61 @@ static int vsc85xx_read_status(struct phy_device *phydev)
        return genphy_read_status(phydev);
 }
 
+static int vsc8574_probe(struct phy_device *phydev)
+{
+       struct vsc8531_private *vsc8531;
+       u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY,
+                              VSC8531_LINK_100_ACTIVITY,
+                              VSC8531_LINK_ACTIVITY,
+                              VSC8531_DUPLEX_COLLISION};
+
+       vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
+       if (!vsc8531)
+               return -ENOMEM;
+
+       phydev->priv = vsc8531;
+
+       vsc8531->nleds = 4;
+       vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
+       vsc8531->hw_stats = vsc8584_hw_stats;
+       vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats);
+       vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
+                                           sizeof(u64), GFP_KERNEL);
+       if (!vsc8531->stats)
+               return -ENOMEM;
+
+       return vsc85xx_dt_led_modes_get(phydev, default_mode);
+}
+
+static int vsc8584_probe(struct phy_device *phydev)
+{
+       struct vsc8531_private *vsc8531;
+       u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY,
+                              VSC8531_LINK_100_ACTIVITY,
+                              VSC8531_LINK_ACTIVITY,
+                              VSC8531_DUPLEX_COLLISION};
+
+       if ((phydev->phy_id & MSCC_DEV_REV_MASK) != VSC8584_REVB) {
+               dev_err(&phydev->mdio.dev, "Only VSC8584 revB is supported.\n");
+               return -ENOTSUPP;
+       }
+
+       vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
+       if (!vsc8531)
+               return -ENOMEM;
+
+       phydev->priv = vsc8531;
+
+       vsc8531->nleds = 4;
+       vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
+       vsc8531->hw_stats = vsc8584_hw_stats;
+       vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats);
+       vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
+                                           sizeof(u64), GFP_KERNEL);
+       if (!vsc8531->stats)
+               return -ENOMEM;
+
+       return vsc85xx_dt_led_modes_get(phydev, default_mode);
+}
+
 static int vsc85xx_probe(struct phy_device *phydev)
 {
        struct vsc8531_private *vsc8531;
@@ -673,6 +1816,12 @@ static int vsc85xx_probe(struct phy_device *phydev)
        vsc8531->rate_magic = rate_magic;
        vsc8531->nleds = 2;
        vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES;
+       vsc8531->hw_stats = vsc85xx_hw_stats;
+       vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats);
+       vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
+                                           sizeof(u64), GFP_KERNEL);
+       if (!vsc8531->stats)
+               return -ENOMEM;
 
        return vsc85xx_dt_led_modes_get(phydev, default_mode);
 }
@@ -699,6 +1848,11 @@ static struct phy_driver vsc85xx_driver[] = {
        .get_wol        = &vsc85xx_wol_get,
        .get_tunable    = &vsc85xx_get_tunable,
        .set_tunable    = &vsc85xx_set_tunable,
+       .read_page      = &vsc85xx_phy_read_page,
+       .write_page     = &vsc85xx_phy_write_page,
+       .get_sset_count = &vsc85xx_get_sset_count,
+       .get_strings    = &vsc85xx_get_strings,
+       .get_stats      = &vsc85xx_get_stats,
 },
 {
        .phy_id         = PHY_ID_VSC8531,
@@ -720,6 +1874,11 @@ static struct phy_driver vsc85xx_driver[] = {
        .get_wol        = &vsc85xx_wol_get,
        .get_tunable    = &vsc85xx_get_tunable,
        .set_tunable    = &vsc85xx_set_tunable,
+       .read_page      = &vsc85xx_phy_read_page,
+       .write_page     = &vsc85xx_phy_write_page,
+       .get_sset_count = &vsc85xx_get_sset_count,
+       .get_strings    = &vsc85xx_get_strings,
+       .get_stats      = &vsc85xx_get_stats,
 },
 {
        .phy_id         = PHY_ID_VSC8540,
@@ -741,6 +1900,11 @@ static struct phy_driver vsc85xx_driver[] = {
        .get_wol        = &vsc85xx_wol_get,
        .get_tunable    = &vsc85xx_get_tunable,
        .set_tunable    = &vsc85xx_set_tunable,
+       .read_page      = &vsc85xx_phy_read_page,
+       .write_page     = &vsc85xx_phy_write_page,
+       .get_sset_count = &vsc85xx_get_sset_count,
+       .get_strings    = &vsc85xx_get_strings,
+       .get_stats      = &vsc85xx_get_stats,
 },
 {
        .phy_id         = PHY_ID_VSC8541,
@@ -762,6 +1926,63 @@ static struct phy_driver vsc85xx_driver[] = {
        .get_wol        = &vsc85xx_wol_get,
        .get_tunable    = &vsc85xx_get_tunable,
        .set_tunable    = &vsc85xx_set_tunable,
+       .read_page      = &vsc85xx_phy_read_page,
+       .write_page     = &vsc85xx_phy_write_page,
+       .get_sset_count = &vsc85xx_get_sset_count,
+       .get_strings    = &vsc85xx_get_strings,
+       .get_stats      = &vsc85xx_get_stats,
+},
+{
+       .phy_id         = PHY_ID_VSC8574,
+       .name           = "Microsemi GE VSC8574 SyncE",
+       .phy_id_mask    = 0xfffffff0,
+       .features       = PHY_GBIT_FEATURES,
+       .flags          = PHY_HAS_INTERRUPT,
+       .soft_reset     = &genphy_soft_reset,
+       .config_init    = &vsc8584_config_init,
+       .config_aneg    = &vsc85xx_config_aneg,
+       .aneg_done      = &genphy_aneg_done,
+       .read_status    = &vsc85xx_read_status,
+       .ack_interrupt  = &vsc85xx_ack_interrupt,
+       .config_intr    = &vsc85xx_config_intr,
+       .did_interrupt  = &vsc8584_did_interrupt,
+       .suspend        = &genphy_suspend,
+       .resume         = &genphy_resume,
+       .probe          = &vsc8574_probe,
+       .set_wol        = &vsc85xx_wol_set,
+       .get_wol        = &vsc85xx_wol_get,
+       .get_tunable    = &vsc85xx_get_tunable,
+       .set_tunable    = &vsc85xx_set_tunable,
+       .read_page      = &vsc85xx_phy_read_page,
+       .write_page     = &vsc85xx_phy_write_page,
+       .get_sset_count = &vsc85xx_get_sset_count,
+       .get_strings    = &vsc85xx_get_strings,
+       .get_stats      = &vsc85xx_get_stats,
+},
+{
+       .phy_id         = PHY_ID_VSC8584,
+       .name           = "Microsemi GE VSC8584 SyncE",
+       .phy_id_mask    = 0xfffffff0,
+       .features       = PHY_GBIT_FEATURES,
+       .flags          = PHY_HAS_INTERRUPT,
+       .soft_reset     = &genphy_soft_reset,
+       .config_init    = &vsc8584_config_init,
+       .config_aneg    = &vsc85xx_config_aneg,
+       .aneg_done      = &genphy_aneg_done,
+       .read_status    = &vsc85xx_read_status,
+       .ack_interrupt  = &vsc85xx_ack_interrupt,
+       .config_intr    = &vsc85xx_config_intr,
+       .did_interrupt  = &vsc8584_did_interrupt,
+       .suspend        = &genphy_suspend,
+       .resume         = &genphy_resume,
+       .probe          = &vsc8584_probe,
+       .get_tunable    = &vsc85xx_get_tunable,
+       .set_tunable    = &vsc85xx_set_tunable,
+       .read_page      = &vsc85xx_phy_read_page,
+       .write_page     = &vsc85xx_phy_write_page,
+       .get_sset_count = &vsc85xx_get_sset_count,
+       .get_strings    = &vsc85xx_get_strings,
+       .get_stats      = &vsc85xx_get_stats,
 }
 
 };
@@ -773,6 +1994,8 @@ static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
        { PHY_ID_VSC8531, 0xfffffff0, },
        { PHY_ID_VSC8540, 0xfffffff0, },
        { PHY_ID_VSC8541, 0xfffffff0, },
+       { PHY_ID_VSC8574, 0xfffffff0, },
+       { PHY_ID_VSC8584, 0xfffffff0, },
        { }
 };
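
Not visible in this hunk, but the ID table is presumably exported for module
autoloading in the usual way:

MODULE_DEVICE_TABLE(mdio, vsc85xx_tbl);
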
 
index a1f8e4816f7271807d98eb92e76aa6ec370072a4..14509a8903c6c625838f89a5c3183ec362589999 100644 (file)
@@ -537,7 +537,7 @@ out_unlock:
        mutex_unlock(&phydev->lock);
 
        if (trigger)
-               phy_trigger_machine(phydev, sync);
+               phy_trigger_machine(phydev);
 
        return err;
 }
@@ -635,6 +635,13 @@ int phy_speed_up(struct phy_device *phydev)
 }
 EXPORT_SYMBOL_GPL(phy_speed_up);
 
+static void phy_queue_state_machine(struct phy_device *phydev,
+                                   unsigned int secs)
+{
+       mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
+                        secs * HZ);
+}
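
The helper builds on mod_delayed_work(), which atomically re-arms the timer
when the work is already pending and queues it otherwise; that property is
what lets phy_trigger_machine() below drop its cancel_delayed_work{,_sync}()
step and the sync flag threaded through every caller. Usage reduces to:

static void example_poll_soon(struct phy_device *phydev)
{
        /* re-arms any pending timer, otherwise queues the work */
        phy_queue_state_machine(phydev, 0);
}
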
+
 /**
  * phy_start_machine - start PHY state machine tracking
  * @phydev: the phy_device struct
@@ -647,7 +654,7 @@ EXPORT_SYMBOL_GPL(phy_speed_up);
  */
 void phy_start_machine(struct phy_device *phydev)
 {
-       queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
+       phy_queue_state_machine(phydev, 1);
 }
 EXPORT_SYMBOL_GPL(phy_start_machine);
 
@@ -655,19 +662,14 @@ EXPORT_SYMBOL_GPL(phy_start_machine);
  * phy_trigger_machine - trigger the state machine to run
  *
  * @phydev: the phy_device struct
- * @sync: indicate whether we should wait for the workqueue cancelation
  *
  * Description: There has been a change in state which requires that the
  *   state machine runs.
  */
 
-void phy_trigger_machine(struct phy_device *phydev, bool sync)
+void phy_trigger_machine(struct phy_device *phydev)
 {
-       if (sync)
-               cancel_delayed_work_sync(&phydev->state_queue);
-       else
-               cancel_delayed_work(&phydev->state_queue);
-       queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
+       phy_queue_state_machine(phydev, 0);
 }
 
 /**
@@ -703,7 +705,7 @@ static void phy_error(struct phy_device *phydev)
        phydev->state = PHY_HALTED;
        mutex_unlock(&phydev->lock);
 
-       phy_trigger_machine(phydev, false);
+       phy_trigger_machine(phydev);
 }
 
 /**
@@ -745,7 +747,7 @@ static irqreturn_t phy_change(struct phy_device *phydev)
        mutex_unlock(&phydev->lock);
 
        /* reschedule state queue work to run as soon as possible */
-       phy_trigger_machine(phydev, true);
+       phy_trigger_machine(phydev);
 
        if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
                goto phy_err;
@@ -911,7 +913,7 @@ void phy_start(struct phy_device *phydev)
        }
        mutex_unlock(&phydev->lock);
 
-       phy_trigger_machine(phydev, true);
+       phy_trigger_machine(phydev);
 }
 EXPORT_SYMBOL(phy_start);
 
@@ -1130,8 +1132,7 @@ void phy_state_machine(struct work_struct *work)
         * called from phy_disconnect() synchronously.
         */
        if (phy_polling_mode(phydev) && old_state != PHY_HALTED)
-               queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
-                                  PHY_STATE_TIME * HZ);
+               phy_queue_state_machine(phydev, PHY_STATE_TIME);
 }
 
 /**
index ee676d75fe02c1afd9cb6573307afd4709f041f3..43cb08dcce818f555e5623248be2fcf5a33de2ed 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/mii.h>
 #include <linux/ethtool.h>
+#include <linux/bitmap.h>
 #include <linux/phy.h>
 #include <linux/phy_led_triggers.h>
 #include <linux/mdio.h>
@@ -42,6 +43,149 @@ MODULE_DESCRIPTION("PHY library");
 MODULE_AUTHOR("Andy Fleming");
 MODULE_LICENSE("GPL");
 
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_basic_features);
+
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_basic_t1_features);
+
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_gbit_features);
+
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_gbit_fibre_features);
+
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_gbit_all_ports_features);
+
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_10gbit_features);
+
+static const int phy_basic_ports_array[] = {
+       ETHTOOL_LINK_MODE_Autoneg_BIT,
+       ETHTOOL_LINK_MODE_TP_BIT,
+       ETHTOOL_LINK_MODE_MII_BIT,
+};
+
+static const int phy_fibre_port_array[] = {
+       ETHTOOL_LINK_MODE_FIBRE_BIT,
+};
+
+static const int phy_all_ports_features_array[] = {
+       ETHTOOL_LINK_MODE_Autoneg_BIT,
+       ETHTOOL_LINK_MODE_TP_BIT,
+       ETHTOOL_LINK_MODE_MII_BIT,
+       ETHTOOL_LINK_MODE_FIBRE_BIT,
+       ETHTOOL_LINK_MODE_AUI_BIT,
+       ETHTOOL_LINK_MODE_BNC_BIT,
+       ETHTOOL_LINK_MODE_Backplane_BIT,
+};
+
+static const int phy_10_100_features_array[] = {
+       ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+       ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+       ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+       ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+};
+
+static const int phy_basic_t1_features_array[] = {
+       ETHTOOL_LINK_MODE_TP_BIT,
+       ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+};
+
+static const int phy_gbit_features_array[] = {
+       ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+       ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+};
+
+static const int phy_10gbit_features_array[] = {
+       ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+};
+
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_10gbit_full_features);
+
+static const int phy_10gbit_full_features_array[] = {
+       ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+       ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+       ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+};
+
+static void features_init(void)
+{
+       /* 10/100 half/full */
+       linkmode_set_bit_array(phy_basic_ports_array,
+                              ARRAY_SIZE(phy_basic_ports_array),
+                              phy_basic_features);
+       linkmode_set_bit_array(phy_10_100_features_array,
+                              ARRAY_SIZE(phy_10_100_features_array),
+                              phy_basic_features);
+
+       /* 100 full, TP */
+       linkmode_set_bit_array(phy_basic_t1_features_array,
+                              ARRAY_SIZE(phy_basic_t1_features_array),
+                              phy_basic_t1_features);
+
+       /* 10/100 half/full + 1000 half/full */
+       linkmode_set_bit_array(phy_basic_ports_array,
+                              ARRAY_SIZE(phy_basic_ports_array),
+                              phy_gbit_features);
+       linkmode_set_bit_array(phy_10_100_features_array,
+                              ARRAY_SIZE(phy_10_100_features_array),
+                              phy_gbit_features);
+       linkmode_set_bit_array(phy_gbit_features_array,
+                              ARRAY_SIZE(phy_gbit_features_array),
+                              phy_gbit_features);
+
+       /* 10/100 half/full + 1000 half/full + fibre */
+       linkmode_set_bit_array(phy_basic_ports_array,
+                              ARRAY_SIZE(phy_basic_ports_array),
+                              phy_gbit_fibre_features);
+       linkmode_set_bit_array(phy_10_100_features_array,
+                              ARRAY_SIZE(phy_10_100_features_array),
+                              phy_gbit_fibre_features);
+       linkmode_set_bit_array(phy_gbit_features_array,
+                              ARRAY_SIZE(phy_gbit_features_array),
+                              phy_gbit_fibre_features);
+       linkmode_set_bit_array(phy_fibre_port_array,
+                              ARRAY_SIZE(phy_fibre_port_array),
+                              phy_gbit_fibre_features);
+
+       /* 10/100 half/full + 1000 half/full + TP/MII/FIBRE/AUI/BNC/Backplane */
+       linkmode_set_bit_array(phy_all_ports_features_array,
+                              ARRAY_SIZE(phy_all_ports_features_array),
+                              phy_gbit_all_ports_features);
+       linkmode_set_bit_array(phy_10_100_features_array,
+                              ARRAY_SIZE(phy_10_100_features_array),
+                              phy_gbit_all_ports_features);
+       linkmode_set_bit_array(phy_gbit_features_array,
+                              ARRAY_SIZE(phy_gbit_features_array),
+                              phy_gbit_all_ports_features);
+
+       /* 10/100 half/full + 1000 half/full + 10G full */
+       linkmode_set_bit_array(phy_all_ports_features_array,
+                              ARRAY_SIZE(phy_all_ports_features_array),
+                              phy_10gbit_features);
+       linkmode_set_bit_array(phy_10_100_features_array,
+                              ARRAY_SIZE(phy_10_100_features_array),
+                              phy_10gbit_features);
+       linkmode_set_bit_array(phy_gbit_features_array,
+                              ARRAY_SIZE(phy_gbit_features_array),
+                              phy_10gbit_features);
+       linkmode_set_bit_array(phy_10gbit_features_array,
+                              ARRAY_SIZE(phy_10gbit_features_array),
+                              phy_10gbit_features);
+
+       /* 10/100/1000/10G full */
+       linkmode_set_bit_array(phy_all_ports_features_array,
+                              ARRAY_SIZE(phy_all_ports_features_array),
+                              phy_10gbit_full_features);
+       linkmode_set_bit_array(phy_10gbit_full_features_array,
+                              ARRAY_SIZE(phy_10gbit_full_features_array),
+                              phy_10gbit_full_features);
+}
+
 void phy_device_free(struct phy_device *phydev)
 {
        put_device(&phydev->mdio.dev);
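The features_init() addition above assembles the shared link-mode bitmaps by OR-ing arrays of bit indices into each mask. As a rough userspace analogue of that set-bit-array pattern (standard C only; the bit indices and array names below are illustrative, not the kernel's ETHTOOL_LINK_MODE_* values):

#include <stdio.h>
#include <string.h>

#define NBITS 96 /* stand-in for __ETHTOOL_LINK_MODE_MASK_NBITS */
#define BITS_PER_LONG_ (8 * sizeof(unsigned long))
#define LONGS ((NBITS + BITS_PER_LONG_ - 1) / BITS_PER_LONG_)

/* OR each listed bit index into dst, like linkmode_set_bit_array() */
static void set_bit_array(const unsigned int *bits, size_t count,
                          unsigned long *dst)
{
        for (size_t i = 0; i < count; i++)
                dst[bits[i] / BITS_PER_LONG_] |=
                        1UL << (bits[i] % BITS_PER_LONG_);
}

int main(void)
{
        /* illustrative bit indices only */
        static const unsigned int port_bits[]  = { 0, 1 };
        static const unsigned int speed_bits[] = { 2, 3, 4, 5 };
        unsigned long basic[LONGS] = { 0 };

        set_bit_array(port_bits, sizeof(port_bits) / sizeof(port_bits[0]), basic);
        set_bit_array(speed_bits, sizeof(speed_bits) / sizeof(speed_bits[0]), basic);

        printf("basic[0] = %#lx\n", basic[0]);  /* 0x3f */
        return 0;
}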
@@ -93,7 +237,12 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
        if (!netdev)
                return !phydev->suspended;
 
-       /* Don't suspend PHY if the attached netdev parent may wakeup.
+       if (netdev->wol_enabled)
+               return false;
+
+       /* Until all affected network drivers support the wol_enabled
+        * flag, also check for other hints that WoL is enabled.
+        * Don't suspend the PHY if the attached netdev parent may wake up.
         * The parent may point to a PCI device, as in tg3 driver.
         */
        if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent))
@@ -920,13 +1069,13 @@ void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
 
 
        if (!fmt) {
-               dev_info(&phydev->mdio.dev, ATTACHED_FMT "\n",
+               phydev_info(phydev, ATTACHED_FMT "\n",
                         drv_name, phydev_name(phydev),
                         irq_str);
        } else {
                va_list ap;
 
-               dev_info(&phydev->mdio.dev, ATTACHED_FMT,
+               phydev_info(phydev, ATTACHED_FMT,
                         drv_name, phydev_name(phydev),
                         irq_str);
 
@@ -1130,9 +1279,9 @@ void phy_detach(struct phy_device *phydev)
                sysfs_remove_link(&dev->dev.kobj, "phydev");
                sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev");
        }
+       phy_suspend(phydev);
        phydev->attached_dev->phydev = NULL;
        phydev->attached_dev = NULL;
-       phy_suspend(phydev);
        phydev->phylink = NULL;
 
        phy_led_triggers_unregister(phydev);
@@ -1166,12 +1315,13 @@ EXPORT_SYMBOL(phy_detach);
 int phy_suspend(struct phy_device *phydev)
 {
        struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
+       struct net_device *netdev = phydev->attached_dev;
        struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
        int ret = 0;
 
        /* If the device has WOL enabled, we cannot suspend the PHY */
        phy_ethtool_get_wol(phydev, &wol);
-       if (wol.wolopts)
+       if (wol.wolopts || (netdev && netdev->wol_enabled))
                return -EBUSY;
 
        if (phydev->drv && phydrv->suspend)
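Taken together with the phy_detach() reordering above, phy_suspend() now has to run while phydev->attached_dev is still valid, because the suspend decision consults both the ethtool WoL options and the netdev's wol_enabled flag. A condensed userspace sketch of that gate (the struct layouts are illustrative only):

#include <errno.h>
#include <stdio.h>

struct net_device { int wol_enabled; };

struct phy_device {
        struct net_device *attached_dev;
        unsigned int wolopts;           /* stand-in for the ethtool WoL query */
};

/* refuse to suspend while any wake-on-LAN source is armed */
static int phy_suspend_gate(const struct phy_device *phydev)
{
        const struct net_device *netdev = phydev->attached_dev;

        if (phydev->wolopts || (netdev && netdev->wol_enabled))
                return -EBUSY;
        return 0;               /* safe to power the PHY down */
}

int main(void)
{
        struct net_device nd = { .wol_enabled = 1 };
        struct phy_device phy = { .attached_dev = &nd, .wolopts = 0 };

        printf("suspend -> %d\n", phy_suspend_gate(&phy));  /* -16 (-EBUSY) */
        return 0;
}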
@@ -1936,6 +2086,7 @@ static int phy_probe(struct device *dev)
        struct phy_device *phydev = to_phy_device(dev);
        struct device_driver *drv = phydev->mdio.dev.driver;
        struct phy_driver *phydrv = to_phy_driver(drv);
+       u32 features;
        int err = 0;
 
        phydev->drv = phydrv;
@@ -1956,7 +2107,8 @@ static int phy_probe(struct device *dev)
         * a controller will attach, and may modify one
         * or both of these values
         */
-       phydev->supported = phydrv->features;
+       ethtool_convert_link_mode_to_legacy_u32(&features, phydrv->features);
+       phydev->supported = features;
        of_set_phy_supported(phydev);
        phydev->advertising = phydev->supported;
 
@@ -1976,10 +2128,14 @@ static int phy_probe(struct device *dev)
         * (e.g. hardware erratum) where the driver wants to set only one
         * of these bits.
         */
-       if (phydrv->features & (SUPPORTED_Pause | SUPPORTED_Asym_Pause)) {
+       if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features) ||
+           test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydrv->features)) {
                phydev->supported &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause);
-               phydev->supported |= phydrv->features &
-                                    (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+               if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features))
+                       phydev->supported |= SUPPORTED_Pause;
+               if (test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+                            phydrv->features))
+                       phydev->supported |= SUPPORTED_Asym_Pause;
        } else {
                phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
        }
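The pause handling now tests individual ETHTOOL_LINK_MODE_*_BIT entries in the driver's linkmode bitmap and mirrors each one into the legacy SUPPORTED_* word separately, since a plain bitwise AND no longer works across the two representations. A userspace sketch of that per-bit mirroring (the flag values and bit numbers are made up for illustration):

#include <stdio.h>

#define SUPPORTED_Pause      (1u << 13)  /* illustrative flag values */
#define SUPPORTED_Asym_Pause (1u << 14)

enum { MODE_PAUSE_BIT = 13, MODE_ASYM_PAUSE_BIT = 14 };  /* illustrative */

static int test_bit_ul(unsigned int nr, const unsigned long *mask)
{
        return !!(mask[nr / (8 * sizeof(unsigned long))] &
                  (1UL << (nr % (8 * sizeof(unsigned long)))));
}

/* copy the pause capabilities from a bitmap into the legacy u32 word */
static unsigned int mirror_pause(const unsigned long *features,
                                 unsigned int supported)
{
        supported &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause);
        if (test_bit_ul(MODE_PAUSE_BIT, features))
                supported |= SUPPORTED_Pause;
        if (test_bit_ul(MODE_ASYM_PAUSE_BIT, features))
                supported |= SUPPORTED_Asym_Pause;
        return supported;
}

int main(void)
{
        unsigned long features[2] = { 1UL << MODE_PAUSE_BIT, 0 };

        printf("supported = %#x\n", mirror_pause(features, 0));
        return 0;
}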
@@ -2092,9 +2248,7 @@ static struct phy_driver genphy_driver = {
        .name           = "Generic PHY",
        .soft_reset     = genphy_no_soft_reset,
        .config_init    = genphy_config_init,
-       .features       = PHY_GBIT_FEATURES | SUPPORTED_MII |
-                         SUPPORTED_AUI | SUPPORTED_FIBRE |
-                         SUPPORTED_BNC,
+       .features       = PHY_GBIT_ALL_PORTS_FEATURES,
        .aneg_done      = genphy_aneg_done,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
@@ -2109,6 +2263,8 @@ static int __init phy_init(void)
        if (rc)
                return rc;
 
+       features_init();
+
        rc = phy_driver_register(&genphy_10g_driver, THIS_MODULE);
        if (rc)
                goto err_10g;
index 1d01e0c625a569c7d78df8941583b81c97a64e1a..9b8dd0d0ee42ce0ec35ba6a0add122bc6aadb8a4 100644 (file)
@@ -68,33 +68,6 @@ struct phylink {
        struct sfp_bus *sfp_bus;
 };
 
-static inline void linkmode_zero(unsigned long *dst)
-{
-       bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
-}
-
-static inline void linkmode_copy(unsigned long *dst, const unsigned long *src)
-{
-       bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS);
-}
-
-static inline void linkmode_and(unsigned long *dst, const unsigned long *a,
-                               const unsigned long *b)
-{
-       bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
-}
-
-static inline void linkmode_or(unsigned long *dst, const unsigned long *a,
-                               const unsigned long *b)
-{
-       bitmap_or(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
-}
-
-static inline bool linkmode_empty(const unsigned long *src)
-{
-       return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS);
-}
-
 /**
  * phylink_set_port_modes() - set the port type modes in the ethtool mask
  * @mask: ethtool link mode mask
@@ -717,6 +690,30 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
        return 0;
 }
 
+static int __phylink_connect_phy(struct phylink *pl, struct phy_device *phy,
+               phy_interface_t interface)
+{
+       int ret;
+
+       if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
+                   (pl->link_an_mode == MLO_AN_INBAND &&
+                    phy_interface_mode_is_8023z(interface))))
+               return -EINVAL;
+
+       if (pl->phydev)
+               return -EBUSY;
+
+       ret = phy_attach_direct(pl->netdev, phy, 0, interface);
+       if (ret)
+               return ret;
+
+       ret = phylink_bringup_phy(pl, phy);
+       if (ret)
+               phy_detach(phy);
+
+       return ret;
+}
+
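__phylink_connect_phy() keeps the usual acquire-then-initialize shape: attach first, bring the PHY up second, and detach again if bring-up fails so the caller never observes a half-connected device. The rollback pattern in isolation (all function names below are placeholders):

#include <errno.h>
#include <stdio.h>

static int attach(void)  { puts("attach");  return 0; }
static void detach(void) { puts("detach"); }
static int bringup(int fail) { puts("bringup"); return fail ? -EINVAL : 0; }

/* acquire a resource, run a second init step, roll back on failure */
static int connect_phy(int fail_bringup)
{
        int ret = attach();

        if (ret)
                return ret;

        ret = bringup(fail_bringup);
        if (ret)
                detach();       /* undo the attach so no half-state leaks */

        return ret;
}

int main(void)
{
        printf("ok path: %d\n", connect_phy(0));
        printf("fail path: %d\n", connect_phy(1));
        return 0;
}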
 /**
  * phylink_connect_phy() - connect a PHY to the phylink instance
  * @pl: a pointer to a &struct phylink returned from phylink_create()
@@ -734,31 +731,13 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
  */
 int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
 {
-       int ret;
-
-       if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
-                   (pl->link_an_mode == MLO_AN_INBAND &&
-                    phy_interface_mode_is_8023z(pl->link_interface))))
-               return -EINVAL;
-
-       if (pl->phydev)
-               return -EBUSY;
-
        /* Use PHY device/driver interface */
        if (pl->link_interface == PHY_INTERFACE_MODE_NA) {
                pl->link_interface = phy->interface;
                pl->link_config.interface = pl->link_interface;
        }
 
-       ret = phy_attach_direct(pl->netdev, phy, 0, pl->link_interface);
-       if (ret)
-               return ret;
-
-       ret = phylink_bringup_phy(pl, phy);
-       if (ret)
-               phy_detach(phy);
-
-       return ret;
+       return __phylink_connect_phy(pl, phy, pl->link_interface);
 }
 EXPORT_SYMBOL_GPL(phylink_connect_phy);
 
@@ -1675,7 +1654,9 @@ static void phylink_sfp_link_up(void *upstream)
 
 static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
 {
-       return phylink_connect_phy(upstream, phy);
+       struct phylink *pl = upstream;
+
+       return __phylink_connect_phy(upstream, phy, pl->link_config.interface);
 }
 
 static void phylink_sfp_disconnect_phy(void *upstream)
index 52fffb98fde9ac3fd05c7f6fd8e5dc123ecae341..6e13b8832bc7df94467211f07c1e7dba15a6e877 100644 (file)
@@ -1098,8 +1098,11 @@ static int sfp_hwmon_insert(struct sfp *sfp)
 
 static void sfp_hwmon_remove(struct sfp *sfp)
 {
-       hwmon_device_unregister(sfp->hwmon_dev);
-       kfree(sfp->hwmon_name);
+       if (!IS_ERR_OR_NULL(sfp->hwmon_dev)) {
+               hwmon_device_unregister(sfp->hwmon_dev);
+               sfp->hwmon_dev = NULL;
+               kfree(sfp->hwmon_name);
+       }
 }
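Guarding the teardown with IS_ERR_OR_NULL() and clearing the handle afterwards makes sfp_hwmon_remove() safe both when registration failed (the field then holds an ERR_PTR) and when remove is reached twice. A userspace sketch of the same idempotent-teardown idea, with a crude stand-in for IS_ERR_OR_NULL():

#include <stdio.h>
#include <stdlib.h>

/* crude stand-in for the kernel's IS_ERR_OR_NULL() */
static int is_err_or_null(const void *p)
{
        return p == NULL || (unsigned long)p >= (unsigned long)-4095;
}

struct sensor { void *hwmon_dev; char *name; };

static void sensor_remove(struct sensor *s)
{
        if (!is_err_or_null(s->hwmon_dev)) {
                free(s->hwmon_dev);     /* stands in for unregister */
                s->hwmon_dev = NULL;    /* make a second call a no-op */
                free(s->name);
                s->name = NULL;
        }
}

int main(void)
{
        struct sensor s = { malloc(16), malloc(8) };

        sensor_remove(&s);
        sensor_remove(&s);      /* safe: already torn down */
        puts("done");
        return 0;
}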
 #else
 static int sfp_hwmon_insert(struct sfp *sfp)
index 6a047d30e8c69f81cfb234113d66d03d216878ac..d887016e54b68dc06a1bdae7d1a72391020baf0d 100644 (file)
@@ -1167,6 +1167,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
                return -EBUSY;
        }
 
+       if (dev == port_dev) {
+               NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself");
+               netdev_err(dev, "Cannot enslave team device to itself\n");
+               return -EINVAL;
+       }
+
        if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
            vlan_uses_dev(dev)) {
                NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
index 3eb88b7147f07b88743a67a1726cb0647b0bf99b..b1743420939b48b4259c84144249ab52efcc5b8c 100644 (file)
@@ -180,6 +180,7 @@ struct tun_file {
        };
        struct napi_struct napi;
        bool napi_enabled;
+       bool napi_frags_enabled;
        struct mutex napi_mutex;        /* Protects access to the above napi */
        struct list_head next;
        struct tun_struct *detached;
@@ -312,32 +313,32 @@ static int tun_napi_poll(struct napi_struct *napi, int budget)
 }
 
 static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
-                         bool napi_en)
+                         bool napi_en, bool napi_frags)
 {
        tfile->napi_enabled = napi_en;
+       tfile->napi_frags_enabled = napi_en && napi_frags;
        if (napi_en) {
                netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
                               NAPI_POLL_WEIGHT);
                napi_enable(&tfile->napi);
-               mutex_init(&tfile->napi_mutex);
        }
 }
 
-static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile)
+static void tun_napi_disable(struct tun_file *tfile)
 {
        if (tfile->napi_enabled)
                napi_disable(&tfile->napi);
 }
 
-static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile)
+static void tun_napi_del(struct tun_file *tfile)
 {
        if (tfile->napi_enabled)
                netif_napi_del(&tfile->napi);
 }
 
-static bool tun_napi_frags_enabled(const struct tun_struct *tun)
+static bool tun_napi_frags_enabled(const struct tun_file *tfile)
 {
-       return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS;
+       return tfile->napi_frags_enabled;
 }
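Rather than re-reading the mutable tun->flags word on every received packet, the queue now latches the NAPI-frags decision at attach time into a per-file boolean, so a later flag change cannot flip the receive path mid-stream. A small sketch of latching a capability at setup (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct queue {
        bool napi_enabled;
        bool frags_enabled;     /* latched once at attach, never re-read */
};

/* decide the queue's mode once, from the flags passed at attach time */
static void queue_attach(struct queue *q, bool napi, bool frags)
{
        q->napi_enabled  = napi;
        q->frags_enabled = napi && frags;  /* frags only make sense with NAPI */
}

static void rx_one(const struct queue *q)
{
        printf("rx path: %s\n", q->frags_enabled ? "frag-building" : "normal");
}

int main(void)
{
        struct queue q;

        queue_attach(&q, true, true);
        rx_one(&q);     /* stays in frag mode even if device flags change */
        return 0;
}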
 
 #ifdef CONFIG_TUN_VNET_CROSS_LE
@@ -689,8 +690,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
        tun = rtnl_dereference(tfile->tun);
 
        if (tun && clean) {
-               tun_napi_disable(tun, tfile);
-               tun_napi_del(tun, tfile);
+               tun_napi_disable(tfile);
+               tun_napi_del(tfile);
        }
 
        if (tun && !tfile->detached) {
@@ -757,7 +758,7 @@ static void tun_detach_all(struct net_device *dev)
        for (i = 0; i < n; i++) {
                tfile = rtnl_dereference(tun->tfiles[i]);
                BUG_ON(!tfile);
-               tun_napi_disable(tun, tfile);
+               tun_napi_disable(tfile);
                tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
                tfile->socket.sk->sk_data_ready(tfile->socket.sk);
                RCU_INIT_POINTER(tfile->tun, NULL);
@@ -773,7 +774,7 @@ static void tun_detach_all(struct net_device *dev)
        synchronize_net();
        for (i = 0; i < n; i++) {
                tfile = rtnl_dereference(tun->tfiles[i]);
-               tun_napi_del(tun, tfile);
+               tun_napi_del(tfile);
                /* Drop read queue */
                tun_queue_purge(tfile);
                xdp_rxq_info_unreg(&tfile->xdp_rxq);
@@ -792,7 +793,7 @@ static void tun_detach_all(struct net_device *dev)
 }
 
 static int tun_attach(struct tun_struct *tun, struct file *file,
-                     bool skip_filter, bool napi)
+                     bool skip_filter, bool napi, bool napi_frags)
 {
        struct tun_file *tfile = file->private_data;
        struct net_device *dev = tun->dev;
@@ -865,7 +866,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
                tun_enable_queue(tfile);
        } else {
                sock_hold(&tfile->sk);
-               tun_napi_init(tun, tfile, napi);
+               tun_napi_init(tun, tfile, napi, napi_frags);
        }
 
        if (rtnl_dereference(tun->xdp_prog))
@@ -1743,7 +1744,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        int err;
        u32 rxhash = 0;
        int skb_xdp = 1;
-       bool frags = tun_napi_frags_enabled(tun);
+       bool frags = tun_napi_frags_enabled(tfile);
 
        if (!(tun->dev->flags & IFF_UP))
                return -EIO;
@@ -2683,7 +2684,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                        return err;
 
                err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
-                                ifr->ifr_flags & IFF_NAPI);
+                                ifr->ifr_flags & IFF_NAPI,
+                                ifr->ifr_flags & IFF_NAPI_FRAGS);
                if (err < 0)
                        return err;
 
@@ -2781,7 +2783,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                              (ifr->ifr_flags & TUN_FEATURES);
 
                INIT_LIST_HEAD(&tun->disabled);
-               err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI);
+               err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
+                                ifr->ifr_flags & IFF_NAPI_FRAGS);
                if (err < 0)
                        goto err_free_flow;
 
@@ -2930,7 +2933,8 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
                ret = security_tun_dev_attach_queue(tun->security);
                if (ret < 0)
                        goto unlock;
-               ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI);
+               ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
+                                tun->flags & IFF_NAPI_FRAGS);
        } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
                tun = rtnl_dereference(tfile->tun);
                if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
@@ -3348,6 +3352,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
                return -ENOMEM;
        }
 
+       mutex_init(&tfile->napi_mutex);
        RCU_INIT_POINTER(tfile->tun, NULL);
        tfile->flags = 0;
        tfile->ifindex = 0;
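Moving mutex_init() from tun_napi_init() into tun_chr_open() ties the mutex lifetime to the file object itself, so it is initialized exactly once and is usable even on paths where NAPI was never enabled. A sketch of that init-at-creation idea with POSIX threads (build with -pthread; the structure is illustrative):

#include <pthread.h>
#include <stdio.h>

struct file_state {
        pthread_mutex_t lock;   /* valid for the whole object lifetime */
        int napi_enabled;
};

/* initialize everything the object owns at creation time, once */
static void file_open(struct file_state *f)
{
        pthread_mutex_init(&f->lock, NULL);
        f->napi_enabled = 0;    /* feature may come later, lock already usable */
}

int main(void)
{
        struct file_state f;

        file_open(&f);
        pthread_mutex_lock(&f.lock);    /* safe even though NAPI never enabled */
        pthread_mutex_unlock(&f.lock);
        pthread_mutex_destroy(&f.lock);
        puts("ok");
        return 0;
}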
index e95dd12edec473198125c18c1cec6bc7d32ec368..023b8d0bf1754e833e08514b9cf6165ce3240984 100644 (file)
@@ -607,6 +607,9 @@ int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
        struct usbnet *dev = netdev_priv(net);
        u8 opt = 0;
 
+       if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+               return -EINVAL;
+
        if (wolinfo->wolopts & WAKE_PHY)
                opt |= AX_MONITOR_LINK;
        if (wolinfo->wolopts & WAKE_MAGIC)
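This asix hunk, and the matching ax88179, lan78xx, rtl8152, smsc75xx/95xx and sr9800 hunks below, all add the same guard: reject any requested wake option outside the mask the driver can actually arm, instead of silently dropping it. The pattern in isolation (the flag values below are illustrative):

#include <errno.h>
#include <stdio.h>

#define WAKE_PHY   (1u << 0)    /* illustrative values */
#define WAKE_ARP   (1u << 4)
#define WAKE_MAGIC (1u << 5)

/* accept only wake sources this device can actually arm */
static int set_wol(unsigned int requested)
{
        const unsigned int supported = WAKE_PHY | WAKE_MAGIC;

        if (requested & ~supported)
                return -EINVAL; /* caller asked for something unsupported */

        /* ... program the supported bits ... */
        return 0;
}

int main(void)
{
        printf("magic: %d\n", set_wol(WAKE_MAGIC));  /* 0 */
        printf("arp:   %d\n", set_wol(WAKE_ARP));    /* -22 (-EINVAL) */
        return 0;
}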
index 9e8ad372f4190eed1d4e92891193d325c44fb47f..2207f7a7d1ffbb3fe6c4fefa101c4bb2ae01384e 100644 (file)
@@ -566,6 +566,9 @@ ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
        struct usbnet *dev = netdev_priv(net);
        u8 opt = 0;
 
+       if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+               return -EINVAL;
+
        if (wolinfo->wolopts & WAKE_PHY)
                opt |= AX_MONITOR_MODE_RWLC;
        if (wolinfo->wolopts & WAKE_MAGIC)
index 656441d9a95502f35b211de912f35a6a51640850..be1917be28f2d457c561a117ea92dd49c7f62d50 100644 (file)
@@ -1387,19 +1387,10 @@ static int lan78xx_set_wol(struct net_device *netdev,
        if (ret < 0)
                return ret;
 
-       pdata->wol = 0;
-       if (wol->wolopts & WAKE_UCAST)
-               pdata->wol |= WAKE_UCAST;
-       if (wol->wolopts & WAKE_MCAST)
-               pdata->wol |= WAKE_MCAST;
-       if (wol->wolopts & WAKE_BCAST)
-               pdata->wol |= WAKE_BCAST;
-       if (wol->wolopts & WAKE_MAGIC)
-               pdata->wol |= WAKE_MAGIC;
-       if (wol->wolopts & WAKE_PHY)
-               pdata->wol |= WAKE_PHY;
-       if (wol->wolopts & WAKE_ARP)
-               pdata->wol |= WAKE_ARP;
+       if (wol->wolopts & ~WAKE_ALL)
+               return -EINVAL;
+
+       pdata->wol = wol->wolopts;
 
        device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
 
index 2cd71bdb6484c774659598fff1e99cd49181337b..f1b5201cc32075da27cf14d94b781c9f58c16189 100644 (file)
@@ -4506,6 +4506,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
        if (!rtl_can_wakeup(tp))
                return -EOPNOTSUPP;
 
+       if (wol->wolopts & ~WAKE_ANY)
+               return -EINVAL;
+
        ret = usb_autopm_get_interface(tp->intf);
        if (ret < 0)
                goto out_set_wol;
index 05553d2524469f97e4a02bb48f43f6820ad2b3e5..ec287c9741e833eb2af7b2878ee08ff1941227b0 100644 (file)
@@ -731,6 +731,9 @@ static int smsc75xx_ethtool_set_wol(struct net_device *net,
        struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
        int ret;
 
+       if (wolinfo->wolopts & ~SUPPORTED_WAKE)
+               return -EINVAL;
+
        pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
 
        ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
@@ -1517,6 +1520,7 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
 {
        struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
        if (pdata) {
+               cancel_work_sync(&pdata->set_multicast);
                netif_dbg(dev, ifdown, dev->net, "free pdata\n");
                kfree(pdata);
                pdata = NULL;
index 06b4d290784dad95f893b63da62d26e020fc060a..262e7a3c23cb67fbfd66b81ed0d26af0f0480d84 100644 (file)
@@ -774,6 +774,9 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
        struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
        int ret;
 
+       if (wolinfo->wolopts & ~SUPPORTED_WAKE)
+               return -EINVAL;
+
        pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
 
        ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
index 9277a0f228dfa6de355c74d2652edcf2fb1d2f4b..35f39f23d88144195b8f007035f207d38b48c1fd 100644 (file)
@@ -421,6 +421,9 @@ sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
        struct usbnet *dev = netdev_priv(net);
        u8 opt = 0;
 
+       if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+               return -EINVAL;
+
        if (wolinfo->wolopts & WAKE_PHY)
                opt |= SR_MONITOR_LINK;
        if (wolinfo->wolopts & WAKE_MAGIC)
index 73aa33364d8053b041ae7ae8b6186010bddb9f47..504282af27e51f656192b729a0fbe3dcd13eb3bf 100644 (file)
@@ -802,7 +802,7 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
 int usbnet_stop (struct net_device *net)
 {
        struct usbnet           *dev = netdev_priv(net);
-       struct driver_info      *info = dev->driver_info;
+       const struct driver_info *info = dev->driver_info;
        int                     retval, pm, mpn;
 
        clear_bit(EVENT_DEV_OPEN, &dev->flags);
@@ -865,7 +865,7 @@ int usbnet_open (struct net_device *net)
 {
        struct usbnet           *dev = netdev_priv(net);
        int                     retval;
-       struct driver_info      *info = dev->driver_info;
+       const struct driver_info *info = dev->driver_info;
 
        if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
                netif_info(dev, ifup, dev->net,
@@ -1205,7 +1205,7 @@ fail_lowmem:
        }
 
        if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
-               struct driver_info      *info = dev->driver_info;
+               const struct driver_info *info = dev->driver_info;
                int                     retval = 0;
 
                clear_bit (EVENT_LINK_RESET, &dev->flags);
@@ -1353,7 +1353,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
        unsigned int                    length;
        struct urb              *urb = NULL;
        struct skb_data         *entry;
-       struct driver_info      *info = dev->driver_info;
+       const struct driver_info *info = dev->driver_info;
        unsigned long           flags;
        int retval;
 
@@ -1647,7 +1647,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
        struct usbnet                   *dev;
        struct net_device               *net;
        struct usb_host_interface       *interface;
-       struct driver_info              *info;
+       const struct driver_info        *info;
        struct usb_device               *xdev;
        int                             status;
        const char                      *name;
@@ -1663,7 +1663,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
        }
 
        name = udev->dev.driver->name;
-       info = (struct driver_info *) prod->driver_info;
+       info = (const struct driver_info *) prod->driver_info;
        if (!info) {
                dev_dbg (&udev->dev, "blacklisted by %s\n", name);
                return -ENODEV;
index 76592090522607a4bdf5422b1d49ec99c6fd68ac..dab504ec5e502be401cbfe9a8e3f0f572c0220ba 100644 (file)
@@ -1699,17 +1699,6 @@ static void virtnet_stats(struct net_device *dev,
        tot->rx_frame_errors = dev->stats.rx_frame_errors;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void virtnet_netpoll(struct net_device *dev)
-{
-       struct virtnet_info *vi = netdev_priv(dev);
-       int i;
-
-       for (i = 0; i < vi->curr_queue_pairs; i++)
-               napi_schedule(&vi->rq[i].napi);
-}
-#endif
-
 static void virtnet_ack_link_announce(struct virtnet_info *vi)
 {
        rtnl_lock();
@@ -2447,9 +2436,6 @@ static const struct net_device_ops virtnet_netdev = {
        .ndo_get_stats64     = virtnet_stats,
        .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = virtnet_netpoll,
-#endif
        .ndo_bpf                = virtnet_xdp,
        .ndo_xdp_xmit           = virtnet_xdp_xmit,
        .ndo_features_check     = passthru_features_check,
index f93547f257fbbe22ccb8da8d7eca6c6230b28d40..69b7227c637e516ce1da2a4970772a9bba96f0c0 100644 (file)
@@ -1215,8 +1215,19 @@ static int vrf_add_fib_rules(const struct net_device *dev)
                goto ipmr_err;
 #endif
 
+#if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES)
+       err = vrf_fib_rule(dev, RTNL_FAMILY_IP6MR, true);
+       if (err < 0)
+               goto ip6mr_err;
+#endif
+
        return 0;
 
+#if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES)
+ip6mr_err:
+       vrf_fib_rule(dev, RTNL_FAMILY_IPMR,  false);
+#endif
+
 #if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
 ipmr_err:
        vrf_fib_rule(dev, AF_INET6,  false);
index e5d236595206bdc9ac00a4b458f07524eef1e72d..fb0cdbba8d761efe3781a71db9834a4bbba5dcad 100644 (file)
@@ -3539,6 +3539,7 @@ static size_t vxlan_get_size(const struct net_device *dev)
                nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
                nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_TTL */
+               nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_TTL_INHERIT */
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_TOS */
                nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_LEARNING */
@@ -3603,6 +3604,8 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
        }
 
        if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
+           nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT,
+                      !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) ||
            nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
            nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
            nla_put_u8(skb, IFLA_VXLAN_LEARNING,
index 094cea775d0c0bd3090102cf5d511d08d718fef7..ef298d8525c5481c8df55f933cf0df6452ba41c0 100644 (file)
@@ -257,7 +257,7 @@ static const struct
        [I2400M_MS_ACCESSIBILITY_ERROR] = { "accessibility error", -EIO },
        [I2400M_MS_BUSY] = { "busy", -EBUSY },
        [I2400M_MS_CORRUPTED_TLV] = { "corrupted TLV", -EILSEQ },
-       [I2400M_MS_UNINITIALIZED] = { "not unitialized", -EILSEQ },
+       [I2400M_MS_UNINITIALIZED] = { "uninitialized", -EILSEQ },
        [I2400M_MS_UNKNOWN_ERROR] = { "unknown error", -EIO },
        [I2400M_MS_PRODUCTION_ERROR] = { "production error", -EIO },
        [I2400M_MS_NO_RF] = { "no RF", -EIO },
index 54ff5930126c4dca93c00a240790a013ae27c2d8..6572a43590a8153d84d92adc5e9b225e98acbde2 100644 (file)
@@ -42,7 +42,8 @@ config ATH10K_USB
 
 config ATH10K_SNOC
        tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)"
-       depends on ATH10K && ARCH_QCOM
+       depends on ATH10K
+       depends on ARCH_QCOM || COMPILE_TEST
        ---help---
          This module adds support for the integrated WCN3990 chip connected
          to the system NOC (SNOC). Currently work in progress and will not
index c9bd0e2b5db7ed5a062fbb0a5d1abbdffc4e50ec..4cd69aca75e2029a5d70fd38c1c69b1d62c6fb28 100644 (file)
@@ -655,10 +655,10 @@ static void ath10k_ahb_hif_stop(struct ath10k *ar)
        ath10k_ahb_irq_disable(ar);
        synchronize_irq(ar_ahb->irq);
 
-       ath10k_pci_flush(ar);
-
        napi_synchronize(&ar->napi);
        napi_disable(&ar->napi);
+
+       ath10k_pci_flush(ar);
 }
 
 static int ath10k_ahb_hif_power_up(struct ath10k *ar)
@@ -750,7 +750,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
        enum ath10k_hw_rev hw_rev;
        size_t size;
        int ret;
-       u32 chip_id;
+       struct ath10k_bus_params bus_params;
 
        of_id = of_match_device(ath10k_ahb_of_match, &pdev->dev);
        if (!of_id) {
@@ -806,14 +806,15 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
 
        ath10k_pci_ce_deinit(ar);
 
-       chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
-       if (chip_id == 0xffffffff) {
+       bus_params.dev_type = ATH10K_DEV_TYPE_LL;
+       bus_params.chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+       if (bus_params.chip_id == 0xffffffff) {
                ath10k_err(ar, "failed to get chip id\n");
                ret = -ENODEV;
                goto err_halt_device;
        }
 
-       ret = ath10k_core_register(ar, chip_id);
+       ret = ath10k_core_register(ar, &bus_params);
        if (ret) {
                ath10k_err(ar, "failed to register driver core: %d\n", ret);
                goto err_halt_device;
index af4978d6a14bbd48377755887ac2320961d4ba68..1750b182209b6f406dc97cd17bd3994d2ec6d00d 100644 (file)
@@ -459,3 +459,26 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
 
        return ret;
 }
+
+int ath10k_bmi_set_start(struct ath10k *ar, u32 address)
+{
+       struct bmi_cmd cmd;
+       u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.set_app_start);
+       int ret;
+
+       if (ar->bmi.done_sent) {
+               ath10k_warn(ar, "bmi set start command disallowed\n");
+               return -EBUSY;
+       }
+
+       cmd.id = __cpu_to_le32(BMI_SET_APP_START);
+       cmd.set_app_start.addr = __cpu_to_le32(address);
+
+       ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+       if (ret) {
+               ath10k_warn(ar, "unable to set start address on the device: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
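ath10k_bmi_set_start() follows the usual BMI command shape: bail out once the BMI done command has been sent, fill a command structure with little-endian fields, then exchange it with the target. A userspace sketch of building such a little-endian command buffer (the command id and layout below are illustrative, not the real bmi_cmd):

#include <stdint.h>
#include <stdio.h>

/* write a 32-bit value as little-endian bytes, like __cpu_to_le32 storage */
static void put_le32(uint8_t *p, uint32_t v)
{
        p[0] = v;  p[1] = v >> 8;  p[2] = v >> 16;  p[3] = v >> 24;
}

#define CMD_SET_APP_START 9u    /* illustrative command id */

/* serialize { id, addr } into buf; returns the command length */
static size_t build_set_start(uint8_t *buf, uint32_t address)
{
        put_le32(buf + 0, CMD_SET_APP_START);
        put_le32(buf + 4, address);
        return 8;
}

int main(void)
{
        uint8_t buf[8];
        size_t len = build_set_start(buf, 0x00400000);

        for (size_t i = 0; i < len; i++)
                printf("%02x ", buf[i]);
        putchar('\n');
        return 0;
}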
index 9a396817aa55a27c5e255259ba5152d28a91ae55..725c9afc63f290f12469893c8018e055415b1a8e 100644 (file)
@@ -86,6 +86,10 @@ enum bmi_cmd_id {
 #define BMI_PARAM_GET_FLASH_BOARD_ID 0x8000
 #define BMI_PARAM_FLASH_SECTION_ALL 0x10000
 
+/* Dual-band Extended Board ID */
+#define BMI_PARAM_GET_EXT_BOARD_ID 0x40000
+#define ATH10K_BMI_EXT_BOARD_ID_SUPPORT 0x40000
+
 #define ATH10K_BMI_BOARD_ID_FROM_OTP_MASK   0x7c00
 #define ATH10K_BMI_BOARD_ID_FROM_OTP_LSB    10
 
@@ -93,6 +97,7 @@ enum bmi_cmd_id {
 #define ATH10K_BMI_CHIP_ID_FROM_OTP_LSB     15
 
 #define ATH10K_BMI_BOARD_ID_STATUS_MASK 0xff
+#define ATH10K_BMI_EBOARD_ID_STATUS_MASK 0xff
 
 struct bmi_cmd {
        __le32 id; /* enum bmi_cmd_id */
@@ -190,6 +195,35 @@ struct bmi_target_info {
        u32 type;
 };
 
+struct bmi_segmented_file_header {
+       __le32 magic_num;
+       __le32 file_flags;
+       u8 data[];
+};
+
+struct bmi_segmented_metadata {
+       __le32 addr;
+       __le32 length;
+       u8 data[];
+};
+
+#define BMI_SGMTFILE_MAGIC_NUM          0x544d4753 /* "SGMT" */
+#define BMI_SGMTFILE_FLAG_COMPRESS      1
+
+/* Special values for bmi_segmented_metadata.length (all have high bit set) */
+
+/* end of segmented data */
+#define BMI_SGMTFILE_DONE               0xffffffff
+
+/* Board Data segment */
+#define BMI_SGMTFILE_BDDATA             0xfffffffe
+
+/* set beginning address */
+#define BMI_SGMTFILE_BEGINADDR          0xfffffffd
+
+/* immediate function execution */
+#define BMI_SGMTFILE_EXEC               0xfffffffc
+
 /* in jiffies */
 #define BMI_COMMUNICATION_TIMEOUT_HZ (3 * HZ)
 
@@ -239,4 +273,6 @@ int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
                             const void *buffer, u32 length);
 int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val);
 int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val);
+int ath10k_bmi_set_start(struct ath10k *ar, u32 address);
+
 #endif /* _BMI_H_ */
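The new bmi_segmented_file_header and bmi_segmented_metadata definitions describe a stream of (addr, length, data[]) records terminated by the BMI_SGMTFILE_DONE sentinel, with the other high length values marking special records. A minimal userspace walker over that layout, assuming a well-formed in-memory image and ignoring the BDDATA/BEGINADDR/EXEC special cases:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SGMT_MAGIC 0x544d4753u  /* "SGMT" */
#define SGMT_DONE  0xffffffffu  /* end-of-stream sentinel */

static uint32_t get_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* walk header { magic, flags } then (addr, length, data[]) records */
static void walk(const uint8_t *img)
{
        if (get_le32(img) != SGMT_MAGIC) {
                puts("bad magic");
                return;
        }
        img += 8;       /* skip magic + file_flags */

        for (;;) {
                uint32_t addr = get_le32(img), len = get_le32(img + 4);

                if (len == SGMT_DONE)
                        break;
                printf("segment: addr=%#x len=%u\n", addr, len);
                img += 8 + len; /* metadata + payload */
        }
}

int main(void)
{
        uint8_t img[32] = { 0 };

        memcpy(img, "SGMT", 4); /* magic; file_flags stay 0 */
        img[8] = 0x10;          /* record 1: addr = 0x10 */
        img[12] = 4;            /* record 1: len = 4, payload at 16..19 */
        memset(img + 24, 0xff, 4); /* record 2 (offset 20): len = DONE */
        walk(img);
        return 0;
}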
index 18c709c484e738cd02a0c7cd373c0f485b5170f9..f6d3ecbdd3a32c42a6715247e0aee28ee018d1b4 100644 (file)
@@ -1280,10 +1280,17 @@ static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
 
 int ath10k_ce_disable_interrupts(struct ath10k *ar)
 {
+       struct ath10k_ce *ce = ath10k_ce_priv(ar);
+       struct ath10k_ce_pipe *ce_state;
+       u32 ctrl_addr;
        int ce_id;
 
        for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
-               u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+               ce_state  = &ce->ce_states[ce_id];
+               if (ce_state->attr_flags & CE_ATTR_POLL)
+                       continue;
+
+               ctrl_addr = ath10k_ce_base_address(ar, ce_id);
 
                ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
                ath10k_ce_error_intr_disable(ar, ctrl_addr);
@@ -1300,11 +1307,14 @@ void ath10k_ce_enable_interrupts(struct ath10k *ar)
        int ce_id;
        struct ath10k_ce_pipe *ce_state;
 
-       /* Skip the last copy engine, CE7 the diagnostic window, as that
-        * uses polling and isn't initialized for interrupts.
+       /* Enable interrupts for the copy engines that are not
+        * using polling mode.
+        */
-       for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) {
+       for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
                ce_state  = &ce->ce_states[ce_id];
+               if (ce_state->attr_flags & CE_ATTR_POLL)
+                       continue;
+
                ath10k_ce_per_engine_handler_adjust(ce_state);
        }
 }
@@ -1416,10 +1426,8 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
 
        nentries = roundup_pow_of_two(nentries);
 
-       src_ring = kzalloc(sizeof(*src_ring) +
-                          (nentries *
-                           sizeof(*src_ring->per_transfer_context)),
-                          GFP_KERNEL);
+       src_ring = kzalloc(struct_size(src_ring, per_transfer_context,
+                                      nentries), GFP_KERNEL);
        if (src_ring == NULL)
                return ERR_PTR(-ENOMEM);
 
@@ -1476,10 +1484,8 @@ ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
 
        nentries = roundup_pow_of_two(nentries);
 
-       src_ring = kzalloc(sizeof(*src_ring) +
-                          (nentries *
-                           sizeof(*src_ring->per_transfer_context)),
-                          GFP_KERNEL);
+       src_ring = kzalloc(struct_size(src_ring, per_transfer_context,
+                                      nentries), GFP_KERNEL);
        if (!src_ring)
                return ERR_PTR(-ENOMEM);
 
@@ -1534,10 +1540,8 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
 
        nentries = roundup_pow_of_two(attr->dest_nentries);
 
-       dest_ring = kzalloc(sizeof(*dest_ring) +
-                           (nentries *
-                            sizeof(*dest_ring->per_transfer_context)),
-                           GFP_KERNEL);
+       dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context,
+                                       nentries), GFP_KERNEL);
        if (dest_ring == NULL)
                return ERR_PTR(-ENOMEM);
 
@@ -1580,10 +1584,8 @@ ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id,
 
        nentries = roundup_pow_of_two(attr->dest_nentries);
 
-       dest_ring = kzalloc(sizeof(*dest_ring) +
-                           (nentries *
-                            sizeof(*dest_ring->per_transfer_context)),
-                           GFP_KERNEL);
+       dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context,
+                                       nentries), GFP_KERNEL);
        if (!dest_ring)
                return ERR_PTR(-ENOMEM);
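Each of these allocation hunks swaps open-coded sizeof(*ring) + nentries * sizeof(elem) arithmetic for struct_size(), which saturates on overflow rather than wrapping and under-allocating. A userspace approximation of that overflow-checked sizing for a flexible-array struct:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct ring {
        unsigned int nentries;
        void *per_transfer_context[];   /* flexible array member */
};

/* overflow-checked size of struct ring with n trailing elements,
 * approximating the kernel's struct_size(): SIZE_MAX on overflow
 */
static size_t ring_size(size_t n)
{
        size_t elem = sizeof(void *);

        if (n > (SIZE_MAX - sizeof(struct ring)) / elem)
                return SIZE_MAX;  /* allocation fails instead of wrapping */
        return sizeof(struct ring) + n * elem;
}

int main(void)
{
        struct ring *r = malloc(ring_size(64));

        if (r) {
                r->nentries = 64;
                printf("allocated %zu bytes\n", ring_size(64));
                free(r);
        }
        return 0;
}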
 
index b8fb5382dedeb9830142f83d5b674f8d445c8e98..ead9987c3259fc3683513aae4eaa8228f6f47cc1 100644 (file)
@@ -275,16 +275,19 @@ void ath10k_ce_free_rri(struct ath10k *ar);
 
 /* ce_attr.flags values */
 /* Use NonSnooping PCIe accesses? */
-#define CE_ATTR_NO_SNOOP               1
+#define CE_ATTR_NO_SNOOP               BIT(0)
 
 /* Byte swap data words */
-#define CE_ATTR_BYTE_SWAP_DATA         2
+#define CE_ATTR_BYTE_SWAP_DATA         BIT(1)
 
 /* Swizzle descriptors? */
-#define CE_ATTR_SWIZZLE_DESCRIPTORS    4
+#define CE_ATTR_SWIZZLE_DESCRIPTORS    BIT(2)
 
 /* no interrupt on copy completion */
-#define CE_ATTR_DIS_INTR               8
+#define CE_ATTR_DIS_INTR               BIT(3)
+
+/* no interrupt, only polling */
+#define CE_ATTR_POLL                   BIT(4)
 
 /* Attributes of an instance of a Copy Engine */
 struct ce_attr {
index c40cd129afe7b2c006fff3d8b270add0a3843d32..203f30992c26b2225567e15c54b7f3f2f521e706 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/firmware.h>
 #include <linux/of.h>
+#include <linux/property.h>
 #include <linux/dmi.h>
 #include <linux/ctype.h>
 #include <asm/byteorder.h>
@@ -63,6 +64,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
        {
                .id = QCA988X_HW_2_0_VERSION,
                .dev_id = QCA988X_2_0_DEVICE_ID,
+               .bus = ATH10K_BUS_PCI,
                .name = "qca988x hw2.0",
                .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
                .uart_pin = 7,
@@ -84,13 +86,14 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 0,
                .vht160_mcs_tx_highest = 0,
                .n_cipher_suites = 8,
-               .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
                .target_64bit = false,
                .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
                .shadow_reg_support = false,
                .rri_on_ddr = false,
+               .hw_filter_reset_required = true,
+               .fw_diag_ce_download = false,
        },
        {
                .id = QCA988X_HW_2_0_VERSION,
@@ -116,7 +119,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 0,
                .vht160_mcs_tx_highest = 0,
                .n_cipher_suites = 8,
-               .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
                .target_64bit = false,
@@ -124,10 +126,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .per_ce_irq = false,
                .shadow_reg_support = false,
                .rri_on_ddr = false,
+               .hw_filter_reset_required = true,
+               .fw_diag_ce_download = false,
        },
        {
                .id = QCA9887_HW_1_0_VERSION,
                .dev_id = QCA9887_1_0_DEVICE_ID,
+               .bus = ATH10K_BUS_PCI,
                .name = "qca9887 hw1.0",
                .patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR,
                .uart_pin = 7,
@@ -149,7 +154,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 0,
                .vht160_mcs_tx_highest = 0,
                .n_cipher_suites = 8,
-               .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
                .target_64bit = false,
@@ -157,10 +161,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .per_ce_irq = false,
                .shadow_reg_support = false,
                .rri_on_ddr = false,
+               .hw_filter_reset_required = true,
+               .fw_diag_ce_download = false,
        },
        {
                .id = QCA6174_HW_2_1_VERSION,
                .dev_id = QCA6164_2_1_DEVICE_ID,
+               .bus = ATH10K_BUS_PCI,
                .name = "qca6164 hw2.1",
                .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
                .uart_pin = 6,
@@ -181,7 +188,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 0,
                .vht160_mcs_tx_highest = 0,
                .n_cipher_suites = 8,
-               .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
                .target_64bit = false,
@@ -189,10 +195,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .per_ce_irq = false,
                .shadow_reg_support = false,
                .rri_on_ddr = false,
+               .hw_filter_reset_required = true,
+               .fw_diag_ce_download = false,
        },
        {
                .id = QCA6174_HW_2_1_VERSION,
                .dev_id = QCA6174_2_1_DEVICE_ID,
+               .bus = ATH10K_BUS_PCI,
                .name = "qca6174 hw2.1",
                .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
                .uart_pin = 6,
@@ -213,7 +222,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 0,
                .vht160_mcs_tx_highest = 0,
                .n_cipher_suites = 8,
-               .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
                .target_64bit = false,
@@ -221,10 +229,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .per_ce_irq = false,
                .shadow_reg_support = false,
                .rri_on_ddr = false,
+               .hw_filter_reset_required = true,
+               .fw_diag_ce_download = false,
        },
        {
                .id = QCA6174_HW_3_0_VERSION,
                .dev_id = QCA6174_2_1_DEVICE_ID,
+               .bus = ATH10K_BUS_PCI,
                .name = "qca6174 hw3.0",
                .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
                .uart_pin = 6,
@@ -245,7 +256,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 0,
                .vht160_mcs_tx_highest = 0,
                .n_cipher_suites = 8,
-               .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
                .target_64bit = false,
@@ -253,10 +263,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .per_ce_irq = false,
                .shadow_reg_support = false,
                .rri_on_ddr = false,
+               .hw_filter_reset_required = true,
+               .fw_diag_ce_download = false,
        },
        {
                .id = QCA6174_HW_3_2_VERSION,
                .dev_id = QCA6174_2_1_DEVICE_ID,
+               .bus = ATH10K_BUS_PCI,
                .name = "qca6174 hw3.2",
                .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
                .uart_pin = 6,
@@ -280,7 +293,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 0,
                .vht160_mcs_tx_highest = 0,
                .n_cipher_suites = 8,
-               .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
                .target_64bit = false,
@@ -288,10 +300,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .per_ce_irq = false,
                .shadow_reg_support = false,
                .rri_on_ddr = false,
+               .hw_filter_reset_required = true,
+               .fw_diag_ce_download = true,
        },
        {
                .id = QCA99X0_HW_2_0_DEV_VERSION,
                .dev_id = QCA99X0_2_0_DEVICE_ID,
+               .bus = ATH10K_BUS_PCI,
                .name = "qca99x0 hw2.0",
                .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR,
                .uart_pin = 7,
@@ -318,7 +333,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 0,
                .vht160_mcs_tx_highest = 0,
                .n_cipher_suites = 11,
-               .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
                .target_64bit = false,
@@ -326,10 +340,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .per_ce_irq = false,
                .shadow_reg_support = false,
                .rri_on_ddr = false,
+               .hw_filter_reset_required = true,
+               .fw_diag_ce_download = false,
        },
        {
                .id = QCA9984_HW_1_0_DEV_VERSION,
                .dev_id = QCA9984_1_0_DEVICE_ID,
+               .bus = ATH10K_BUS_PCI,
                .name = "qca9984/qca9994 hw1.0",
                .patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR,
                .uart_pin = 7,
@@ -346,8 +363,10 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .fw = {
                        .dir = QCA9984_HW_1_0_FW_DIR,
                        .board = QCA9984_HW_1_0_BOARD_DATA_FILE,
+                       .eboard = QCA9984_HW_1_0_EBOARD_DATA_FILE,
                        .board_size = QCA99X0_BOARD_DATA_SZ,
                        .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
+                       .ext_board_size = QCA99X0_EXT_BOARD_DATA_SZ,
                },
                .sw_decrypt_mcast_mgmt = true,
                .hw_ops = &qca99x0_ops,
@@ -361,7 +380,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 1560,
                .vht160_mcs_tx_highest = 1560,
                .n_cipher_suites = 11,
-               .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
                .target_64bit = false,
@@ -369,10 +387,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .per_ce_irq = false,
                .shadow_reg_support = false,
                .rri_on_ddr = false,
+               .hw_filter_reset_required = true,
+               .fw_diag_ce_download = false,
        },
        {
                .id = QCA9888_HW_2_0_DEV_VERSION,
                .dev_id = QCA9888_2_0_DEVICE_ID,
+               .bus = ATH10K_BUS_PCI,
                .name = "qca9888 hw2.0",
                .patch_load_addr = QCA9888_HW_2_0_PATCH_LOAD_ADDR,
                .uart_pin = 7,
@@ -403,7 +424,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 780,
                .vht160_mcs_tx_highest = 780,
                .n_cipher_suites = 11,
-               .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
                .target_64bit = false,
@@ -411,10 +431,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .per_ce_irq = false,
                .shadow_reg_support = false,
                .rri_on_ddr = false,
+               .hw_filter_reset_required = true,
+               .fw_diag_ce_download = false,
        },
        {
                .id = QCA9377_HW_1_0_DEV_VERSION,
                .dev_id = QCA9377_1_0_DEVICE_ID,
+               .bus = ATH10K_BUS_PCI,
                .name = "qca9377 hw1.0",
                .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
                .uart_pin = 6,
@@ -435,7 +458,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 0,
                .vht160_mcs_tx_highest = 0,
                .n_cipher_suites = 8,
-               .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
                .target_64bit = false,
@@ -443,10 +465,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .per_ce_irq = false,
                .shadow_reg_support = false,
                .rri_on_ddr = false,
+               .hw_filter_reset_required = true,
+               .fw_diag_ce_download = false,
        },
        {
                .id = QCA9377_HW_1_1_DEV_VERSION,
                .dev_id = QCA9377_1_0_DEVICE_ID,
+               .bus = ATH10K_BUS_PCI,
                .name = "qca9377 hw1.1",
                .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
                .uart_pin = 6,
@@ -469,7 +494,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 0,
                .vht160_mcs_tx_highest = 0,
                .n_cipher_suites = 8,
-               .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
                .target_64bit = false,
@@ -477,10 +501,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .per_ce_irq = false,
                .shadow_reg_support = false,
                .rri_on_ddr = false,
+               .hw_filter_reset_required = true,
+               .fw_diag_ce_download = true,
        },
        {
                .id = QCA4019_HW_1_0_DEV_VERSION,
                .dev_id = 0,
+               .bus = ATH10K_BUS_AHB,
                .name = "qca4019 hw1.0",
                .patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR,
                .uart_pin = 7,
@@ -508,7 +535,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 0,
                .vht160_mcs_tx_highest = 0,
                .n_cipher_suites = 11,
-               .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
                .target_64bit = false,
@@ -516,10 +542,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .per_ce_irq = false,
                .shadow_reg_support = false,
                .rri_on_ddr = false,
+               .hw_filter_reset_required = true,
+               .fw_diag_ce_download = false,
        },
        {
                .id = WCN3990_HW_1_0_DEV_VERSION,
                .dev_id = 0,
+               .bus = ATH10K_BUS_PCI,
                .name = "wcn3990 hw1.0",
                .continuous_frag_desc = true,
                .tx_chain_mask = 0x7,
@@ -539,6 +568,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .per_ce_irq = true,
                .shadow_reg_support = true,
                .rri_on_ddr = true,
+               .hw_filter_reset_required = false,
+               .fw_diag_ce_download = false,
        },
 };
 
@@ -762,153 +793,11 @@ static int ath10k_push_board_ext_data(struct ath10k *ar, const void *data,
        return 0;
 }
 
-static int ath10k_download_board_data(struct ath10k *ar, const void *data,
-                                     size_t data_len)
-{
-       u32 board_data_size = ar->hw_params.fw.board_size;
-       u32 address;
-       int ret;
-
-       ret = ath10k_push_board_ext_data(ar, data, data_len);
-       if (ret) {
-               ath10k_err(ar, "could not push board ext data (%d)\n", ret);
-               goto exit;
-       }
-
-       ret = ath10k_bmi_read32(ar, hi_board_data, &address);
-       if (ret) {
-               ath10k_err(ar, "could not read board data addr (%d)\n", ret);
-               goto exit;
-       }
-
-       ret = ath10k_bmi_write_memory(ar, address, data,
-                                     min_t(u32, board_data_size,
-                                           data_len));
-       if (ret) {
-               ath10k_err(ar, "could not write board data (%d)\n", ret);
-               goto exit;
-       }
-
-       ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1);
-       if (ret) {
-               ath10k_err(ar, "could not write board data bit (%d)\n", ret);
-               goto exit;
-       }
-
-exit:
-       return ret;
-}
-
-static int ath10k_download_cal_file(struct ath10k *ar,
-                                   const struct firmware *file)
-{
-       int ret;
-
-       if (!file)
-               return -ENOENT;
-
-       if (IS_ERR(file))
-               return PTR_ERR(file);
-
-       ret = ath10k_download_board_data(ar, file->data, file->size);
-       if (ret) {
-               ath10k_err(ar, "failed to download cal_file data: %d\n", ret);
-               return ret;
-       }
-
-       ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cal file downloaded\n");
-
-       return 0;
-}
-
-static int ath10k_download_cal_dt(struct ath10k *ar, const char *dt_name)
-{
-       struct device_node *node;
-       int data_len;
-       void *data;
-       int ret;
-
-       node = ar->dev->of_node;
-       if (!node)
-               /* Device Tree is optional, don't print any warnings if
-                * there's no node for ath10k.
-                */
-               return -ENOENT;
-
-       if (!of_get_property(node, dt_name, &data_len)) {
-               /* The calibration data node is optional */
-               return -ENOENT;
-       }
-
-       if (data_len != ar->hw_params.cal_data_len) {
-               ath10k_warn(ar, "invalid calibration data length in DT: %d\n",
-                           data_len);
-               ret = -EMSGSIZE;
-               goto out;
-       }
-
-       data = kmalloc(data_len, GFP_KERNEL);
-       if (!data) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       ret = of_property_read_u8_array(node, dt_name, data, data_len);
-       if (ret) {
-               ath10k_warn(ar, "failed to read calibration data from DT: %d\n",
-                           ret);
-               goto out_free;
-       }
-
-       ret = ath10k_download_board_data(ar, data, data_len);
-       if (ret) {
-               ath10k_warn(ar, "failed to download calibration data from Device Tree: %d\n",
-                           ret);
-               goto out_free;
-       }
-
-       ret = 0;
-
-out_free:
-       kfree(data);
-
-out:
-       return ret;
-}
-
-static int ath10k_download_cal_eeprom(struct ath10k *ar)
-{
-       size_t data_len;
-       void *data = NULL;
-       int ret;
-
-       ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &data_len);
-       if (ret) {
-               if (ret != -EOPNOTSUPP)
-                       ath10k_warn(ar, "failed to read calibration data from EEPROM: %d\n",
-                                   ret);
-               goto out_free;
-       }
-
-       ret = ath10k_download_board_data(ar, data, data_len);
-       if (ret) {
-               ath10k_warn(ar, "failed to download calibration data from EEPROM: %d\n",
-                           ret);
-               goto out_free;
-       }
-
-       ret = 0;
-
-out_free:
-       kfree(data);
-
-       return ret;
-}
-
 static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
 {
        u32 result, address;
        u8 board_id, chip_id;
+       bool ext_bid_support;
        int ret, bmi_board_id_param;
 
        address = ar->hw_params.patch_load_addr;
@@ -948,10 +837,13 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
 
        board_id = MS(result, ATH10K_BMI_BOARD_ID_FROM_OTP);
        chip_id = MS(result, ATH10K_BMI_CHIP_ID_FROM_OTP);
+       ext_bid_support = (result & ATH10K_BMI_EXT_BOARD_ID_SUPPORT);
 
        ath10k_dbg(ar, ATH10K_DBG_BOOT,
-                  "boot get otp board id result 0x%08x board_id %d chip_id %d\n",
-                  result, board_id, chip_id);
+                  "boot get otp board id result 0x%08x board_id %d chip_id %d ext_bid_support %d\n",
+                  result, board_id, chip_id, ext_bid_support);
+
+       ar->id.ext_bid_supported = ext_bid_support;
 
        if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0 ||
            (board_id == 0)) {
@@ -1055,64 +947,6 @@ static int ath10k_core_check_dt(struct ath10k *ar)
        return 0;
 }
 
-static int ath10k_download_and_run_otp(struct ath10k *ar)
-{
-       u32 result, address = ar->hw_params.patch_load_addr;
-       u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param;
-       int ret;
-
-       ret = ath10k_download_board_data(ar,
-                                        ar->running_fw->board_data,
-                                        ar->running_fw->board_len);
-       if (ret) {
-               ath10k_err(ar, "failed to download board data: %d\n", ret);
-               return ret;
-       }
-
-       /* OTP is optional */
-
-       if (!ar->running_fw->fw_file.otp_data ||
-           !ar->running_fw->fw_file.otp_len) {
-               ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %pK otp_len %zd)!\n",
-                           ar->running_fw->fw_file.otp_data,
-                           ar->running_fw->fw_file.otp_len);
-               return 0;
-       }
-
-       ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
-                  address, ar->running_fw->fw_file.otp_len);
-
-       ret = ath10k_bmi_fast_download(ar, address,
-                                      ar->running_fw->fw_file.otp_data,
-                                      ar->running_fw->fw_file.otp_len);
-       if (ret) {
-               ath10k_err(ar, "could not write otp (%d)\n", ret);
-               return ret;
-       }
-
-       /* As of now pre-cal is valid for 10_4 variants */
-       if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT ||
-           ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE)
-               bmi_otp_exe_param = BMI_PARAM_FLASH_SECTION_ALL;
-
-       ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result);
-       if (ret) {
-               ath10k_err(ar, "could not execute otp (%d)\n", ret);
-               return ret;
-       }
-
-       ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
-
-       if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
-                                  ar->running_fw->fw_file.fw_features)) &&
-           result != 0) {
-               ath10k_err(ar, "otp calibration failed: %d", result);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
 static int ath10k_download_fw(struct ath10k *ar)
 {
        u32 address, data_len;
@@ -1135,14 +969,24 @@ static int ath10k_download_fw(struct ath10k *ar)
                   "boot uploading firmware image %pK len %d\n",
                   data, data_len);
 
-       ret = ath10k_bmi_fast_download(ar, address, data, data_len);
-       if (ret) {
-               ath10k_err(ar, "failed to download firmware: %d\n",
-                          ret);
-               return ret;
+       /* Check if the device supports downloading the firmware via the
+        * diag copy engine (CE). Downloading via the diag CE greatly
+        * reduces the firmware download time.
+        */
+       if (ar->hw_params.fw_diag_ce_download) {
+               ret = ath10k_hw_diag_fast_download(ar, address,
+                                                  data, data_len);
+               if (ret == 0)
+                       /* firmware upload via diag ce was successful */
+                       return 0;
+
+               ath10k_warn(ar,
+                           "failed to upload firmware via diag ce, trying BMI: %d",
+                           ret);
        }
 
-       return ret;
+       return ath10k_bmi_fast_download(ar, address,
+                                       data, data_len);
 }
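
The hunk above turns the firmware download into a two-stage affair: the diag copy engine is tried first and BMI stays as the fallback. As a rough standalone illustration of that try-fast-then-fall-back shape (not the driver's API; diag_ce_download() and bmi_download() are invented stand-ins):

/*
 * Minimal userspace sketch of the fast-path-with-fallback pattern.
 */
#include <stddef.h>
#include <stdio.h>

static int diag_ce_download(const void *data, size_t len)
{
	(void)data;
	(void)len;
	return -1;	/* pretend the fast path is unavailable */
}

static int bmi_download(const void *data, size_t len)
{
	(void)data;
	printf("downloaded %zu bytes via BMI\n", len);
	return 0;
}

static int download_fw(const void *data, size_t len, int has_diag_ce)
{
	if (has_diag_ce) {
		int ret = diag_ce_download(data, len);

		if (ret == 0)
			return 0;	/* fast path succeeded */
		fprintf(stderr, "diag CE failed (%d), trying BMI\n", ret);
	}

	return bmi_download(data, len);	/* slower but always available */
}

int main(void)
{
	char image[64] = { 0 };

	return download_fw(image, sizeof(image), 1);
}
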
 
 static void ath10k_core_free_board_files(struct ath10k *ar)
@@ -1150,9 +994,15 @@ static void ath10k_core_free_board_files(struct ath10k *ar)
        if (!IS_ERR(ar->normal_mode_fw.board))
                release_firmware(ar->normal_mode_fw.board);
 
+       if (!IS_ERR(ar->normal_mode_fw.ext_board))
+               release_firmware(ar->normal_mode_fw.ext_board);
+
        ar->normal_mode_fw.board = NULL;
        ar->normal_mode_fw.board_data = NULL;
        ar->normal_mode_fw.board_len = 0;
+       ar->normal_mode_fw.ext_board = NULL;
+       ar->normal_mode_fw.ext_board_data = NULL;
+       ar->normal_mode_fw.ext_board_len = 0;
 }
 
 static void ath10k_core_free_firmware_files(struct ath10k *ar)
@@ -1206,28 +1056,47 @@ success:
        return 0;
 }
 
-static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar)
+static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar, int bd_ie_type)
 {
-       if (!ar->hw_params.fw.board) {
-               ath10k_err(ar, "failed to find board file fw entry\n");
-               return -EINVAL;
-       }
+       const struct firmware *fw;
 
-       ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
-                                                       ar->hw_params.fw.dir,
-                                                       ar->hw_params.fw.board);
-       if (IS_ERR(ar->normal_mode_fw.board))
-               return PTR_ERR(ar->normal_mode_fw.board);
+       if (bd_ie_type == ATH10K_BD_IE_BOARD) {
+               if (!ar->hw_params.fw.board) {
+                       ath10k_err(ar, "failed to find board file fw entry\n");
+                       return -EINVAL;
+               }
+
+               ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
+                                                               ar->hw_params.fw.dir,
+                                                               ar->hw_params.fw.board);
+               if (IS_ERR(ar->normal_mode_fw.board))
+                       return PTR_ERR(ar->normal_mode_fw.board);
+
+               ar->normal_mode_fw.board_data = ar->normal_mode_fw.board->data;
+               ar->normal_mode_fw.board_len = ar->normal_mode_fw.board->size;
+       } else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) {
+               if (!ar->hw_params.fw.eboard) {
+                       ath10k_err(ar, "failed to find eboard file fw entry\n");
+                       return -EINVAL;
+               }
 
-       ar->normal_mode_fw.board_data = ar->normal_mode_fw.board->data;
-       ar->normal_mode_fw.board_len = ar->normal_mode_fw.board->size;
+               fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
+                                         ar->hw_params.fw.eboard);
+               ar->normal_mode_fw.ext_board = fw;
+               if (IS_ERR(ar->normal_mode_fw.ext_board))
+                       return PTR_ERR(ar->normal_mode_fw.ext_board);
+
+               ar->normal_mode_fw.ext_board_data = ar->normal_mode_fw.ext_board->data;
+               ar->normal_mode_fw.ext_board_len = ar->normal_mode_fw.ext_board->size;
+       }
 
        return 0;
 }
 
 static int ath10k_core_parse_bd_ie_board(struct ath10k *ar,
                                         const void *buf, size_t buf_len,
-                                        const char *boardname)
+                                        const char *boardname,
+                                        int bd_ie_type)
 {
        const struct ath10k_fw_ie *hdr;
        bool name_match_found;
@@ -1276,12 +1145,21 @@ static int ath10k_core_parse_bd_ie_board(struct ath10k *ar,
                                /* no match found */
                                break;
 
-                       ath10k_dbg(ar, ATH10K_DBG_BOOT,
-                                  "boot found board data for '%s'",
-                                  boardname);
+                       if (bd_ie_type == ATH10K_BD_IE_BOARD) {
+                               ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                                          "boot found board data for '%s'",
+                                          boardname);
 
-                       ar->normal_mode_fw.board_data = board_ie_data;
-                       ar->normal_mode_fw.board_len = board_ie_len;
+                               ar->normal_mode_fw.board_data = board_ie_data;
+                               ar->normal_mode_fw.board_len = board_ie_len;
+                       } else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) {
+                               ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                                          "boot found eboard data for '%s'",
+                                          boardname);
+
+                               ar->normal_mode_fw.ext_board_data = board_ie_data;
+                               ar->normal_mode_fw.ext_board_len = board_ie_len;
+                       }
 
                        ret = 0;
                        goto out;
@@ -1331,7 +1209,18 @@ static int ath10k_core_search_bd(struct ath10k *ar,
                switch (ie_id) {
                case ATH10K_BD_IE_BOARD:
                        ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len,
-                                                           boardname);
+                                                           boardname,
+                                                           ATH10K_BD_IE_BOARD);
+                       if (ret == -ENOENT)
+                               /* no match found, continue */
+                               break;
+
+                       /* either found or error, so stop searching */
+                       goto out;
+               case ATH10K_BD_IE_BOARD_EXT:
+                       ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len,
+                                                           boardname,
+                                                           ATH10K_BD_IE_BOARD_EXT);
                        if (ret == -ENOENT)
                                /* no match found, continue */
                                break;
@@ -1361,9 +1250,11 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
        const u8 *data;
        int ret;
 
-       ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
-                                                       ar->hw_params.fw.dir,
-                                                       filename);
+       /* Skip if already fetched during board data download */
+       if (!ar->normal_mode_fw.board)
+               ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
+                                                               ar->hw_params.fw.dir,
+                                                               filename);
        if (IS_ERR(ar->normal_mode_fw.board))
                return PTR_ERR(ar->normal_mode_fw.board);
 
@@ -1451,23 +1342,49 @@ out:
        return 0;
 }
 
-static int ath10k_core_fetch_board_file(struct ath10k *ar)
+static int ath10k_core_create_eboard_name(struct ath10k *ar, char *name,
+                                         size_t name_len)
+{
+       if (ar->id.bmi_ids_valid) {
+               scnprintf(name, name_len,
+                         "bus=%s,bmi-chip-id=%d,bmi-eboard-id=%d",
+                         ath10k_bus_str(ar->hif.bus),
+                         ar->id.bmi_chip_id,
+                         ar->id.bmi_eboard_id);
+
+               ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using eboard name '%s'\n", name);
+               return 0;
+       }
+       /* Fall back to the default eboard file when valid BMI board ids
+        * are not available.
+        */
+       return -1;
+}
+
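For reference, a userspace sketch of the name composition done by the scnprintf() call above, with made-up id values:

#include <stdio.h>

int main(void)
{
	char name[100];
	const char *bus = "pci";	/* illustrative values only */
	int bmi_chip_id = 2;
	int bmi_eboard_id = 17;

	snprintf(name, sizeof(name), "bus=%s,bmi-chip-id=%d,bmi-eboard-id=%d",
		 bus, bmi_chip_id, bmi_eboard_id);
	puts(name);	/* bus=pci,bmi-chip-id=2,bmi-eboard-id=17 */
	return 0;
}

The resulting string is used to look up a matching extended board data entry by name, the same way the regular board name is matched.
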
+static int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type)
 {
        char boardname[100], fallback_boardname[100];
        int ret;
 
-       ret = ath10k_core_create_board_name(ar, boardname,
-                                           sizeof(boardname), true);
-       if (ret) {
-               ath10k_err(ar, "failed to create board name: %d", ret);
-               return ret;
-       }
+       if (bd_ie_type == ATH10K_BD_IE_BOARD) {
+               ret = ath10k_core_create_board_name(ar, boardname,
+                                                   sizeof(boardname), true);
+               if (ret) {
+                       ath10k_err(ar, "failed to create board name: %d", ret);
+                       return ret;
+               }
 
-       ret = ath10k_core_create_board_name(ar, fallback_boardname,
-                                           sizeof(boardname), false);
-       if (ret) {
-               ath10k_err(ar, "failed to create fallback board name: %d", ret);
-               return ret;
+               ret = ath10k_core_create_board_name(ar, fallback_boardname,
+                                                   sizeof(boardname), false);
+               if (ret) {
+                       ath10k_err(ar, "failed to create fallback board name: %d", ret);
+                       return ret;
+               }
+       } else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) {
+               ret = ath10k_core_create_eboard_name(ar, boardname,
+                                                    sizeof(boardname));
+               if (ret) {
+                       ath10k_err(ar, "falling back to eboard.bin since board id is 0\n");
+                       goto fallback;
+               }
        }
 
        ar->bd_api = 2;
@@ -1477,8 +1394,9 @@ static int ath10k_core_fetch_board_file(struct ath10k *ar)
        if (!ret)
                goto success;
 
+fallback:
        ar->bd_api = 1;
-       ret = ath10k_core_fetch_board_data_api_1(ar);
+       ret = ath10k_core_fetch_board_data_api_1(ar, bd_ie_type);
        if (ret) {
                ath10k_err(ar, "failed to fetch board-2.bin or board.bin from %s\n",
                           ar->hw_params.fw.dir);
@@ -1490,6 +1408,291 @@ success:
        return 0;
 }
 
+static int ath10k_core_get_ext_board_id_from_otp(struct ath10k *ar)
+{
+       u32 result, address;
+       u8 ext_board_id;
+       int ret;
+
+       address = ar->hw_params.patch_load_addr;
+
+       if (!ar->normal_mode_fw.fw_file.otp_data ||
+           !ar->normal_mode_fw.fw_file.otp_len) {
+               ath10k_warn(ar,
+                           "failed to retrieve extended board id: otp binary missing\n");
+               return -ENODATA;
+       }
+
+       ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                  "boot upload otp to 0x%x len %zd for ext board id\n",
+                  address, ar->normal_mode_fw.fw_file.otp_len);
+
+       ret = ath10k_bmi_fast_download(ar, address,
+                                      ar->normal_mode_fw.fw_file.otp_data,
+                                      ar->normal_mode_fw.fw_file.otp_len);
+       if (ret) {
+               ath10k_err(ar, "could not write otp for ext board id check: %d\n",
+                          ret);
+               return ret;
+       }
+
+       ret = ath10k_bmi_execute(ar, address, BMI_PARAM_GET_EXT_BOARD_ID, &result);
+       if (ret) {
+               ath10k_err(ar, "could not execute otp for ext board id check: %d\n",
+                          ret);
+               return ret;
+       }
+
+       if (!result) {
+               ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                          "ext board id does not exist in otp, ignoring it\n");
+               return -EOPNOTSUPP;
+       }
+
+       ext_board_id = result & ATH10K_BMI_EBOARD_ID_STATUS_MASK;
+
+       ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                  "boot get otp ext board id result 0x%08x ext_board_id %d\n",
+                  result, ext_board_id);
+
+       ar->id.bmi_eboard_id = ext_board_id;
+
+       return 0;
+}
+
+static int ath10k_download_board_data(struct ath10k *ar, const void *data,
+                                     size_t data_len)
+{
+       u32 board_data_size = ar->hw_params.fw.board_size;
+       u32 eboard_data_size = ar->hw_params.fw.ext_board_size;
+       u32 board_address;
+       u32 ext_board_address;
+       int ret;
+
+       ret = ath10k_push_board_ext_data(ar, data, data_len);
+       if (ret) {
+               ath10k_err(ar, "could not push board ext data (%d)\n", ret);
+               goto exit;
+       }
+
+       ret = ath10k_bmi_read32(ar, hi_board_data, &board_address);
+       if (ret) {
+               ath10k_err(ar, "could not read board data addr (%d)\n", ret);
+               goto exit;
+       }
+
+       ret = ath10k_bmi_write_memory(ar, board_address, data,
+                                     min_t(u32, board_data_size,
+                                           data_len));
+       if (ret) {
+               ath10k_err(ar, "could not write board data (%d)\n", ret);
+               goto exit;
+       }
+
+       ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1);
+       if (ret) {
+               ath10k_err(ar, "could not write board data bit (%d)\n", ret);
+               goto exit;
+       }
+
+       if (!ar->id.ext_bid_supported)
+               goto exit;
+
+       /* Extended board data download */
+       ret = ath10k_core_get_ext_board_id_from_otp(ar);
+       if (ret == -EOPNOTSUPP) {
+               /* Not fetching ext_board_data if ext board id is 0 */
+               ath10k_dbg(ar, ATH10K_DBG_BOOT, "otp returned ext board id 0\n");
+               return 0;
+       } else if (ret) {
+               ath10k_err(ar, "failed to get extended board id: %d\n", ret);
+               goto exit;
+       }
+
+       ret = ath10k_core_fetch_board_file(ar, ATH10K_BD_IE_BOARD_EXT);
+       if (ret)
+               goto exit;
+
+       if (ar->normal_mode_fw.ext_board_data) {
+               ext_board_address = board_address + EXT_BOARD_ADDRESS_OFFSET;
+               ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                          "boot writing ext board data to addr 0x%x",
+                          ext_board_address);
+               ret = ath10k_bmi_write_memory(ar, ext_board_address,
+                                             ar->normal_mode_fw.ext_board_data,
+                                             min_t(u32, eboard_data_size, data_len));
+               if (ret)
+                       ath10k_err(ar, "failed to write ext board data: %d\n", ret);
+       }
+
+exit:
+       return ret;
+}
+
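A simplified sketch of the addressing above: board data is written at the address read back from the target, the extended board data at a fixed offset from it, and each write is clamped to the destination size. The offset and sizes below are invented, not the hardware's real values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EXT_BOARD_OFFSET 0x1000	/* hypothetical stand-in for EXT_BOARD_ADDRESS_OFFSET */

static uint8_t target_mem[0x2000];	/* fake target memory */

static void write_mem(uint32_t addr, const void *data, size_t len)
{
	memcpy(&target_mem[addr], data, len);
}

static size_t min_sz(size_t a, size_t b)
{
	return a < b ? a : b;
}

int main(void)
{
	uint8_t board[32] = { 0xaa };
	uint8_t eboard[16] = { 0xbb };
	uint32_t board_addr = 0x0;
	size_t board_cap = 24;	/* pretend destination capacity */

	/* clamp the write to what the destination can hold */
	write_mem(board_addr, board, min_sz(board_cap, sizeof(board)));
	/* extended board data lands at a fixed offset from the base */
	write_mem(board_addr + EXT_BOARD_OFFSET, eboard, sizeof(eboard));
	printf("board[0]=0x%02x eboard[0]=0x%02x\n",
	       target_mem[0], target_mem[EXT_BOARD_OFFSET]);
	return 0;
}
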
+static int ath10k_download_and_run_otp(struct ath10k *ar)
+{
+       u32 result, address = ar->hw_params.patch_load_addr;
+       u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param;
+       int ret;
+
+       ret = ath10k_download_board_data(ar,
+                                        ar->running_fw->board_data,
+                                        ar->running_fw->board_len);
+       if (ret) {
+               ath10k_err(ar, "failed to download board data: %d\n", ret);
+               return ret;
+       }
+
+       /* OTP is optional */
+
+       if (!ar->running_fw->fw_file.otp_data ||
+           !ar->running_fw->fw_file.otp_len) {
+               ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %pK otp_len %zd)!\n",
+                           ar->running_fw->fw_file.otp_data,
+                           ar->running_fw->fw_file.otp_len);
+               return 0;
+       }
+
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
+                  address, ar->running_fw->fw_file.otp_len);
+
+       ret = ath10k_bmi_fast_download(ar, address,
+                                      ar->running_fw->fw_file.otp_data,
+                                      ar->running_fw->fw_file.otp_len);
+       if (ret) {
+               ath10k_err(ar, "could not write otp (%d)\n", ret);
+               return ret;
+       }
+
+       /* As of now pre-cal is valid for 10_4 variants */
+       if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT ||
+           ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE)
+               bmi_otp_exe_param = BMI_PARAM_FLASH_SECTION_ALL;
+
+       ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result);
+       if (ret) {
+               ath10k_err(ar, "could not execute otp (%d)\n", ret);
+               return ret;
+       }
+
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
+
+       if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
+                                  ar->running_fw->fw_file.fw_features)) &&
+           result != 0) {
+               ath10k_err(ar, "otp calibration failed: %d", result);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int ath10k_download_cal_file(struct ath10k *ar,
+                                   const struct firmware *file)
+{
+       int ret;
+
+       if (!file)
+               return -ENOENT;
+
+       if (IS_ERR(file))
+               return PTR_ERR(file);
+
+       ret = ath10k_download_board_data(ar, file->data, file->size);
+       if (ret) {
+               ath10k_err(ar, "failed to download cal_file data: %d\n", ret);
+               return ret;
+       }
+
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cal file downloaded\n");
+
+       return 0;
+}
+
+static int ath10k_download_cal_dt(struct ath10k *ar, const char *dt_name)
+{
+       struct device_node *node;
+       int data_len;
+       void *data;
+       int ret;
+
+       node = ar->dev->of_node;
+       if (!node)
+               /* Device Tree is optional, don't print any warnings if
+                * there's no node for ath10k.
+                */
+               return -ENOENT;
+
+       if (!of_get_property(node, dt_name, &data_len)) {
+               /* The calibration data node is optional */
+               return -ENOENT;
+       }
+
+       if (data_len != ar->hw_params.cal_data_len) {
+               ath10k_warn(ar, "invalid calibration data length in DT: %d\n",
+                           data_len);
+               ret = -EMSGSIZE;
+               goto out;
+       }
+
+       data = kmalloc(data_len, GFP_KERNEL);
+       if (!data) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       ret = of_property_read_u8_array(node, dt_name, data, data_len);
+       if (ret) {
+               ath10k_warn(ar, "failed to read calibration data from DT: %d\n",
+                           ret);
+               goto out_free;
+       }
+
+       ret = ath10k_download_board_data(ar, data, data_len);
+       if (ret) {
+               ath10k_warn(ar, "failed to download calibration data from Device Tree: %d\n",
+                           ret);
+               goto out_free;
+       }
+
+       ret = 0;
+
+out_free:
+       kfree(data);
+
+out:
+       return ret;
+}
+
+static int ath10k_download_cal_eeprom(struct ath10k *ar)
+{
+       size_t data_len;
+       void *data = NULL;
+       int ret;
+
+       ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &data_len);
+       if (ret) {
+               if (ret != -EOPNOTSUPP)
+                       ath10k_warn(ar, "failed to read calibration data from EEPROM: %d\n",
+                                   ret);
+               goto out_free;
+       }
+
+       ret = ath10k_download_board_data(ar, data, data_len);
+       if (ret) {
+               ath10k_warn(ar, "failed to download calibration data from EEPROM: %d\n",
+                           ret);
+               goto out_free;
+       }
+
+       ret = 0;
+
+out_free:
+       kfree(data);
+
+       return ret;
+}
+
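The calibration helpers above share one convention: a loader returns -ENOENT when its source is simply absent, which lets the caller chain the sources and quietly move on to the next one. A minimal sketch of that chaining (the loaders and their order here are illustrative, not the driver's exact sequence):

#include <errno.h>
#include <stdio.h>

static int load_cal_file(void)   { return -ENOENT; }	/* no cal file */
static int load_cal_dt(void)     { return -ENOENT; }	/* no DT property */
static int load_cal_eeprom(void) { return 0; }		/* EEPROM succeeds */

int main(void)
{
	int ret;

	ret = load_cal_file();
	if (ret == -ENOENT)
		ret = load_cal_dt();	/* absence is not an error, try next */
	if (ret == -ENOENT)
		ret = load_cal_eeprom();

	if (ret)
		fprintf(stderr, "no calibration data: %d\n", ret);
	else
		puts("calibration data loaded");
	return ret ? 1 : 0;
}
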
 int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
                                     struct ath10k_fw_file *fw_file)
 {
@@ -1882,7 +2085,8 @@ static int ath10k_init_hw_params(struct ath10k *ar)
        for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) {
                hw_params = &ath10k_hw_params_list[i];
 
-               if (hw_params->id == ar->target_version &&
+               if (hw_params->bus == ar->hif.bus &&
+                   hw_params->id == ar->target_version &&
                    hw_params->dev_id == ar->dev_id)
                        break;
        }
@@ -1983,6 +2187,7 @@ static void ath10k_core_set_coverage_class_work(struct work_struct *work)
 static int ath10k_core_init_firmware_features(struct ath10k *ar)
 {
        struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file;
+       int max_num_peers;
 
        if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, fw_file->fw_features) &&
            !test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) {
@@ -2062,7 +2267,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
 
        switch (fw_file->wmi_op_version) {
        case ATH10K_FW_WMI_OP_VERSION_MAIN:
-               ar->max_num_peers = TARGET_NUM_PEERS;
+               max_num_peers = TARGET_NUM_PEERS;
                ar->max_num_stations = TARGET_NUM_STATIONS;
                ar->max_num_vdevs = TARGET_NUM_VDEVS;
                ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
@@ -2074,10 +2279,10 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
        case ATH10K_FW_WMI_OP_VERSION_10_2:
        case ATH10K_FW_WMI_OP_VERSION_10_2_4:
                if (ath10k_peer_stats_enabled(ar)) {
-                       ar->max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS;
+                       max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS;
                        ar->max_num_stations = TARGET_10X_TX_STATS_NUM_STATIONS;
                } else {
-                       ar->max_num_peers = TARGET_10X_NUM_PEERS;
+                       max_num_peers = TARGET_10X_NUM_PEERS;
                        ar->max_num_stations = TARGET_10X_NUM_STATIONS;
                }
                ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
@@ -2086,7 +2291,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
                ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
                break;
        case ATH10K_FW_WMI_OP_VERSION_TLV:
-               ar->max_num_peers = TARGET_TLV_NUM_PEERS;
+               max_num_peers = TARGET_TLV_NUM_PEERS;
                ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
                ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
                ar->max_num_tdls_vdevs = TARGET_TLV_NUM_TDLS_VDEVS;
@@ -2098,7 +2303,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
                ar->wmi.mgmt_max_num_pending_tx = TARGET_TLV_MGMT_NUM_MSDU_DESC;
                break;
        case ATH10K_FW_WMI_OP_VERSION_10_4:
-               ar->max_num_peers = TARGET_10_4_NUM_PEERS;
+               max_num_peers = TARGET_10_4_NUM_PEERS;
                ar->max_num_stations = TARGET_10_4_NUM_STATIONS;
                ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS;
                ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS;
@@ -2117,10 +2322,16 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
                break;
        case ATH10K_FW_WMI_OP_VERSION_UNSET:
        case ATH10K_FW_WMI_OP_VERSION_MAX:
+       default:
                WARN_ON(1);
                return -EINVAL;
        }
 
+       if (ar->hw_params.num_peers)
+               ar->max_num_peers = ar->hw_params.num_peers;
+       else
+               ar->max_num_peers = max_num_peers;
+
        /* Backwards compatibility for firmwares without
         * ATH10K_FW_IE_HTT_OP_VERSION.
         */
@@ -2370,6 +2581,10 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
                             ar->wmi.svc_map))
                        val |= WMI_10_4_TDLS_UAPSD_BUFFER_STA;
 
+               if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI,
+                            ar->wmi.svc_map))
+                       val |= WMI_10_4_TX_DATA_ACK_RSSI;
+
                status = ath10k_mac_ext_resource_config(ar, val);
                if (status) {
                        ath10k_err(ar,
@@ -2405,7 +2620,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
         * possible to implicitly make it correct by creating a dummy vdev and
         * then deleting it.
         */
-       if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
+       if (ar->hw_params.hw_filter_reset_required &&
+           mode == ATH10K_FIRMWARE_MODE_NORMAL) {
                status = ath10k_core_reset_rx_filter(ar);
                if (status) {
                        ath10k_err(ar,
@@ -2593,7 +2809,7 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
                if (ret)
                        ath10k_dbg(ar, ATH10K_DBG_BOOT, "DT bdf variant name not set.\n");
 
-               ret = ath10k_core_fetch_board_file(ar);
+               ret = ath10k_core_fetch_board_file(ar, ATH10K_BD_IE_BOARD);
                if (ret) {
                        ath10k_err(ar, "failed to fetch board file: %d\n", ret);
                        goto err_free_firmware_files;
@@ -2602,6 +2818,8 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
                ath10k_debug_print_board_info(ar);
        }
 
+       device_get_mac_address(ar->dev, ar->mac_addr, sizeof(ar->mac_addr));
+
        ret = ath10k_core_init_firmware_features(ar);
        if (ret) {
                ath10k_err(ar, "fatal problem with firmware features: %d\n",
@@ -2714,9 +2932,11 @@ err:
        return;
 }
 
-int ath10k_core_register(struct ath10k *ar, u32 chip_id)
+int ath10k_core_register(struct ath10k *ar,
+                        const struct ath10k_bus_params *bus_params)
 {
-       ar->chip_id = chip_id;
+       ar->chip_id = bus_params->chip_id;
+       ar->dev_type = bus_params->dev_type;
        queue_work(ar->workqueue, &ar->register_work);
 
        return 0;
index 9feea02e7d3730c9350c6c6cfda722acb2512c19..c76af343db3dc3fea09b641a08ae08cea777efa1 100644 (file)
 
 struct ath10k;
 
-enum ath10k_bus {
-       ATH10K_BUS_PCI,
-       ATH10K_BUS_AHB,
-       ATH10K_BUS_SDIO,
-       ATH10K_BUS_USB,
-       ATH10K_BUS_SNOC,
-};
-
 static inline const char *ath10k_bus_str(enum ath10k_bus bus)
 {
        switch (bus) {
@@ -461,6 +453,36 @@ struct ath10k_sta_tid_stats {
        unsigned long int rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_MAX];
 };
 
+enum ath10k_counter_type {
+       ATH10K_COUNTER_TYPE_BYTES,
+       ATH10K_COUNTER_TYPE_PKTS,
+       ATH10K_COUNTER_TYPE_MAX,
+};
+
+enum ath10k_stats_type {
+       ATH10K_STATS_TYPE_SUCC,
+       ATH10K_STATS_TYPE_FAIL,
+       ATH10K_STATS_TYPE_RETRY,
+       ATH10K_STATS_TYPE_AMPDU,
+       ATH10K_STATS_TYPE_MAX,
+};
+
+struct ath10k_htt_data_stats {
+       u64 legacy[ATH10K_COUNTER_TYPE_MAX][ATH10K_LEGACY_NUM];
+       u64 ht[ATH10K_COUNTER_TYPE_MAX][ATH10K_HT_MCS_NUM];
+       u64 vht[ATH10K_COUNTER_TYPE_MAX][ATH10K_VHT_MCS_NUM];
+       u64 bw[ATH10K_COUNTER_TYPE_MAX][ATH10K_BW_NUM];
+       u64 nss[ATH10K_COUNTER_TYPE_MAX][ATH10K_NSS_NUM];
+       u64 gi[ATH10K_COUNTER_TYPE_MAX][ATH10K_GI_NUM];
+};
+
+struct ath10k_htt_tx_stats {
+       struct ath10k_htt_data_stats stats[ATH10K_STATS_TYPE_MAX];
+       u64 tx_duration;
+       u64 ba_fails;
+       u64 ack_fails;
+};
+
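A small sketch of how these counters are meant to be indexed: stats type first (succ/fail/retry/ampdu), then counter type (bytes/packets), then rate bucket. Array sizes are reduced and names shortened here for brevity:

#include <stdint.h>
#include <stdio.h>

enum counter_type { COUNTER_BYTES, COUNTER_PKTS, COUNTER_MAX };
enum stats_type { STATS_SUCC, STATS_FAIL, STATS_RETRY, STATS_AMPDU, STATS_MAX };

#define VHT_MCS_NUM 10	/* illustrative; the driver defines its own bound */

struct data_stats {
	uint64_t vht[COUNTER_MAX][VHT_MCS_NUM];
};

struct tx_stats {
	struct data_stats stats[STATS_MAX];
	uint64_t tx_duration;
};

int main(void)
{
	struct tx_stats s = { 0 };
	int mcs = 7;

	/* account one successfully transmitted 1200-byte VHT MCS7 frame */
	s.stats[STATS_SUCC].vht[COUNTER_BYTES][mcs] += 1200;
	s.stats[STATS_SUCC].vht[COUNTER_PKTS][mcs] += 1;

	printf("succ vht mcs%d: %llu bytes, %llu pkts\n", mcs,
	       (unsigned long long)s.stats[STATS_SUCC].vht[COUNTER_BYTES][mcs],
	       (unsigned long long)s.stats[STATS_SUCC].vht[COUNTER_PKTS][mcs]);
	return 0;
}
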
 struct ath10k_sta {
        struct ath10k_vif *arvif;
 
@@ -474,6 +496,7 @@ struct ath10k_sta {
 
        struct work_struct update_wk;
        u64 rx_duration;
+       struct ath10k_htt_tx_stats *tx_stats;
 
 #ifdef CONFIG_MAC80211_DEBUGFS
        /* protected by conf_mutex */
@@ -482,6 +505,8 @@ struct ath10k_sta {
        /* Protected with ar->data_lock */
        struct ath10k_sta_tid_stats tid_stats[IEEE80211_NUM_TIDS + 1];
 #endif
+       /* Protected with ar->data_lock */
+       u32 peer_ps_state;
 };
 
 #define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ)
@@ -607,6 +632,7 @@ struct ath10k_debug {
        u32 reg_addr;
        u32 nf_cal_period;
        void *cal_data;
+       u32 enable_extd_tx_stats;
 };
 
 enum ath10k_state {
@@ -861,6 +887,9 @@ struct ath10k_fw_components {
        const struct firmware *board;
        const void *board_data;
        size_t board_len;
+       const struct firmware *ext_board;
+       const void *ext_board_data;
+       size_t ext_board_len;
 
        struct ath10k_fw_file fw_file;
 };
@@ -880,6 +909,16 @@ struct ath10k_per_peer_tx_stats {
        u32     reserved2;
 };
 
+enum ath10k_dev_type {
+       ATH10K_DEV_TYPE_LL,
+       ATH10K_DEV_TYPE_HL,
+};
+
+struct ath10k_bus_params {
+       u32 chip_id;
+       enum ath10k_dev_type dev_type;
+};
+
 struct ath10k {
        struct ath_common ath_common;
        struct ieee80211_hw *hw;
@@ -890,6 +929,7 @@ struct ath10k {
        enum ath10k_hw_rev hw_rev;
        u16 dev_id;
        u32 chip_id;
+       enum ath10k_dev_type dev_type;
        u32 target_version;
        u8 fw_version_major;
        u32 fw_version_minor;
@@ -908,6 +948,8 @@ struct ath10k {
        u32 low_5ghz_chan;
        u32 high_5ghz_chan;
        bool ani_enabled;
+       /* protected by conf_mutex */
+       u8 ps_state_enable;
 
        bool p2p;
 
@@ -947,7 +989,9 @@ struct ath10k {
 
                bool bmi_ids_valid;
                u8 bmi_board_id;
+               u8 bmi_eboard_id;
                u8 bmi_chip_id;
+               bool ext_bid_supported;
 
                char bdf_ext[ATH10K_SMBIOS_BDF_EXT_STR_LENGTH];
        } id;
@@ -1003,6 +1047,7 @@ struct ath10k {
 
        struct completion install_key_done;
 
+       int last_wmi_vdev_start_status;
        struct completion vdev_setup_done;
 
        struct workqueue_struct *workqueue;
@@ -1167,7 +1212,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
                      const struct ath10k_fw_components *fw_components);
 int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
 void ath10k_core_stop(struct ath10k *ar);
-int ath10k_core_register(struct ath10k *ar, u32 chip_id);
+int ath10k_core_register(struct ath10k *ar,
+                        const struct ath10k_bus_params *bus_params);
 void ath10k_core_unregister(struct ath10k *ar);
 
 #endif /* _CORE_H_ */
index 0baaad90b8d18708ae2058076e04c1f5627128e2..2c0cb6757fc6c1a78ee3d251a5a8aac5b92ca142 100644 (file)
@@ -2042,6 +2042,61 @@ static const struct file_operations fops_btcoex = {
        .open = simple_open
 };
 
+static ssize_t ath10k_write_enable_extd_tx_stats(struct file *file,
+                                                const char __user *ubuf,
+                                                size_t count, loff_t *ppos)
+{
+       struct ath10k *ar = file->private_data;
+       u32 filter;
+       int ret;
+
+       if (kstrtouint_from_user(ubuf, count, 0, &filter))
+               return -EINVAL;
+
+       mutex_lock(&ar->conf_mutex);
+
+       if (ar->state != ATH10K_STATE_ON) {
+               ar->debug.enable_extd_tx_stats = filter;
+               ret = count;
+               goto out;
+       }
+
+       if (filter == ar->debug.enable_extd_tx_stats) {
+               ret = count;
+               goto out;
+       }
+
+       ar->debug.enable_extd_tx_stats = filter;
+       ret = count;
+
+out:
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
+}
+
+static ssize_t ath10k_read_enable_extd_tx_stats(struct file *file,
+                                               char __user *ubuf,
+                                               size_t count, loff_t *ppos)
+{
+       char buf[32];
+       struct ath10k *ar = file->private_data;
+       int len = 0;
+
+       mutex_lock(&ar->conf_mutex);
+       len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
+                       ar->debug.enable_extd_tx_stats);
+       mutex_unlock(&ar->conf_mutex);
+
+       return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_enable_extd_tx_stats = {
+       .read = ath10k_read_enable_extd_tx_stats,
+       .write = ath10k_write_enable_extd_tx_stats,
+       .open = simple_open
+};
+
 static ssize_t ath10k_write_peer_stats(struct file *file,
                                       const char __user *ubuf,
                                       size_t count, loff_t *ppos)
@@ -2343,6 +2398,85 @@ static const struct file_operations fops_warm_hw_reset = {
        .llseek = default_llseek,
 };
 
+static void ath10k_peer_ps_state_disable(void *data,
+                                        struct ieee80211_sta *sta)
+{
+       struct ath10k *ar = data;
+       struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+
+       spin_lock_bh(&ar->data_lock);
+       arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
+       spin_unlock_bh(&ar->data_lock);
+}
+
+static ssize_t ath10k_write_ps_state_enable(struct file *file,
+                                           const char __user *user_buf,
+                                           size_t count, loff_t *ppos)
+{
+       struct ath10k *ar = file->private_data;
+       int ret;
+       u32 param;
+       u8 ps_state_enable;
+
+       if (kstrtou8_from_user(user_buf, count, 0, &ps_state_enable))
+               return -EINVAL;
+
+       if (ps_state_enable > 1)
+               return -EINVAL;
+
+       mutex_lock(&ar->conf_mutex);
+
+       if (ar->ps_state_enable == ps_state_enable) {
+               ret = count;
+               goto exit;
+       }
+
+       param = ar->wmi.pdev_param->peer_sta_ps_statechg_enable;
+       ret = ath10k_wmi_pdev_set_param(ar, param, ps_state_enable);
+       if (ret) {
+               ath10k_warn(ar, "failed to enable ps_state_enable: %d\n",
+                           ret);
+               goto exit;
+       }
+       ar->ps_state_enable = ps_state_enable;
+
+       if (!ar->ps_state_enable)
+               ieee80211_iterate_stations_atomic(ar->hw,
+                                                 ath10k_peer_ps_state_disable,
+                                                 ar);
+
+       ret = count;
+
+exit:
+       mutex_unlock(&ar->conf_mutex);
+
+       return ret;
+}
+
+static ssize_t ath10k_read_ps_state_enable(struct file *file,
+                                          char __user *user_buf,
+                                          size_t count, loff_t *ppos)
+{
+       struct ath10k *ar = file->private_data;
+       int len = 0;
+       char buf[32];
+
+       mutex_lock(&ar->conf_mutex);
+       len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+                       ar->ps_state_enable);
+       mutex_unlock(&ar->conf_mutex);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_ps_state_enable = {
+       .read = ath10k_read_ps_state_enable,
+       .write = ath10k_write_ps_state_enable,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
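The write handler above follows the usual debugfs toggle pattern: parse, reject anything but 0 or 1, and skip the WMI call when the value is unchanged. A userspace model of that parse-and-validate step (kstrtou8_from_user() approximated with strtoul()):

#include <stdio.h>
#include <stdlib.h>

static unsigned char ps_state_enable;	/* current setting */

static int write_ps_state_enable(const char *buf)
{
	char *end;
	unsigned long v = strtoul(buf, &end, 0);

	if (end == buf || v > 1)
		return -1;		/* reject anything but 0 or 1 */
	if (v == ps_state_enable)
		return 0;		/* no change, nothing to push */
	ps_state_enable = (unsigned char)v;
	/* the real driver pushes a WMI pdev param here */
	return 0;
}

int main(void)
{
	printf("%d\n", write_ps_state_enable("1"));	/* 0: accepted */
	printf("%d\n", write_ps_state_enable("2"));	/* -1: out of range */
	return 0;
}

Once registered, the knob can be flipped from the shell, e.g. echo 1 > /sys/kernel/debug/ieee80211/phyX/ath10k/ps_state_enable.
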
 int ath10k_debug_create(struct ath10k *ar)
 {
        ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
@@ -2454,10 +2588,15 @@ int ath10k_debug_register(struct ath10k *ar)
                debugfs_create_file("btcoex", 0644, ar->debug.debugfs_phy, ar,
                                    &fops_btcoex);
 
-       if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+       if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) {
                debugfs_create_file("peer_stats", 0644, ar->debug.debugfs_phy, ar,
                                    &fops_peer_stats);
 
+               debugfs_create_file("enable_extd_tx_stats", 0644,
+                                   ar->debug.debugfs_phy, ar,
+                                   &fops_enable_extd_tx_stats);
+       }
+
        debugfs_create_file("fw_checksums", 0400, ar->debug.debugfs_phy, ar,
                            &fops_fw_checksums);
 
@@ -2474,6 +2613,9 @@ int ath10k_debug_register(struct ath10k *ar)
        debugfs_create_file("warm_hw_reset", 0600, ar->debug.debugfs_phy, ar,
                            &fops_warm_hw_reset);
 
+       debugfs_create_file("ps_state_enable", 0600, ar->debug.debugfs_phy, ar,
+                           &fops_ps_state_enable);
+
        return 0;
 }
 
index 0afca5c106b69e24cc4beb4acc1b4d01e546bac3..3a6191cff2f99e799099140d4794520253744b0a 100644 (file)
@@ -128,6 +128,10 @@ static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar)
        return ar->debug.fw_dbglog_level;
 }
 
+static inline int ath10k_debug_is_extd_tx_stats_enabled(struct ath10k *ar)
+{
+       return ar->debug.enable_extd_tx_stats;
+}
 #else
 
 static inline int ath10k_debug_start(struct ath10k *ar)
@@ -190,6 +194,11 @@ static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar)
        return 0;
 }
 
+static inline int ath10k_debug_is_extd_tx_stats_enabled(struct ath10k *ar)
+{
+       return 0;
+}
+
 #define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
 
 #define ath10k_debug_get_et_strings NULL
index a63c97e2c50c5d29915b334d2937eca5b3892601..b09cdc699c69856b0e07442fb6b256d0f1d911d8 100644 (file)
@@ -460,6 +460,33 @@ static const struct file_operations fops_peer_debug_trigger = {
        .llseek = default_llseek,
 };
 
+static ssize_t ath10k_dbg_sta_read_peer_ps_state(struct file *file,
+                                                char __user *user_buf,
+                                                size_t count, loff_t *ppos)
+{
+       struct ieee80211_sta *sta = file->private_data;
+       struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+       struct ath10k *ar = arsta->arvif->ar;
+       char buf[20];
+       int len = 0;
+
+       spin_lock_bh(&ar->data_lock);
+
+       len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+                       arsta->peer_ps_state);
+
+       spin_unlock_bh(&ar->data_lock);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_peer_ps_state = {
+       .open = simple_open,
+       .read = ath10k_dbg_sta_read_peer_ps_state,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
 static char *get_err_str(enum ath10k_pkt_rx_err i)
 {
        switch (i) {
@@ -626,9 +653,105 @@ static const struct file_operations fops_tid_stats_dump = {
        .llseek = default_llseek,
 };
 
+static ssize_t ath10k_dbg_sta_dump_tx_stats(struct file *file,
+                                           char __user *user_buf,
+                                           size_t count, loff_t *ppos)
+{
+       struct ieee80211_sta *sta = file->private_data;
+       struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+       struct ath10k *ar = arsta->arvif->ar;
+       struct ath10k_htt_data_stats *stats;
+       const char *str_name[ATH10K_STATS_TYPE_MAX] = {"succ", "fail",
+                                                      "retry", "ampdu"};
+       const char *str[ATH10K_COUNTER_TYPE_MAX] = {"bytes", "packets"};
+       int len = 0, i, j, k, retval = 0;
+       const int size = 2 * 4096;
+       char *buf;
+
+       buf = kzalloc(size, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       mutex_lock(&ar->conf_mutex);
+
+       spin_lock_bh(&ar->data_lock);
+       for (k = 0; k < ATH10K_STATS_TYPE_MAX; k++) {
+               for (j = 0; j < ATH10K_COUNTER_TYPE_MAX; j++) {
+                       stats = &arsta->tx_stats->stats[k];
+                       len += scnprintf(buf + len, size - len, "%s_%s\n",
+                                        str_name[k],
+                                        str[j]);
+                       len += scnprintf(buf + len, size - len,
+                                        " VHT MCS %s\n",
+                                        str[j]);
+                       for (i = 0; i < ATH10K_VHT_MCS_NUM; i++)
+                               len += scnprintf(buf + len, size - len,
+                                                "  %llu ",
+                                                stats->vht[j][i]);
+                       len += scnprintf(buf + len, size - len, "\n");
+                       len += scnprintf(buf + len, size - len, " HT MCS %s\n",
+                                        str[j]);
+                       for (i = 0; i < ATH10K_HT_MCS_NUM; i++)
+                               len += scnprintf(buf + len, size - len,
+                                                "  %llu ", stats->ht[j][i]);
+                       len += scnprintf(buf + len, size - len, "\n");
+                       len += scnprintf(buf + len, size - len,
+                                       " BW %s (20,40,80,160 MHz)\n", str[j]);
+                       len += scnprintf(buf + len, size - len,
+                                        "  %llu %llu %llu %llu\n",
+                                        stats->bw[j][0], stats->bw[j][1],
+                                        stats->bw[j][2], stats->bw[j][3]);
+                       len += scnprintf(buf + len, size - len,
+                                        " NSS %s (1x1,2x2,3x3,4x4)\n", str[j]);
+                       len += scnprintf(buf + len, size - len,
+                                        "  %llu %llu %llu %llu\n",
+                                        stats->nss[j][0], stats->nss[j][1],
+                                        stats->nss[j][2], stats->nss[j][3]);
+                       len += scnprintf(buf + len, size - len,
+                                        " GI %s (LGI,SGI)\n",
+                                        str[j]);
+                       len += scnprintf(buf + len, size - len, "  %llu %llu\n",
+                                        stats->gi[j][0], stats->gi[j][1]);
+                       len += scnprintf(buf + len, size - len,
+                                        " legacy rate %s (1,2 ... Mbps)\n  ",
+                                        str[j]);
+                       for (i = 0; i < ATH10K_LEGACY_NUM; i++)
+                               len += scnprintf(buf + len, size - len, "%llu ",
+                                                stats->legacy[j][i]);
+                       len += scnprintf(buf + len, size - len, "\n");
+               }
+       }
+
+       len += scnprintf(buf + len, size - len,
+                        "\nTX duration\n %llu usecs\n",
+                        arsta->tx_stats->tx_duration);
+       len += scnprintf(buf + len, size - len,
+                       "BA fails\n %llu\n", arsta->tx_stats->ba_fails);
+       len += scnprintf(buf + len, size - len,
+                       "ack fails\n %llu\n", arsta->tx_stats->ack_fails);
+       spin_unlock_bh(&ar->data_lock);
+
+       if (len > size)
+               len = size;
+       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+       kfree(buf);
+
+       mutex_unlock(&ar->conf_mutex);
+       return retval;
+}
+
+static const struct file_operations fops_tx_stats = {
+       .read = ath10k_dbg_sta_dump_tx_stats,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
 void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                            struct ieee80211_sta *sta, struct dentry *dir)
 {
+       struct ath10k *ar = hw->priv;
+
        debugfs_create_file("aggr_mode", 0644, dir, sta, &fops_aggr_mode);
        debugfs_create_file("addba", 0200, dir, sta, &fops_addba);
        debugfs_create_file("addba_resp", 0200, dir, sta, &fops_addba_resp);
@@ -637,4 +760,11 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                            &fops_peer_debug_trigger);
        debugfs_create_file("dump_tid_stats", 0400, dir, sta,
                            &fops_tid_stats_dump);
+
+       if (ath10k_peer_stats_enabled(ar) &&
+           ath10k_debug_is_extd_tx_stats_enabled(ar))
+               debugfs_create_file("tx_stats", 0400, dir, sta,
+                                   &fops_tx_stats);
+       debugfs_create_file("peer_ps_state", 0400, dir, sta,
+                           &fops_peer_ps_state);
 }
index 331b8d558791d3c36d605669cee6658fac22f937..28daed5981a11570270762d97afdf5790d6902f7 100644 (file)
@@ -53,7 +53,8 @@ static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
 {
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
 
-       dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
+       if (htc->ar->dev_type != ATH10K_DEV_TYPE_HL)
+               dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
        skb_pull(skb, sizeof(struct ath10k_htc_hdr));
 }
 
@@ -137,11 +138,14 @@ int ath10k_htc_send(struct ath10k_htc *htc,
        ath10k_htc_prepare_tx_skb(ep, skb);
 
        skb_cb->eid = eid;
-       skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
-       ret = dma_mapping_error(dev, skb_cb->paddr);
-       if (ret) {
-               ret = -EIO;
-               goto err_credits;
+       if (ar->dev_type != ATH10K_DEV_TYPE_HL) {
+               skb_cb->paddr = dma_map_single(dev, skb->data, skb->len,
+                                              DMA_TO_DEVICE);
+               ret = dma_mapping_error(dev, skb_cb->paddr);
+               if (ret) {
+                       ret = -EIO;
+                       goto err_credits;
+               }
        }
 
        sg_item.transfer_id = ep->eid;
@@ -157,7 +161,8 @@ int ath10k_htc_send(struct ath10k_htc *htc,
        return 0;
 
 err_unmap:
-       dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
+       if (ar->dev_type != ATH10K_DEV_TYPE_HL)
+               dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
 err_credits:
        if (ep->tx_credit_flow_enabled) {
                spin_lock_bh(&htc->tx_lock);
@@ -803,8 +808,11 @@ setup:
                                                ep->service_id,
                                                &ep->ul_pipe_id,
                                                &ep->dl_pipe_id);
-       if (status)
+       if (status) {
+               ath10k_warn(ar, "unsupported HTC service id: %d\n",
+                           ep->service_id);
                return status;
+       }
 
        ath10k_dbg(ar, ATH10K_DBG_BOOT,
                   "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
@@ -838,6 +846,56 @@ struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
        return skb;
 }
 
+static void ath10k_htc_pktlog_process_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+       trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
+       dev_kfree_skb_any(skb);
+}
+
+static int ath10k_htc_pktlog_connect(struct ath10k *ar)
+{
+       struct ath10k_htc_svc_conn_resp conn_resp;
+       struct ath10k_htc_svc_conn_req conn_req;
+       int status;
+
+       memset(&conn_req, 0, sizeof(conn_req));
+       memset(&conn_resp, 0, sizeof(conn_resp));
+
+       conn_req.ep_ops.ep_tx_complete = NULL;
+       conn_req.ep_ops.ep_rx_complete = ath10k_htc_pktlog_process_rx;
+       conn_req.ep_ops.ep_tx_credits = NULL;
+
+       /* connect to control service */
+       conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_LOG_MSG;
+       status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
+       if (status) {
+               ath10k_warn(ar, "failed to connect to PKTLOG service: %d\n",
+                           status);
+               return status;
+       }
+
+       return 0;
+}
+
+static bool ath10k_htc_pktlog_svc_supported(struct ath10k *ar)
+{
+       u8 ul_pipe_id;
+       u8 dl_pipe_id;
+       int status;
+
+       status = ath10k_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_HTT_LOG_MSG,
+                                               &ul_pipe_id,
+                                               &dl_pipe_id);
+       if (status) {
+               ath10k_warn(ar, "unsupported HTC service id: %d\n",
+                           ATH10K_HTC_SVC_ID_HTT_LOG_MSG);
+
+               return false;
+       }
+
+       return true;
+}
+
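ath10k_htc_pktlog_svc_supported() probes support by asking the HIF layer to map the service to pipes before any connect is attempted. The same probe-then-connect shape, reduced to a standalone sketch (the id and both functions are invented stand-ins):

#include <stdio.h>

#define SVC_ID_HTT_LOG_MSG 0x0b	/* hypothetical service id */

static int map_service_to_pipe(int svc_id)
{
	/* succeed only for services the transport knows about */
	return svc_id == SVC_ID_HTT_LOG_MSG ? 0 : -1;
}

static int connect_service(int svc_id)
{
	printf("connected service %d\n", svc_id);
	return 0;
}

int main(void)
{
	if (map_service_to_pipe(SVC_ID_HTT_LOG_MSG) == 0)
		return connect_service(SVC_ID_HTT_LOG_MSG);
	puts("pktlog service not supported, skipping");
	return 0;
}
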
 int ath10k_htc_start(struct ath10k_htc *htc)
 {
        struct ath10k *ar = htc->ar;
@@ -871,6 +929,14 @@ int ath10k_htc_start(struct ath10k_htc *htc)
                return status;
        }
 
+       if (ath10k_htc_pktlog_svc_supported(ar)) {
+               status = ath10k_htc_pktlog_connect(ar);
+               if (status) {
+                       ath10k_err(ar, "failed to connect to pktlog: %d\n", status);
+                       return status;
+               }
+       }
+
        return 0;
 }
 
index 5d3ff80f3a1f9dd8a27019e85e0345b269998b19..a76f7c9e2199563ffb2fe65ab4e1f9f072467d98 100644 (file)
@@ -29,7 +29,6 @@
 #include "htc.h"
 #include "hw.h"
 #include "rx_desc.h"
-#include "hw.h"
 
 enum htt_dbg_stats_type {
        HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
@@ -577,6 +576,8 @@ struct htt_mgmt_tx_completion {
 #define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK     0xFF000000
 #define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB      24
 
+#define HTT_TX_CMPL_FLAG_DATA_RSSI BIT(0)
+
 struct htt_rx_indication_hdr {
        u8 info0; /* %HTT_RX_INDICATION_INFO0_ */
        __le16 peer_id;
@@ -719,6 +720,15 @@ struct htt_rx_indication {
        struct htt_rx_indication_mpdu_range mpdu_ranges[0];
 } __packed;
 
+/* High latency version of the RX indication */
+struct htt_rx_indication_hl {
+       struct htt_rx_indication_hdr hdr;
+       struct htt_rx_indication_ppdu ppdu;
+       struct htt_rx_indication_prefix prefix;
+       struct fw_rx_desc_hl fw_desc;
+       struct htt_rx_indication_mpdu_range mpdu_ranges[0];
+} __packed;
+
 static inline struct htt_rx_indication_mpdu_range *
                htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind)
 {
@@ -731,6 +741,18 @@ static inline struct htt_rx_indication_mpdu_range *
        return ptr;
 }
 
+static inline struct htt_rx_indication_mpdu_range *
+       htt_rx_ind_get_mpdu_ranges_hl(struct htt_rx_indication_hl *rx_ind)
+{
+       void *ptr = rx_ind;
+
+       ptr += sizeof(rx_ind->hdr)
+            + sizeof(rx_ind->ppdu)
+            + sizeof(rx_ind->prefix)
+            + sizeof(rx_ind->fw_desc);
+       return ptr;
+}
+
 enum htt_rx_flush_mpdu_status {
        HTT_RX_FLUSH_MPDU_DISCARD = 0,
        HTT_RX_FLUSH_MPDU_REORDER = 1,
@@ -840,7 +862,7 @@ struct htt_data_tx_completion {
                } __packed;
        } __packed;
        u8 num_msdus;
-       u8 rsvd0;
+       u8 flags2; /* HTT_TX_CMPL_FLAG_DATA_RSSI */
        __le16 msdus[0]; /* variable length based on %num_msdus */
 } __packed;
 
@@ -1641,6 +1663,7 @@ struct htt_resp {
                struct htt_mgmt_tx_completion mgmt_tx_completion;
                struct htt_data_tx_completion data_tx_completion;
                struct htt_rx_indication rx_ind;
+               struct htt_rx_indication_hl rx_ind_hl;
                struct htt_rx_fragment_indication rx_frag_ind;
                struct htt_rx_peer_map peer_map;
                struct htt_rx_peer_unmap peer_unmap;
@@ -1994,6 +2017,31 @@ struct htt_rx_desc {
        u8 msdu_payload[0];
 };
 
+#define HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK           0x00000fff
+#define HTT_RX_DESC_HL_INFO_SEQ_NUM_LSB            0
+#define HTT_RX_DESC_HL_INFO_ENCRYPTED_MASK         0x00001000
+#define HTT_RX_DESC_HL_INFO_ENCRYPTED_LSB          12
+#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_MASK 0x00002000
+#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_LSB  13
+#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_MASK       0x00008000
+#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_LSB        15
+#define HTT_RX_DESC_HL_INFO_FRAGMENT_MASK          0x00010000
+#define HTT_RX_DESC_HL_INFO_FRAGMENT_LSB           16
+#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_MASK        0x01fe0000
+#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_LSB         17
+
+struct htt_rx_desc_base_hl {
+       __le32 info; /* HTT_RX_DESC_HL_INFO_ */
+};
+
+struct htt_rx_chan_info {
+       __le16 primary_chan_center_freq_mhz;
+       __le16 contig_chan1_center_freq_mhz;
+       __le16 contig_chan2_center_freq_mhz;
+       u8 phy_mode;
+       u8 reserved;
+} __packed;
+
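These MASK/LSB pairs are consumed with the driver's usual field-extraction helper; below is a standalone sketch of that extraction, with MS() reproduced here under that assumption and a made-up info word:

#include <stdint.h>
#include <stdio.h>

#define SEQ_NUM_MASK    0x00000fffu
#define SEQ_NUM_LSB     0
#define ENCRYPTED_MASK  0x00001000u
#define ENCRYPTED_LSB   12
#define KEY_ID_OCT_MASK 0x01fe0000u
#define KEY_ID_OCT_LSB  17

/* mask the field out, then shift it down to bit 0 */
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)

int main(void)
{
	uint32_t info = (5u << KEY_ID_OCT_LSB) | (1u << ENCRYPTED_LSB) | 0x123u;

	printf("seq=%u encrypted=%u key_id=%u\n",
	       (unsigned)MS(info, SEQ_NUM),
	       (unsigned)MS(info, ENCRYPTED),
	       (unsigned)MS(info, KEY_ID_OCT));
	return 0;
}
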
 #define HTT_RX_DESC_ALIGN 8
 
 #define HTT_MAC_ADDR_LEN 6
index 4d1cd90d6d27c3d299095626efd80888eb9e840c..f2405258a6d39f8a7b40f87357a94cc5ab9d2d70 100644 (file)
@@ -265,6 +265,9 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
        struct ath10k_htt *htt = &ar->htt;
        int ret;
 
+       if (ar->dev_type == ATH10K_DEV_TYPE_HL)
+               return 0;
+
        spin_lock_bh(&htt->rx_ring.lock);
        ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
                                              htt->rx_ring.fill_cnt));
@@ -279,6 +282,9 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
 
 void ath10k_htt_rx_free(struct ath10k_htt *htt)
 {
+       if (htt->ar->dev_type == ATH10K_DEV_TYPE_HL)
+               return;
+
        del_timer_sync(&htt->rx_ring.refill_retry_timer);
 
        skb_queue_purge(&htt->rx_msdus_q);
@@ -570,6 +576,9 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
        size_t size;
        struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
 
+       if (ar->dev_type == ATH10K_DEV_TYPE_HL)
+               return 0;
+
        htt->rx_confused = false;
 
        /* XXX: The fill level could be changed during runtime in response to
@@ -1176,11 +1185,11 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
         */
 
        /* This probably shouldn't happen but warn just in case */
-       if (unlikely(WARN_ON_ONCE(!is_first)))
+       if (WARN_ON_ONCE(!is_first))
                return;
 
        /* This probably shouldn't happen but warn just in case */
-       if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
+       if (WARN_ON_ONCE(!(is_first && is_last)))
                return;
 
        skb_trim(msdu, msdu->len - FCS_LEN);
@@ -1846,8 +1855,116 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
        return 0;
 }
 
-static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
-                                     struct htt_rx_indication *rx)
+static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
+                                        struct htt_rx_indication_hl *rx,
+                                        struct sk_buff *skb)
+{
+       struct ath10k *ar = htt->ar;
+       struct ath10k_peer *peer;
+       struct htt_rx_indication_mpdu_range *mpdu_ranges;
+       struct fw_rx_desc_hl *fw_desc;
+       struct ieee80211_hdr *hdr;
+       struct ieee80211_rx_status *rx_status;
+       u16 peer_id;
+       u8 rx_desc_len;
+       int num_mpdu_ranges;
+       size_t tot_hdr_len;
+       struct ieee80211_channel *ch;
+
+       peer_id = __le16_to_cpu(rx->hdr.peer_id);
+
+       spin_lock_bh(&ar->data_lock);
+       peer = ath10k_peer_find_by_id(ar, peer_id);
+       spin_unlock_bh(&ar->data_lock);
+       if (!peer)
+               ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);
+
+       num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
+                            HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
+       mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx);
+       fw_desc = &rx->fw_desc;
+       rx_desc_len = fw_desc->len;
+
+       /* I have not yet seen any case where num_mpdu_ranges > 1.
+        * qcacld does not seem to handle that case either, so we introduce
+        * the same limitation here as well.
+        */
+       if (num_mpdu_ranges > 1)
+               ath10k_warn(ar,
+                           "Unsupported number of MPDU ranges: %d, ignoring all but the first\n",
+                           num_mpdu_ranges);
+
+       if (mpdu_ranges->mpdu_range_status !=
+           HTT_RX_IND_MPDU_STATUS_OK) {
+               ath10k_warn(ar, "MPDU range status: %d\n",
+                           mpdu_ranges->mpdu_range_status);
+               goto err;
+       }
+
+       /* Strip off all headers before the MAC header before delivery to
+        * mac80211
+        */
+       tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) +
+                     sizeof(rx->ppdu) + sizeof(rx->prefix) +
+                     sizeof(rx->fw_desc) +
+                     sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len;
+       skb_pull(skb, tot_hdr_len);
+
+       hdr = (struct ieee80211_hdr *)skb->data;
+       rx_status = IEEE80211_SKB_RXCB(skb);
+       rx_status->chains |= BIT(0);
+       rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
+                           rx->ppdu.combined_rssi;
+       rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
+
+       spin_lock_bh(&ar->data_lock);
+       ch = ar->scan_channel;
+       if (!ch)
+               ch = ar->rx_channel;
+       if (!ch)
+               ch = ath10k_htt_rx_h_any_channel(ar);
+       if (!ch)
+               ch = ar->tgt_oper_chan;
+       spin_unlock_bh(&ar->data_lock);
+
+       if (ch) {
+               rx_status->band = ch->band;
+               rx_status->freq = ch->center_freq;
+       }
+       if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU)
+               rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
+       else
+               rx_status->flag |= RX_FLAG_AMSDU_MORE;
+
+       /* Not entirely sure about this, but all frames from the chipset have
+        * the protected flag set even though they have already been decrypted.
+        * Clearing this flag is necessary in order for mac80211 not to drop
+        * the frame.
+        * TODO: Verify this is always the case or find a way to check
+        * whether hw decryption has been performed.
+        */
+       if (ieee80211_has_protected(hdr->frame_control)) {
+               hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+               rx_status->flag |= RX_FLAG_DECRYPTED |
+                                  RX_FLAG_IV_STRIPPED |
+                                  RX_FLAG_MMIC_STRIPPED;
+       }
+
+       ieee80211_rx_ni(ar->hw, skb);
+
+       /* We have delivered the skb to the upper layers (mac80211) so we
+        * must not free it.
+        */
+       return false;
+err:
+       /* Tell the caller that it must free the skb since we have not
+        * consumed it
+        */
+       return true;
+}
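
The bool return value is an skb-ownership contract. A minimal caller sketch (an assumption about how the dispatcher uses it, not code from the patch):

        /* true: the handler did not consume the skb, so the caller must
         * release it; false: mac80211 now owns the buffer.
         */
        if (ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb))
                dev_kfree_skb_any(skb);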
+
+static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
+                                        struct htt_rx_indication *rx)
 {
        struct ath10k *ar = htt->ar;
        struct htt_rx_indication_mpdu_range *mpdu_ranges;
@@ -1884,7 +2001,9 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
        struct htt_resp *resp = (struct htt_resp *)skb->data;
        struct htt_tx_done tx_done = {};
        int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
-       __le16 msdu_id;
+       __le16 msdu_id, *msdus;
+       bool rssi_enabled = false;
+       u8 msdu_count = 0;
        int i;
 
        switch (status) {
@@ -1908,10 +2027,30 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
                   resp->data_tx_completion.num_msdus);
 
-       for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
-               msdu_id = resp->data_tx_completion.msdus[i];
+       msdu_count = resp->data_tx_completion.num_msdus;
+
+       if (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI)
+               rssi_enabled = true;
+
+       for (i = 0; i < msdu_count; i++) {
+               msdus = resp->data_tx_completion.msdus;
+               msdu_id = msdus[i];
                tx_done.msdu_id = __le16_to_cpu(msdu_id);
 
+               if (rssi_enabled) {
+                       /* The total number of MSDUs should be even;
+                        * if an odd number is sent, the firmware pads
+                        * the last msdu id with 0xffff.
+                        */
+                       if (msdu_count & 0x01) {
+                               msdu_id = msdus[msdu_count + i + 1];
+                               tx_done.ack_rssi = __le16_to_cpu(msdu_id);
+                       } else {
+                               msdu_id = msdus[msdu_count + i];
+                               tx_done.ack_rssi = __le16_to_cpu(msdu_id);
+                       }
+               }
+
                /* kfifo_put: In practice firmware shouldn't fire off per-CE
                 * interrupt and main interrupt (MSI/-X range case) for the same
                 * HTC service so it should be safe to use kfifo_put w/o lock.
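
The indexing above implies that the msdu ids are padded to an even count before the per-msdu ACK RSSI words begin. A hypothetical helper capturing the assumed math (not part of the patch):

        static u16 ack_rssi_index(u16 msdu_count, u16 i)
        {
                /* ids are padded with 0xffff to an even count; the RSSI
                 * words for msdu i follow immediately after the padding
                 */
                return round_up(msdu_count, 2) + i;
        }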
@@ -2488,7 +2627,7 @@ void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                dev_kfree_skb_any(skb);
 }
 
-static inline bool is_valid_legacy_rate(u8 rate)
+static inline int ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
 {
        static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
                                          18, 24, 36, 48, 54};
@@ -2496,10 +2635,116 @@ static inline bool is_valid_legacy_rate(u8 rate)
 
        for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
                if (rate == legacy_rates[i])
-                       return true;
+                       return i;
        }
 
-       return false;
+       ath10k_warn(ar, "Invalid legacy rate %hhd peer stats", rate);
+       return -EINVAL;
+}
+
+static void
+ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
+                                   struct ath10k_sta *arsta,
+                                   struct ath10k_per_peer_tx_stats *pstats,
+                                   s8 legacy_rate_idx)
+{
+       struct rate_info *txrate = &arsta->txrate;
+       struct ath10k_htt_tx_stats *tx_stats;
+       int ht_idx, gi, mcs, bw, nss;
+
+       if (!arsta->tx_stats)
+               return;
+
+       tx_stats = arsta->tx_stats;
+       gi = (arsta->txrate.flags & RATE_INFO_FLAGS_SHORT_GI);
+       ht_idx = txrate->mcs + txrate->nss * 8;
+       mcs = txrate->mcs;
+       bw = txrate->bw;
+       nss = txrate->nss;
+
+#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]
+
+       if (txrate->flags == RATE_INFO_FLAGS_VHT_MCS) {
+               STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;
+               STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;
+               STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;
+               STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts;
+               STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;
+               STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;
+       } else if (txrate->flags == RATE_INFO_FLAGS_MCS) {
+               STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;
+               STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;
+               STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;
+               STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts;
+               STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes;
+               STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
+       } else {
+               mcs = legacy_rate_idx;
+               if (mcs < 0)
+                       return;
+
+               STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
+               STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
+               STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes;
+               STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts;
+               STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes;
+               STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts;
+       }
+
+       if (ATH10K_HW_AMPDU(pstats->flags)) {
+               tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);
+
+               if (txrate->flags == RATE_INFO_FLAGS_MCS) {
+                       STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=
+                               pstats->succ_bytes + pstats->retry_bytes;
+                       STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=
+                               pstats->succ_pkts + pstats->retry_pkts;
+               } else {
+                       STATS_OP_FMT(AMPDU).vht[0][mcs] +=
+                               pstats->succ_bytes + pstats->retry_bytes;
+                       STATS_OP_FMT(AMPDU).vht[1][mcs] +=
+                               pstats->succ_pkts + pstats->retry_pkts;
+               }
+               STATS_OP_FMT(AMPDU).bw[0][bw] +=
+                       pstats->succ_bytes + pstats->retry_bytes;
+               STATS_OP_FMT(AMPDU).nss[0][nss] +=
+                       pstats->succ_bytes + pstats->retry_bytes;
+               STATS_OP_FMT(AMPDU).gi[0][gi] +=
+                       pstats->succ_bytes + pstats->retry_bytes;
+               STATS_OP_FMT(AMPDU).bw[1][bw] +=
+                       pstats->succ_pkts + pstats->retry_pkts;
+               STATS_OP_FMT(AMPDU).nss[1][nss] +=
+                       pstats->succ_pkts + pstats->retry_pkts;
+               STATS_OP_FMT(AMPDU).gi[1][gi] +=
+                       pstats->succ_pkts + pstats->retry_pkts;
+       } else {
+               tx_stats->ack_fails +=
+                               ATH10K_HW_BA_FAIL(pstats->flags);
+       }
+
+       STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
+       STATS_OP_FMT(SUCC).nss[0][nss] += pstats->succ_bytes;
+       STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;
+
+       STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
+       STATS_OP_FMT(SUCC).nss[1][nss] += pstats->succ_pkts;
+       STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;
+
+       STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;
+       STATS_OP_FMT(FAIL).nss[0][nss] += pstats->failed_bytes;
+       STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;
+
+       STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;
+       STATS_OP_FMT(FAIL).nss[1][nss] += pstats->failed_pkts;
+       STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;
+
+       STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;
+       STATS_OP_FMT(RETRY).nss[0][nss] += pstats->retry_bytes;
+       STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;
+
+       STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
+       STATS_OP_FMT(RETRY).nss[1][nss] += pstats->retry_pkts;
+       STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;
 }
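
STATS_OP_FMT is pure indexing shorthand; expanded, the first VHT line above reads:

        tx_stats->stats[ATH10K_STATS_TYPE_SUCC].vht[0][mcs] += pstats->succ_bytes;

Index 0 of each [2][...] pair accumulates bytes and index 1 accumulates packets.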
 
 static void
@@ -2508,7 +2753,7 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
                                struct ath10k_per_peer_tx_stats *peer_stats)
 {
        struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
-       u8 rate = 0, sgi;
+       u8 rate = 0, sgi;
+       s8 rate_idx = 0;
        struct rate_info txrate;
 
        lockdep_assert_held(&ar->data_lock);
@@ -2536,17 +2781,12 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
        if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
            txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
                rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
-
-               if (!is_valid_legacy_rate(rate)) {
-                       ath10k_warn(ar, "Invalid legacy rate %hhd peer stats",
-                                   rate);
-                       return;
-               }
-
                /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
-               rate *= 10;
-               if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
-                       rate = rate - 5;
+               if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
+                       rate = 5;
+               rate_idx = ath10k_get_legacy_rate_idx(ar, rate);
+               if (rate_idx < 0)
+                       return;
                arsta->txrate.legacy = rate;
        } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
                arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
@@ -2561,6 +2801,10 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
 
        arsta->txrate.nss = txrate.nss;
        arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
+
+       if (ath10k_debug_is_extd_tx_stats_enabled(ar))
+               ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
+                                                   rate_idx);
 }
 
 static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
@@ -2702,7 +2946,12 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                break;
        }
        case HTT_T2H_MSG_TYPE_RX_IND:
-               ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
+               if (ar->dev_type == ATH10K_DEV_TYPE_HL)
+                       return ath10k_htt_rx_proc_rx_ind_hl(htt,
+                                                           &resp->rx_ind_hl,
+                                                           skb);
+               else
+                       ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
                break;
        case HTT_T2H_MSG_TYPE_PEER_MAP: {
                struct htt_peer_map_event ev = {
@@ -2986,11 +3235,16 @@ static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
        .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
 };
 
+static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
+};
+
 void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
 {
        struct ath10k *ar = htt->ar;
 
-       if (ar->hw_params.target_64bit)
+       if (ar->dev_type == ATH10K_DEV_TYPE_HL)
+               htt->rx_ops = &htt_rx_ops_hl;
+       else if (ar->hw_params.target_64bit)
                htt->rx_ops = &htt_rx_ops_64;
        else
                htt->rx_ops = &htt_rx_ops_32;
index 7cff0d52338fe1de23da635d23c0e469593f0c2a..ad05ab714c9b3160f3c42d59ac776c6b0de4066d 100644 (file)
@@ -495,6 +495,9 @@ int ath10k_htt_tx_start(struct ath10k_htt *htt)
        if (htt->tx_mem_allocated)
                return 0;
 
+       if (ar->dev_type == ATH10K_DEV_TYPE_HL)
+               return 0;
+
        ret = ath10k_htt_tx_alloc_buf(htt);
        if (ret)
                goto free_idr_pending_tx;
@@ -934,6 +937,57 @@ static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
        return 0;
 }
 
+static int ath10k_htt_send_rx_ring_cfg_hl(struct ath10k_htt *htt)
+{
+       struct ath10k *ar = htt->ar;
+       struct sk_buff *skb;
+       struct htt_cmd *cmd;
+       struct htt_rx_ring_setup_ring32 *ring;
+       const int num_rx_ring = 1;
+       u16 flags;
+       int len;
+       int ret;
+
+       /* The HW expects the buffer to be an integral number of 4-byte
+        * "words".
+        */
+       BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
+       BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
+
+       len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
+           + (sizeof(*ring) * num_rx_ring);
+       skb = ath10k_htc_alloc_skb(ar, len);
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put(skb, len);
+
+       cmd = (struct htt_cmd *)skb->data;
+       ring = &cmd->rx_setup_32.rings[0];
+
+       cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
+       cmd->rx_setup_32.hdr.num_rings = 1;
+
+       flags = 0;
+       flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
+       flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
+       flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
+
+       memset(ring, 0, sizeof(*ring));
+       ring->rx_ring_len = __cpu_to_le16(HTT_RX_RING_SIZE_MIN);
+       ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
+       ring->flags = __cpu_to_le16(flags);
+
+       ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+       if (ret) {
+               dev_kfree_skb_any(skb);
+               return ret;
+       }
+
+       return 0;
+}
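
The assembled frame is a single HTC message configuring one 32-bit ring; its layout, as a sketch of the length computation above:

        /* | htt_cmd_hdr | rx_setup_32.hdr | ring[0] |
         * len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
         *     + sizeof(*ring) * num_rx_ring, with num_rx_ring == 1
         */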
+
 int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
                                u8 max_subfrms_ampdu,
                                u8 max_subfrms_amsdu)
@@ -1123,7 +1177,8 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        return 0;
 
 err_unmap_msdu:
-       dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+       if (ar->dev_type != ATH10K_DEV_TYPE_HL)
+               dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 err_free_txdesc:
        dev_kfree_skb_any(txdesc);
 err_free_msdu_id:
@@ -1134,6 +1189,94 @@ err:
        return res;
 }
 
+#define HTT_TX_HL_NEEDED_HEADROOM \
+       (unsigned int)(sizeof(struct htt_cmd_hdr) + \
+       sizeof(struct htt_data_tx_desc) + \
+       sizeof(struct ath10k_htc_hdr))
+
+static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
+                           struct sk_buff *msdu)
+{
+       struct ath10k *ar = htt->ar;
+       int res, data_len;
+       struct htt_cmd_hdr *cmd_hdr;
+       struct htt_data_tx_desc *tx_desc;
+       struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+       struct sk_buff *tmp_skb;
+       bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
+       u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
+       u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
+       u8 flags0 = 0;
+       u16 flags1 = 0;
+
+       data_len = msdu->len;
+
+       switch (txmode) {
+       case ATH10K_HW_TXRX_RAW:
+       case ATH10K_HW_TXRX_NATIVE_WIFI:
+               flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+               /* fall through */
+       case ATH10K_HW_TXRX_ETHERNET:
+               flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+               break;
+       case ATH10K_HW_TXRX_MGMT:
+               flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+                            HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+               flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+               break;
+       }
+
+       if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
+               flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
+       flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
+       flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+       if (msdu->ip_summed == CHECKSUM_PARTIAL &&
+           !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+               flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+               flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+       }
+
+       /* Prepend the HTT header and TX desc struct to the data message
+        * and realloc the skb if it does not have enough headroom.
+        */
+       if (skb_headroom(msdu) < HTT_TX_HL_NEEDED_HEADROOM) {
+               tmp_skb = msdu;
+
+               ath10k_dbg(htt->ar, ATH10K_DBG_HTT,
+                          "Not enough headroom in skb. Current headroom: %u, needed: %u. Reallocating...\n",
+                          skb_headroom(msdu), HTT_TX_HL_NEEDED_HEADROOM);
+               msdu = skb_realloc_headroom(msdu, HTT_TX_HL_NEEDED_HEADROOM);
+               kfree_skb(tmp_skb);
+               if (!msdu) {
+                       ath10k_warn(htt->ar, "htt hl tx: Unable to realloc skb!\n");
+                       res = -ENOMEM;
+                       goto out;
+               }
+       }
+
+       skb_push(msdu, sizeof(*cmd_hdr));
+       skb_push(msdu, sizeof(*tx_desc));
+       cmd_hdr = (struct htt_cmd_hdr *)msdu->data;
+       tx_desc = (struct htt_data_tx_desc *)(msdu->data + sizeof(*cmd_hdr));
+
+       cmd_hdr->msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+       tx_desc->flags0 = flags0;
+       tx_desc->flags1 = __cpu_to_le16(flags1);
+       tx_desc->len = __cpu_to_le16(data_len);
+       tx_desc->id = 0;
+       tx_desc->frags_paddr = 0; /* always zero */
+       /* Initialize peer_id to INVALID_PEER because this is not the
+        * reinjection path.
+        */
+       tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID);
+
+       res = ath10k_htc_send(&htt->ar->htc, htt->eid, msdu);
+
+out:
+       return res;
+}
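
The headroom handling above is the standard skb idiom; a standalone sketch, assuming skb and needed are in scope (generic kernel pattern, not copied from the driver):

        if (skb_headroom(skb) < needed) {
                struct sk_buff *nskb = skb_realloc_headroom(skb, needed);

                kfree_skb(skb);         /* old skb is released either way */
                if (!nskb)
                        return -ENOMEM;
                skb = nskb;
        }
        skb_push(skb, needed);          /* now guaranteed to fit */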
+
 static int ath10k_htt_tx_32(struct ath10k_htt *htt,
                            enum ath10k_hw_txrx_mode txmode,
                            struct sk_buff *msdu)
@@ -1561,11 +1704,19 @@ static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
        .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
 };
 
+static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
+       .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_hl,
+       .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
+       .htt_tx = ath10k_htt_tx_hl,
+};
+
 void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
 {
        struct ath10k *ar = htt->ar;
 
-       if (ar->hw_params.target_64bit)
+       if (ar->dev_type == ATH10K_DEV_TYPE_HL)
+               htt->tx_ops = &htt_tx_ops_hl;
+       else if (ar->hw_params.target_64bit)
                htt->tx_ops = &htt_tx_ops_64;
        else
                htt->tx_ops = &htt_tx_ops_32;
index 677535b3d2070eea5d20983b89c9aba44be4a829..af8ae8117c6223e5bb3ae29fcb058ebd3560f526 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/types.h>
 #include <linux/bitops.h>
+#include <linux/bitfield.h>
 #include "core.h"
 #include "hw.h"
 #include "hif.h"
@@ -918,6 +919,196 @@ static int ath10k_hw_qca6174_enable_pll_clock(struct ath10k *ar)
        return 0;
 }
 
+/* Program CPU_ADDR_MSB to allow access to a different
+ * memory region.
+ */
+static void ath10k_hw_map_target_mem(struct ath10k *ar, u32 msb)
+{
+       u32 address = SOC_CORE_BASE_ADDRESS + FW_RAM_CONFIG_ADDRESS;
+
+       ath10k_hif_write32(ar, address, msb);
+}
+
+/* 1. Write to a memory region of the target, such as IRAM and DRAM.
+ * 2. Target addresses (0x0 ~ 0x00100000 and 0x00400000 ~ 0x00500000)
+ *    can be written directly. See ath10k_pci_targ_cpu_to_ce_addr() too.
+ * 3. In order to access regions other than the above,
+ *    we need to set the value of the CPU_ADDR_MSB register.
+ * 4. Target memory access is limited to a 1M window. If the size is larger
+ *    than 1M, the write must be split and CPU_ADDR_MSB programmed accordingly.
+ */
+static int ath10k_hw_diag_segment_msb_download(struct ath10k *ar,
+                                              const void *buffer,
+                                              u32 address,
+                                              u32 length)
+{
+       u32 addr = address & REGION_ACCESS_SIZE_MASK;
+       int ret, remain_size, size;
+       const u8 *buf;
+
+       ath10k_hw_map_target_mem(ar, CPU_ADDR_MSB_REGION_VAL(address));
+
+       if (addr + length > REGION_ACCESS_SIZE_LIMIT) {
+               size = REGION_ACCESS_SIZE_LIMIT - addr;
+               remain_size = length - size;
+
+               ret = ath10k_hif_diag_write(ar, address, buffer, size);
+               if (ret) {
+                       ath10k_warn(ar,
+                                   "failed to download the first %d bytes segment to address:0x%x: %d\n",
+                                   size, address, ret);
+                       goto done;
+               }
+
+               /* Change msb to the next memory region */
+               ath10k_hw_map_target_mem(ar,
+                                        CPU_ADDR_MSB_REGION_VAL(address) + 1);
+               buf = buffer +  size;
+               ret = ath10k_hif_diag_write(ar,
+                                           address & ~REGION_ACCESS_SIZE_MASK,
+                                           buf, remain_size);
+               if (ret) {
+                       ath10k_warn(ar,
+                                   "failed to download the second %d bytes segment to address:0x%x: %d\n",
+                                   remain_size,
+                                   address & ~REGION_ACCESS_SIZE_MASK,
+                                   ret);
+                       goto done;
+               }
+       } else {
+               ret = ath10k_hif_diag_write(ar, address, buffer, length);
+               if (ret) {
+                       ath10k_warn(ar,
+                                   "failed to download the only %d bytes segment to address:0x%x: %d\n",
+                                   length, address, ret);
+                       goto done;
+               }
+       }
+
+done:
+       /* Change msb to DRAM */
+       ath10k_hw_map_target_mem(ar,
+                                CPU_ADDR_MSB_REGION_VAL(DRAM_BASE_ADDRESS));
+       return ret;
+}
+
+static int ath10k_hw_diag_segment_download(struct ath10k *ar,
+                                          const void *buffer,
+                                          u32 address,
+                                          u32 length)
+{
+       if (address >= DRAM_BASE_ADDRESS + REGION_ACCESS_SIZE_LIMIT)
+               /* Need to change the MSB for the memory write */
+               return ath10k_hw_diag_segment_msb_download(ar, buffer,
+                                                          address, length);
+       else
+               return ath10k_hif_diag_write(ar, address, buffer, length);
+}
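
A worked example of the split, with assumed numbers: writing length 0x180000 at target address 0x00480000 against the 1M window gives

        addr        = 0x00480000 & REGION_ACCESS_SIZE_MASK = 0x80000
        size        = REGION_ACCESS_SIZE_LIMIT - 0x80000   = 0x80000   (chunk 1)
        remain_size = 0x180000 - 0x80000                    = 0x100000  (chunk 2)

with chunk 2 written after CPU_ADDR_MSB has been bumped to the next region.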
+
+int ath10k_hw_diag_fast_download(struct ath10k *ar,
+                                u32 address,
+                                const void *buffer,
+                                u32 length)
+{
+       const u8 *buf = buffer;
+       bool sgmt_end = false;
+       u32 base_addr = 0;
+       u32 base_len = 0;
+       u32 left = 0;
+       struct bmi_segmented_file_header *hdr;
+       struct bmi_segmented_metadata *metadata;
+       int ret = 0;
+
+       if (length < sizeof(*hdr))
+               return -EINVAL;
+
+       /* Check the firmware header. If it does not have the correct
+        * magic number or is compressed, return an error.
+        */
+       hdr = (struct bmi_segmented_file_header *)buf;
+       if (__le32_to_cpu(hdr->magic_num) != BMI_SGMTFILE_MAGIC_NUM) {
+               ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                          "Not a supported firmware, magic_num:0x%x\n",
+                          hdr->magic_num);
+               return -EINVAL;
+       }
+
+       if (hdr->file_flags != 0) {
+               ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                          "Not a supported firmware, file_flags:0x%x\n",
+                          hdr->file_flags);
+               return -EINVAL;
+       }
+
+       metadata = (struct bmi_segmented_metadata *)hdr->data;
+       left = length - sizeof(*hdr);
+
+       while (left > 0) {
+               if (left < sizeof(*metadata)) {
+                       ath10k_warn(ar, "firmware segment is truncated: %d\n",
+                                   left);
+                       ret = -EINVAL;
+                       break;
+               }
+               base_addr = __le32_to_cpu(metadata->addr);
+               base_len = __le32_to_cpu(metadata->length);
+               buf = metadata->data;
+               left -= sizeof(*metadata);
+
+               switch (base_len) {
+               case BMI_SGMTFILE_BEGINADDR:
+                       /* base_addr is the start address to run */
+                       ret = ath10k_bmi_set_start(ar, base_addr);
+                       base_len = 0;
+                       break;
+               case BMI_SGMTFILE_DONE:
+                       /* no more segment */
+                       base_len = 0;
+                       sgmt_end = true;
+                       ret = 0;
+                       break;
+               case BMI_SGMTFILE_BDDATA:
+               case BMI_SGMTFILE_EXEC:
+                       ath10k_warn(ar,
+                                   "firmware has unsupported segment:%d\n",
+                                   base_len);
+                       ret = -EINVAL;
+                       break;
+               default:
+                       if (base_len > left) {
+                               /* sanity check */
+                               ath10k_warn(ar,
+                                           "firmware has invalid segment length, %d > %d\n",
+                                           base_len, left);
+                               ret = -EINVAL;
+                               break;
+                       }
+
+                       ret = ath10k_hw_diag_segment_download(ar,
+                                                             buf,
+                                                             base_addr,
+                                                             base_len);
+
+                       if (ret)
+                               ath10k_warn(ar,
+                                           "failed to download firmware via diag interface:%d\n",
+                                           ret);
+                       break;
+               }
+
+               if (ret || sgmt_end)
+                       break;
+
+               metadata = (struct bmi_segmented_metadata *)(buf + base_len);
+               left -= base_len;
+       }
+
+       if (ret == 0)
+               ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                          "boot firmware fast diag download successfully.\n");
+       return ret;
+}
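
The parse loop implies the following segmented-image layout (a sketch reconstructed from the code above, field names as used there):

        struct bmi_segmented_file_header   /* magic_num, file_flags, data[] */
        /* ...followed by repeated records: */
        struct bmi_segmented_metadata      /* addr, length, data[length] */
        /* special length values: BMI_SGMTFILE_BEGINADDR (addr = entry point),
         * BMI_SGMTFILE_DONE (end of image)
         */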
+
 const struct ath10k_hw_ops qca988x_ops = {
        .set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
 };
index 977f79ebb4fd5911bd8910d415e1ba6d492359ff..1b5da272d18cda6997c4916317a6f2386faed5e3 100644 (file)
 
 #include "targaddrs.h"
 
+enum ath10k_bus {
+       ATH10K_BUS_PCI,
+       ATH10K_BUS_AHB,
+       ATH10K_BUS_SDIO,
+       ATH10K_BUS_USB,
+       ATH10K_BUS_SNOC,
+};
+
 #define ATH10K_FW_DIR                  "ath10k"
 
 #define QCA988X_2_0_DEVICE_ID_UBNT   (0x11ac)
@@ -109,6 +117,7 @@ enum qca9377_chip_id_rev {
 #define QCA9984_HW_1_0_CHIP_ID_REV     0x0
 #define QCA9984_HW_1_0_FW_DIR          ATH10K_FW_DIR "/QCA9984/hw1.0"
 #define QCA9984_HW_1_0_BOARD_DATA_FILE "board.bin"
+#define QCA9984_HW_1_0_EBOARD_DATA_FILE "eboard.bin"
 #define QCA9984_HW_1_0_PATCH_LOAD_ADDR 0x1234
 
 /* QCA9888 2.0 defines */
@@ -221,6 +230,7 @@ enum ath10k_fw_htt_op_version {
 enum ath10k_bd_ie_type {
        /* contains sub IEs of enum ath10k_bd_ie_board_type */
        ATH10K_BD_IE_BOARD = 0,
+       ATH10K_BD_IE_BOARD_EXT = 1,
 };
 
 enum ath10k_bd_ie_board_type {
@@ -389,6 +399,11 @@ extern const struct ath10k_hw_ce_regs qcax_ce_regs;
 void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
                                u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
 
+int ath10k_hw_diag_fast_download(struct ath10k *ar,
+                                u32 address,
+                                const void *buffer,
+                                u32 length);
+
 #define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
 #define QCA_REV_9887(ar) ((ar)->hw_rev == ATH10K_HW_QCA9887)
 #define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
@@ -501,6 +516,7 @@ struct ath10k_hw_clk_params {
 struct ath10k_hw_params {
        u32 id;
        u16 dev_id;
+       enum ath10k_bus bus;
        const char *name;
        u32 patch_load_addr;
        int uart_pin;
@@ -539,6 +555,8 @@ struct ath10k_hw_params {
                const char *dir;
                const char *board;
                size_t board_size;
+               const char *eboard;
+               size_t ext_board_size;
                size_t board_ext_size;
        } fw;
 
@@ -589,6 +607,14 @@ struct ath10k_hw_params {
 
        /* Number of bytes to be the offset for each FFT sample */
        int spectral_bin_offset;
+
+       /* targets which require a hw filter reset during boot up
+        * to avoid them sending spurious acks.
+        */
+       bool hw_filter_reset_required;
+
+       /* targets that support fw download via the diag CE */
+       bool fw_diag_ce_download;
 };
 
 struct htt_rx_desc;
@@ -1124,4 +1150,15 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
 #define RTC_SYNC_STATUS_PLL_CHANGING_MASK      0x00000020
 /* qca6174 PLL offset/mask end */
 
+/* CPU_ADDR_MSB is a register; bits [3:0] specify which memory
+ * region is accessed. Each memory region is 1M in size.
+ * If the host wants to access 0xX12345 on the target, then
+ * CPU_ADDR_MSB[3:0] must be 0xX.
+ * The following macros extract the 0xX and define the size limit.
+ */
+#define CPU_ADDR_MSB_REGION_MASK       GENMASK(23, 20)
+#define CPU_ADDR_MSB_REGION_VAL(X)     FIELD_GET(CPU_ADDR_MSB_REGION_MASK, X)
+#define REGION_ACCESS_SIZE_LIMIT       0x100000
+#define REGION_ACCESS_SIZE_MASK                (REGION_ACCESS_SIZE_LIMIT - 1)
+
 #endif /* _HW_H_ */
index 90f9372dec2548f4eb3fc671fd63d8e12dc4c238..3933dd96da55abe52cb9e0814d1528525146e787 100644 (file)
@@ -18,6 +18,7 @@
 
 #include "mac.h"
 
+#include <net/cfg80211.h>
 #include <net/mac80211.h>
 #include <linux/etherdevice.h>
 #include <linux/acpi.h>
@@ -29,7 +30,6 @@
 #include "htt.h"
 #include "txrx.h"
 #include "testmode.h"
-#include "wmi.h"
 #include "wmi-tlv.h"
 #include "wmi-ops.h"
 #include "wow.h"
@@ -156,6 +156,22 @@ u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
        return 0;
 }
 
+static int ath10k_mac_get_rate_hw_value(int bitrate)
+{
+       int i;
+       u8 hw_value_prefix = 0;
+
+       if (ath10k_mac_bitrate_is_cck(bitrate))
+               hw_value_prefix = WMI_RATE_PREAMBLE_CCK << 6;
+
+       for (i = 0; i < sizeof(ath10k_rates); i++) {
+               if (ath10k_rates[i].bitrate == bitrate)
+                       return hw_value_prefix | ath10k_rates[i].hw_value;
+       }
+
+       return -EINVAL;
+}
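
The returned rate code packs the preamble into the two high bits. For example, a CCK bitrate whose table entry carries a hypothetical hw_value of 3 would yield:

        (WMI_RATE_PREAMBLE_CCK << 6) | 3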
+
 static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
 {
        switch ((mcs_map >> (2 * nss)) & 0x3) {
@@ -967,7 +983,7 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
        if (time_left == 0)
                return -ETIMEDOUT;
 
-       return 0;
+       return ar->last_wmi_vdev_start_status;
 }
 
 static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
@@ -5451,9 +5467,10 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
        struct cfg80211_chan_def def;
        u32 vdev_param, pdev_param, slottime, preamble;
        u16 bitrate, hw_value;
-       u8 rate;
-       int rateidx, ret = 0;
+       u8 rate, basic_rate_idx;
+       int rateidx, ret = 0, hw_rate_code;
        enum nl80211_band band;
+       const struct ieee80211_supported_band *sband;
 
        mutex_lock(&ar->conf_mutex);
 
@@ -5659,6 +5676,30 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                                    arvif->vdev_id,  ret);
        }
 
+       if (changed & BSS_CHANGED_BASIC_RATES) {
+               if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) {
+                       mutex_unlock(&ar->conf_mutex);
+                       return;
+               }
+
+               sband = ar->hw->wiphy->bands[def.chan->band];
+               basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
+               bitrate = sband->bitrates[basic_rate_idx].bitrate;
+
+               hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate);
+               if (hw_rate_code < 0) {
+                       ath10k_warn(ar, "bitrate not supported %d\n", bitrate);
+                       mutex_unlock(&ar->conf_mutex);
+                       return;
+               }
+
+               vdev_param = ar->wmi.vdev_param->mgmt_rate;
+               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+                                               hw_rate_code);
+               if (ret)
+                       ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret);
+       }
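
ffs() picks the lowest configured basic rate; for instance:

        basic_rates = 0b10110  ->  ffs() == 2  ->  basic_rate_idx == 1

so the second entry of sband->bitrates becomes the management tx rate.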
+
        mutex_unlock(&ar->conf_mutex);
 }
 
@@ -6215,6 +6256,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
            new_state == IEEE80211_STA_NONE) {
                memset(arsta, 0, sizeof(*arsta));
                arsta->arvif = arvif;
+               arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
                INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
 
                for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
@@ -6243,6 +6285,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
                           ar->num_stations + 1, ar->max_num_stations,
                           ar->num_peers + 1, ar->max_num_peers);
 
+               if (ath10k_debug_is_extd_tx_stats_enabled(ar)) {
+                       arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats),
+                                                 GFP_KERNEL);
+                       if (!arsta->tx_stats)
+                               goto exit;
+               }
+
                num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
                num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
 
@@ -6328,6 +6377,9 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
                           "mac vdev %d peer delete %pM sta %pK (sta gone)\n",
                           arvif->vdev_id, sta->addr, sta);
 
+               if (ath10k_debug_is_extd_tx_stats_enabled(ar))
+                       kfree(arsta->tx_stats);
+
                if (sta->tdls) {
                        ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id,
                                                          sta,
@@ -6768,23 +6820,17 @@ static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
        return -EOPNOTSUPP;
 }
 
-static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                        u32 queues, bool drop)
+void ath10k_mac_wait_tx_complete(struct ath10k *ar)
 {
-       struct ath10k *ar = hw->priv;
        bool skip;
        long time_left;
 
        /* mac80211 doesn't care if we really xmit queued frames or not
         * we'll collect those frames either way if we stop/delete vdevs
         */
-       if (drop)
-               return;
-
-       mutex_lock(&ar->conf_mutex);
 
        if (ar->state == ATH10K_STATE_WEDGED)
-               goto skip;
+               return;
 
        time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
                        bool empty;
@@ -6803,8 +6849,18 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        if (time_left == 0 || skip)
                ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
                            skip, ar->state, time_left);
+}
 
-skip:
+static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                        u32 queues, bool drop)
+{
+       struct ath10k *ar = hw->priv;
+
+       if (drop)
+               return;
+
+       mutex_lock(&ar->conf_mutex);
+       ath10k_mac_wait_tx_complete(ar);
        mutex_unlock(&ar->conf_mutex);
 }
 
@@ -8148,6 +8204,24 @@ static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
        },
 };
 
+static const struct
+ieee80211_iface_combination ath10k_10_4_bcn_int_if_comb[] = {
+       {
+               .limits = ath10k_10_4_if_limits,
+               .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
+               .max_interfaces = 16,
+               .num_different_channels = 1,
+               .beacon_int_infra_match = true,
+               .beacon_int_min_gcd = 100,
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+               .radar_detect_widths =  BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+                                       BIT(NL80211_CHAN_WIDTH_20) |
+                                       BIT(NL80211_CHAN_WIDTH_40) |
+                                       BIT(NL80211_CHAN_WIDTH_80),
+#endif
+       },
+};
+
 static void ath10k_get_arvif_iter(void *data, u8 *mac,
                                  struct ieee80211_vif *vif)
 {
@@ -8310,6 +8384,10 @@ int ath10k_mac_register(struct ath10k *ar)
        void *channels;
        int ret;
 
+       if (!is_valid_ether_addr(ar->mac_addr)) {
+               ath10k_warn(ar, "invalid MAC address; choosing random\n");
+               eth_random_addr(ar->mac_addr);
+       }
        SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
 
        SET_IEEE80211_DEV(ar->hw, ar->dev);
@@ -8359,6 +8437,7 @@ int ath10k_mac_register(struct ath10k *ar)
                ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
        }
 
+       wiphy_read_of_freq_limits(ar->hw->wiphy);
        ath10k_mac_setup_ht_vht_cap(ar);
 
        ar->hw->wiphy->interface_modes =
@@ -8463,6 +8542,10 @@ int ath10k_mac_register(struct ath10k *ar)
        wiphy_ext_feature_set(ar->hw->wiphy,
                              NL80211_EXT_FEATURE_SET_SCAN_DWELL);
 
+       if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
+               wiphy_ext_feature_set(ar->hw->wiphy,
+                                     NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT);
+
        /*
         * on LL hardware queues are managed entirely by the FW
         * so we only advertise to mac we can do the queues thing
@@ -8506,6 +8589,13 @@ int ath10k_mac_register(struct ath10k *ar)
                ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
                ar->hw->wiphy->n_iface_combinations =
                        ARRAY_SIZE(ath10k_10_4_if_comb);
+               if (test_bit(WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
+                            ar->wmi.svc_map)) {
+                       ar->hw->wiphy->iface_combinations =
+                               ath10k_10_4_bcn_int_if_comb;
+                       ar->hw->wiphy->n_iface_combinations =
+                               ARRAY_SIZE(ath10k_10_4_bcn_int_if_comb);
+               }
                break;
        case ATH10K_FW_WMI_OP_VERSION_UNSET:
        case ATH10K_FW_WMI_OP_VERSION_MAX:
index 81f8d6c0af353274ee87d49eb7e74052f2b830b0..570493d2d648b962b00543adb923e825d3f7a9de 100644 (file)
@@ -82,6 +82,7 @@ struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
                                            u16 peer_id,
                                            u8 tid);
 int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val);
+void ath10k_mac_wait_tx_complete(struct ath10k *ar);
 
 static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif,
                                      struct sk_buff *skb)
index af2cf55c4c1e631ea075e5baa5742651b77435c1..873dbb65439f5208833a8b8ea028d9ac695ec1c9 100644 (file)
@@ -192,7 +192,7 @@ static struct ce_attr host_ce_config_wlan[] = {
 
        /* CE7: ce_diag, the Diagnostic Window */
        {
-               .flags = CE_ATTR_FLAGS,
+               .flags = CE_ATTR_FLAGS | CE_ATTR_POLL,
                .src_nentries = 2,
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
@@ -870,6 +870,21 @@ static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
        return val;
 }
 
+/* Refactored from ath10k_pci_qca988x_targ_cpu_to_ce_addr.
+ * Supports access to target space below 1M for qca6174 and qca9377.
+ * If the target address is below 1M, bit[20] of the converted CE
+ * address is 0; otherwise bit[20] of the converted CE address is 1.
+ */
+static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+       u32 val = 0, region = addr & 0xfffff;
+
+       val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
+                                & 0x7ff) << 21;
+       val |= ((addr >= 0x100000) ? 0x100000 : 0) | region;
+       return val;
+}
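
A worked example, assuming the CORE_CTRL low bits read back as 0x1:

        addr 0x00080000 (< 1M):  val = (0x1 << 21) | 0x80000             /* bit[20] clear */
        addr 0x00180000 (>= 1M): val = (0x1 << 21) | 0x100000 | 0x80000  /* bit[20] set */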
+
 static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
 {
        u32 val = 0, region = addr & 0xfffff;
@@ -931,6 +946,15 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                goto done;
        }
 
+       /* The address supplied by the caller is in the
+        * Target CPU virtual address space.
+        *
+        * In order to use this address with the diagnostic CE,
+        * convert it from Target CPU virtual address space
+        * to CE address space
+        */
+       address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
+
        remaining_bytes = nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
@@ -942,16 +966,6 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                        goto done;
 
                /* Request CE to send from Target(!) address to Host buffer */
-               /*
-                * The address supplied by the caller is in the
-                * Target CPU virtual address space.
-                *
-                * In order to use this address with the diagnostic CE,
-                * convert it from Target CPU virtual address space
-                * to CE address space
-                */
-               address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
-
                ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
                                            0);
                if (ret)
@@ -960,8 +974,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                i = 0;
                while (ath10k_ce_completed_send_next_nolock(ce_diag,
                                                            NULL) != 0) {
-                       mdelay(1);
-                       if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+                       udelay(DIAG_ACCESS_CE_WAIT_US);
+                       i += DIAG_ACCESS_CE_WAIT_US;
+
+                       if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
                                ret = -EBUSY;
                                goto done;
                        }
@@ -972,9 +988,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                                                            (void **)&buf,
                                                            &completed_nbytes)
                                                                != 0) {
-                       mdelay(1);
+                       udelay(DIAG_ACCESS_CE_WAIT_US);
+                       i += DIAG_ACCESS_CE_WAIT_US;
 
-                       if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+                       if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
                                ret = -EBUSY;
                                goto done;
                        }
@@ -1119,9 +1136,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                i = 0;
                while (ath10k_ce_completed_send_next_nolock(ce_diag,
                                                            NULL) != 0) {
-                       mdelay(1);
+                       udelay(DIAG_ACCESS_CE_WAIT_US);
+                       i += DIAG_ACCESS_CE_WAIT_US;
 
-                       if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+                       if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
                                ret = -EBUSY;
                                goto done;
                        }
@@ -1132,9 +1150,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                                                            (void **)&buf,
                                                            &completed_nbytes)
                                                                != 0) {
-                       mdelay(1);
+                       udelay(DIAG_ACCESS_CE_WAIT_US);
+                       i += DIAG_ACCESS_CE_WAIT_US;
 
-                       if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+                       if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
                                ret = -EBUSY;
                                goto done;
                        }
@@ -1839,7 +1858,7 @@ int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
                }
        }
 
-       if (WARN_ON(!ul_set || !dl_set))
+       if (!ul_set || !dl_set)
                return -ENOENT;
 
        return 0;
@@ -2068,9 +2087,9 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
 
        ath10k_pci_irq_disable(ar);
        ath10k_pci_irq_sync(ar);
-       ath10k_pci_flush(ar);
        napi_synchronize(&ar->napi);
        napi_disable(&ar->napi);
+       ath10k_pci_flush(ar);
 
        spin_lock_irqsave(&ar_pci->ps_lock, flags);
        WARN_ON(ar_pci->ps_wake_refcount > 0);
@@ -3482,7 +3501,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        enum ath10k_hw_rev hw_rev;
-       u32 chip_id;
+       struct ath10k_bus_params bus_params;
        bool pci_ps;
        int (*pci_soft_reset)(struct ath10k *ar);
        int (*pci_hard_reset)(struct ath10k *ar);
@@ -3510,7 +3529,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
                pci_ps = true;
                pci_soft_reset = ath10k_pci_warm_reset;
                pci_hard_reset = ath10k_pci_qca6174_chip_reset;
-               targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
+               targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
                break;
        case QCA99X0_2_0_DEVICE_ID:
                hw_rev = ATH10K_HW_QCA99X0;
@@ -3538,7 +3557,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
                pci_ps = true;
                pci_soft_reset = NULL;
                pci_hard_reset = ath10k_pci_qca6174_chip_reset;
-               targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
+               targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
                break;
        default:
                WARN_ON(1);
@@ -3618,19 +3637,20 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
                goto err_free_irq;
        }
 
-       chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
-       if (chip_id == 0xffffffff) {
+       bus_params.dev_type = ATH10K_DEV_TYPE_LL;
+       bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+       if (bus_params.chip_id == 0xffffffff) {
                ath10k_err(ar, "failed to get chip id\n");
                goto err_free_irq;
        }
 
-       if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
+       if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
                ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
-                          pdev->device, chip_id);
+                          pdev->device, bus_params.chip_id);
                goto err_free_irq;
        }
 
-       ret = ath10k_core_register(ar, chip_id);
+       ret = ath10k_core_register(ar, &bus_params);
        if (ret) {
                ath10k_err(ar, "failed to register driver core: %d\n", ret);
                goto err_free_irq;
index 0ed4366571089595ee23060a6d85083ae8b1c933..e8d86331c539af5684d9660e601302cbdcff4d24 100644 (file)
@@ -207,7 +207,8 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
 #define CDC_WAR_DATA_CE     4
 
 /* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
-#define DIAG_ACCESS_CE_TIMEOUT_MS 10
+#define DIAG_ACCESS_CE_TIMEOUT_US 10000 /* 10 ms */
+#define DIAG_ACCESS_CE_WAIT_US 50
 
 void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value);
 void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val);
index ea4075d456fadbdec0e8e1970ff1321eb364f4a6..310674de3cb8820ebc6f58c73a91400fc8e93b81 100644 (file)
@@ -1277,4 +1277,19 @@ struct fw_rx_desc_base {
        u8 info0;
 } __packed;
 
+#define FW_RX_DESC_FLAGS_FIRST_MSDU (1 << 0)
+#define FW_RX_DESC_FLAGS_LAST_MSDU  (1 << 1)
+#define FW_RX_DESC_C3_FAILED        (1 << 2)
+#define FW_RX_DESC_C4_FAILED        (1 << 3)
+#define FW_RX_DESC_IPV6             (1 << 4)
+#define FW_RX_DESC_TCP              (1 << 5)
+#define FW_RX_DESC_UDP              (1 << 6)
+
+struct fw_rx_desc_hl {
+       u8 info0;
+       u8 version;
+       u8 len;
+       u8 flags;
+} __packed;
+
 #endif /* _RX_DESC_H_ */
index 7f61591ce0de6b5c904fde8261c9a6689a587f5a..983ecfef1d281743044a4638eaf538a8b19e69c4 100644 (file)
@@ -1941,7 +1941,8 @@ static int ath10k_sdio_probe(struct sdio_func *func,
        struct ath10k_sdio *ar_sdio;
        struct ath10k *ar;
        enum ath10k_hw_rev hw_rev;
-       u32 chip_id, dev_id_base;
+       u32 dev_id_base;
+       struct ath10k_bus_params bus_params;
        int ret, i;
 
        /* Assumption: All SDIO based chipsets (so far) are QCA6174 based.
@@ -2035,9 +2036,10 @@ static int ath10k_sdio_probe(struct sdio_func *func,
                goto err_free_wq;
        }
 
+       bus_params.dev_type = ATH10K_DEV_TYPE_HL;
        /* TODO: don't know yet how to get chip_id with SDIO */
-       chip_id = 0;
-       ret = ath10k_core_register(ar, chip_id);
+       bus_params.chip_id = 0;
+       ret = ath10k_core_register(ar, &bus_params);
        if (ret) {
                ath10k_err(ar, "failed to register driver core: %d\n", ret);
                goto err_free_wq;
index fa1843a7e0fdaaec3e74a96d9a8b359a921c9746..f7b5b855aab288daaa6ca56b97f1ca4ce359dc10 100644 (file)
@@ -62,6 +62,7 @@ static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
 static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
 static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
 static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
 
 static const struct ath10k_snoc_drv_priv drv_priv = {
        .hw_rev = ATH10K_HW_WCN3990,
@@ -171,7 +172,7 @@ static struct ce_attr host_ce_config_wlan[] = {
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 512,
-               .recv_cb = ath10k_snoc_htt_htc_rx_cb,
+               .recv_cb = ath10k_snoc_pktlog_rx_cb,
        },
 };
 
@@ -436,6 +437,14 @@ static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
        ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
 }
 
+/* Called by the lower (CE) layer when data is received from the target.
+ * WCN3990 firmware uses a separate CE (CE11) to transfer pktlog data.
+ */
+static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+       ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
 static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
 {
        skb_pull(skb, sizeof(struct ath10k_htc_hdr));
@@ -616,7 +625,7 @@ static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
                }
        }
 
-       if (WARN_ON(!ul_set || !dl_set))
+       if (!ul_set || !dl_set)
                return -ENOENT;
 
        return 0;
@@ -722,14 +731,15 @@ static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
 static void ath10k_snoc_hif_stop(struct ath10k *ar)
 {
        ath10k_snoc_irq_disable(ar);
-       ath10k_snoc_buffer_cleanup(ar);
        napi_synchronize(&ar->napi);
        napi_disable(&ar->napi);
+       ath10k_snoc_buffer_cleanup(ar);
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
 }
 
 static int ath10k_snoc_hif_start(struct ath10k *ar)
 {
+       napi_enable(&ar->napi);
        ath10k_snoc_irq_enable(ar);
        ath10k_snoc_rx_post(ar);
 
@@ -792,7 +802,6 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar)
                goto err_wlan_enable;
        }
 
-       napi_enable(&ar->napi);
        return 0;
 
 err_wlan_enable:
@@ -1274,6 +1283,7 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
        struct ath10k *ar;
        int ret;
        u32 i;
+       struct ath10k_bus_params bus_params;
 
        of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
        if (!of_id) {
@@ -1341,7 +1351,9 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
                goto err_free_irq;
        }
 
-       ret = ath10k_core_register(ar, drv_data->hw_rev);
+       bus_params.dev_type = ATH10K_DEV_TYPE_LL;
+       bus_params.chip_id = drv_data->hw_rev;
+       ret = ath10k_core_register(ar, &bus_params);
        if (ret) {
                ath10k_err(ar, "failed to register driver core: %d\n", ret);
                goto err_hw_power_off;
index c2b5bad0459ba3ce46ca4fe494d0428847c65a8e..b11a1c3d87b4652a60d667907277d96ae20c1846 100644 (file)
@@ -484,6 +484,10 @@ struct host_interest {
 #define QCA99X0_BOARD_DATA_SZ    12288
 #define QCA99X0_BOARD_EXT_DATA_SZ 0
 
+/* Dual band extended board data */
+#define QCA99X0_EXT_BOARD_DATA_SZ 2048
+#define EXT_BOARD_ADDRESS_OFFSET 0x3000
+
 #define QCA4019_BOARD_DATA_SZ    12064
 #define QCA4019_BOARD_EXT_DATA_SZ 0
 
index cda164f6e9f62f87e36c40c96f5fb7586d87a94c..23606b6972d09cf95628dc11ae9d5ac7d3922e33 100644 (file)
@@ -95,7 +95,8 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
                wake_up(&htt->empty_tx_wq);
        spin_unlock_bh(&htt->tx_lock);
 
-       dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+       if (ar->dev_type != ATH10K_DEV_TYPE_HL)
+               dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 
        ath10k_report_offchan_tx(htt->ar, msdu);
 
index d4803ff5a78a75eb7e2d63bc77a1623f146e283e..f731d35ee76d2be60ae839ed53996535bd0c6949 100644 (file)
@@ -983,7 +983,7 @@ static int ath10k_usb_probe(struct usb_interface *interface,
        struct usb_device *dev = interface_to_usbdev(interface);
        int ret, vendor_id, product_id;
        enum ath10k_hw_rev hw_rev;
-       u32 chip_id;
+       struct ath10k_bus_params bus_params;
 
        /* Assumption: All USB based chipsets (so far) are QCA9377 based.
         * If there will be newer chipsets that does not use the hw reg
@@ -1016,9 +1016,10 @@ static int ath10k_usb_probe(struct usb_interface *interface,
        ar->id.vendor = vendor_id;
        ar->id.device = product_id;
 
+       bus_params.dev_type = ATH10K_DEV_TYPE_HL;
        /* TODO: don't know yet how to get chip_id with USB */
-       chip_id = 0;
-       ret = ath10k_core_register(ar, chip_id);
+       bus_params.chip_id = 0;
+       ret = ath10k_core_register(ar, &bus_params);
        if (ret) {
                ath10k_warn(ar, "failed to register driver core: %d\n", ret);
                goto err;
index cdc1e64d52ad50e30dd5fe59a0e300635eda3f9d..731ceaed4d5a07bd8bede4a3ab78463a7258935c 100644 (file)
@@ -19,7 +19,6 @@
 #include "debug.h"
 #include "mac.h"
 #include "hw.h"
-#include "mac.h"
 #include "wmi.h"
 #include "wmi-ops.h"
 #include "wmi-tlv.h"
@@ -1569,7 +1568,10 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
 
        cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
 
-       cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
+       if (ar->hw_params.num_peers)
+               cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
+       else
+               cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
        cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit);
        cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries);
 
@@ -1582,7 +1584,10 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
        }
 
        cfg->num_peer_keys = __cpu_to_le32(2);
-       cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
+       if (ar->hw_params.num_peers)
+               cfg->num_tids = __cpu_to_le32(ar->hw_params.num_peers * 2);
+       else
+               cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
        cfg->tx_chain_mask = __cpu_to_le32(0x7);
        cfg->rx_chain_mask = __cpu_to_le32(0x7);
        cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
index fd612d2905b055f5654019a5ee6b3d4820ad1086..25e8fa789e8d34738d1684d9b4c50795000d4ab4 100644 (file)
@@ -1307,7 +1307,8 @@ static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
        .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
        .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
        .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
-       .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+       .peer_sta_ps_statechg_enable =
+                               WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
        .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
        .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
        .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
@@ -1869,6 +1870,12 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
        if (ret)
                dev_kfree_skb_any(skb);
 
+       if (ret == -EAGAIN) {
+               ath10k_warn(ar, "wmi command %d timeout, restarting hardware\n",
+                           cmd_id);
+               queue_work(ar->workqueue, &ar->restart_work);
+       }
+
        return ret;
 }
 
@@ -2336,7 +2343,12 @@ static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id,
        dma_unmap_single(ar->dev, pkt_addr->paddr,
                         msdu->len, DMA_FROM_DEVICE);
        info = IEEE80211_SKB_CB(msdu);
-       info->flags |= status;
+
+       if (status)
+               info->flags &= ~IEEE80211_TX_STAT_ACK;
+       else
+               info->flags |= IEEE80211_TX_STAT_ACK;
+
        ieee80211_tx_status_irqsafe(ar->hw, msdu);
 
        ret = 0;
@@ -2476,7 +2488,8 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
                   status->freq, status->band, status->signal,
                   status->rate_idx);
 
-       ieee80211_rx(ar->hw, skb);
+       ieee80211_rx_ni(ar->hw, skb);
+
        return 0;
 }
 
@@ -3236,18 +3249,31 @@ void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
 {
        struct wmi_vdev_start_ev_arg arg = {};
        int ret;
+       u32 status;
 
        ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
 
+       ar->last_wmi_vdev_start_status = 0;
+
        ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
        if (ret) {
                ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
-               return;
+               ar->last_wmi_vdev_start_status = ret;
+               goto out;
        }
 
-       if (WARN_ON(__le32_to_cpu(arg.status)))
-               return;
+       status = __le32_to_cpu(arg.status);
+       if (WARN_ON_ONCE(status)) {
+               ath10k_warn(ar, "vdev-start-response reports status error: %d (%s)\n",
+                           status, (status == WMI_VDEV_START_CHAN_INVALID) ?
+                           "chan-invalid" : "unknown");
+               /* Setup is done one way or another, so still signal the
+                * completion instead of returning early here.
+                */
+               ar->last_wmi_vdev_start_status = -EINVAL;
+       }
 
+out:
        complete(&ar->vdev_setup_done);
 }
 
@@ -4774,6 +4800,13 @@ ath10k_wmi_tpc_final_get_rate(struct ath10k *ar,
                }
        }
 
+       if (pream == -1) {
+               ath10k_warn(ar, "unknown wmi tpc final index and frequency: %u, %u\n",
+                           pream_idx, __le32_to_cpu(ev->chan_freq));
+               tpc = 0;
+               goto out;
+       }
+
        if (pream == 4)
                tpc = min_t(u8, ev->rates_array[rate_idx],
                            ev->max_reg_allow_pow[ch]);
@@ -5016,6 +5049,36 @@ ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb)
        }
 }
 
+static void
+ath10k_wmi_event_peer_sta_ps_state_chg(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct wmi_peer_sta_ps_state_chg_event *ev;
+       struct ieee80211_sta *sta;
+       struct ath10k_sta *arsta;
+       u8 peer_addr[ETH_ALEN];
+
+       lockdep_assert_held(&ar->data_lock);
+
+       ev = (struct wmi_peer_sta_ps_state_chg_event *)skb->data;
+       ether_addr_copy(peer_addr, ev->peer_macaddr.addr);
+
+       rcu_read_lock();
+
+       sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer_addr, NULL);
+
+       if (!sta) {
+               ath10k_warn(ar, "failed to find station entry %pM\n",
+                           peer_addr);
+               goto exit;
+       }
+
+       arsta = (struct ath10k_sta *)sta->drv_priv;
+       arsta->peer_ps_state = __le32_to_cpu(ev->peer_ps_state);
+
+exit:
+       rcu_read_unlock();
+}
+
 void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
 {
        ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
@@ -5449,7 +5512,8 @@ int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
                   arg.mac_addr,
                   __le32_to_cpu(arg.status));
 
-       ether_addr_copy(ar->mac_addr, arg.mac_addr);
+       if (is_zero_ether_addr(ar->mac_addr))
+               ether_addr_copy(ar->mac_addr, arg.mac_addr);
        complete(&ar->wmi.unified_ready);
        return 0;
 }
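
The hunk above makes the ready-event handler keep an already-configured MAC
address: the firmware-reported one is adopted only while ar->mac_addr is
still all-zero. A minimal standalone sketch of that guard, with a hand-rolled
is_zero_mac() standing in for the kernel's is_zero_ether_addr():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* stand-in for the kernel's is_zero_ether_addr() */
static bool is_zero_mac(const uint8_t *addr)
{
        static const uint8_t zero[ETH_ALEN];

        return memcmp(addr, zero, ETH_ALEN) == 0;
}

int main(void)
{
        uint8_t dev_mac[ETH_ALEN] = {0}; /* nothing configured yet */
        const uint8_t fw_mac[ETH_ALEN] = {0x00, 0x03, 0x7f, 0x11, 0x22, 0x33};

        /* same idea as the hunk: only fall back to the firmware MAC */
        if (is_zero_mac(dev_mac))
                memcpy(dev_mac, fw_mac, ETH_ALEN);

        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               dev_mac[0], dev_mac[1], dev_mac[2],
               dev_mac[3], dev_mac[4], dev_mac[5]);
        return 0;
}
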
@@ -5945,6 +6009,9 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
                ath10k_dbg(ar, ATH10K_DBG_WMI,
                           "received event id %d not implemented\n", id);
                break;
+       case WMI_10_2_PEER_STA_PS_STATECHG_EVENTID:
+               ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
+               break;
        default:
                ath10k_warn(ar, "Unknown eventid: %d\n", id);
                break;
@@ -6062,6 +6129,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
        case WMI_10_4_DFS_STATUS_CHECK_EVENTID:
                ath10k_wmi_event_dfs_status_check(ar, skb);
                break;
+       case WMI_10_4_PEER_STA_PS_STATECHG_EVENTID:
+               ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
+               break;
        default:
                ath10k_warn(ar, "Unknown eventid: %d\n", id);
                break;
index 36220258e3c7e686af0ffb5a2ab82869b8be5b01..f67c52757ea648439f60056e1c2d57601f625bdc 100644 (file)
@@ -203,6 +203,8 @@ enum wmi_service {
        WMI_SERVICE_TPC_STATS_FINAL,
        WMI_SERVICE_RESET_CHIP,
        WMI_SERVICE_SPOOF_MAC_SUPPORT,
+       WMI_SERVICE_TX_DATA_ACK_RSSI,
+       WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
 
        /* keep last */
        WMI_SERVICE_MAX,
@@ -350,6 +352,13 @@ enum wmi_10_4_service {
        WMI_10_4_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
        WMI_10_4_SERVICE_HOST_DFS_CHECK_SUPPORT,
        WMI_10_4_SERVICE_TPC_STATS_FINAL,
+       WMI_10_4_SERVICE_CFR_CAPTURE_SUPPORT,
+       WMI_10_4_SERVICE_TX_DATA_ACK_RSSI,
+       WMI_10_4_SERVICE_CFR_CAPTURE_IND_MSG_TYPE_LEGACY,
+       WMI_10_4_SERVICE_PER_PACKET_SW_ENCRYPT,
+       WMI_10_4_SERVICE_PEER_TID_CONFIGS_SUPPORT,
+       WMI_10_4_SERVICE_VDEV_BCN_RATE_CONTROL,
+       WMI_10_4_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
 };
 
 static inline char *wmi_service_name(int service_id)
@@ -463,6 +472,8 @@ static inline char *wmi_service_name(int service_id)
        SVCSTR(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT);
        SVCSTR(WMI_SERVICE_TPC_STATS_FINAL);
        SVCSTR(WMI_SERVICE_RESET_CHIP);
+       SVCSTR(WMI_SERVICE_TX_DATA_ACK_RSSI);
+       SVCSTR(WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT);
        default:
                return NULL;
        }
@@ -771,6 +782,10 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
               WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, len);
        SVCMAP(WMI_10_4_SERVICE_TPC_STATS_FINAL,
               WMI_SERVICE_TPC_STATS_FINAL, len);
+       SVCMAP(WMI_10_4_SERVICE_TX_DATA_ACK_RSSI,
+              WMI_SERVICE_TX_DATA_ACK_RSSI, len);
+       SVCMAP(WMI_10_4_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
+              WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, len);
 }
 
 #undef SVCMAP
@@ -2924,6 +2939,7 @@ enum wmi_coex_version {
  * @WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE: TDLS connection tracker in host
  *     enable/disable
  * @WMI_10_4_TDLS_EXPLICIT_MODE_ONLY: Explicit TDLS mode enable/disable
+ * @WMI_10_4_TX_DATA_ACK_RSSI: Enable DATA ACK RSSI if firmware is capable
  */
 enum wmi_10_4_feature_mask {
        WMI_10_4_LTEU_SUPPORT                   = BIT(0),
@@ -2939,6 +2955,7 @@ enum wmi_10_4_feature_mask {
        WMI_10_4_TDLS_UAPSD_SLEEP_STA           = BIT(10),
        WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE = BIT(11),
        WMI_10_4_TDLS_EXPLICIT_MODE_ONLY        = BIT(12),
+       WMI_10_4_TX_DATA_ACK_RSSI               = BIT(16),
 
 };
 
@@ -4153,6 +4170,13 @@ enum wmi_tpc_pream_5ghz {
        WMI_TPC_PREAM_5GHZ_HTCUP,
 };
 
+#define        WMI_PEER_PS_STATE_DISABLED      2
+
+struct wmi_peer_sta_ps_state_chg_event {
+       struct wmi_mac_addr peer_macaddr;
+       __le32 peer_ps_state;
+} __packed;
+
 struct wmi_pdev_chanlist_update_event {
        /* number of channels */
        __le32 num_chan;
@@ -4958,10 +4982,15 @@ enum wmi_rate_preamble {
 #define ATH10K_HW_GI(flags)            (((flags) >> 5) & 0x1)
 #define ATH10K_HW_RATECODE(rate, nss, preamble) \
        (((preamble) << 6) | ((nss) << 4) | (rate))
+#define ATH10K_HW_AMPDU(flags)         ((flags) & 0x1)
+#define ATH10K_HW_BA_FAIL(flags)       (((flags) >> 1) & 0x3)
 
-#define VHT_MCS_NUM     10
-#define VHT_BW_NUM      4
-#define VHT_NSS_NUM     4
+#define ATH10K_VHT_MCS_NUM     10
+#define ATH10K_BW_NUM          4
+#define ATH10K_NSS_NUM         4
+#define ATH10K_LEGACY_NUM      12
+#define ATH10K_GI_NUM          2
+#define ATH10K_HT_MCS_NUM      32
 
 /* Value to disable fixed rate setting */
 #define WMI_FIXED_RATE_NONE    (0xff)
@@ -6642,11 +6671,17 @@ struct wmi_ch_info_ev_arg {
        __le32 rx_frame_count;
 };
 
+/* From 10.4 firmware, not sure all have the same values. */
+enum wmi_vdev_start_status {
+       WMI_VDEV_START_OK = 0,
+       WMI_VDEV_START_CHAN_INVALID,
+};
+
 struct wmi_vdev_start_ev_arg {
        __le32 vdev_id;
        __le32 req_id;
        __le32 resp_type; /* %WMI_VDEV_RESP_ */
-       __le32 status;
+       __le32 status; /* See wmi_vdev_start_status enum above */
 };
 
 struct wmi_peer_kick_ev_arg {
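
The SVCMAP additions above follow the driver's existing pattern: firmware
advertises its services as bits in an array of little-endian words, and each
recognized firmware bit sets the matching abstract WMI_SERVICE_* bit on the
host. A self-contained sketch of the idea, with made-up service IDs and
without the buffer-length check the real macro performs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical IDs, for illustration only */
enum { FW_SERVICE_FOO = 3, HOST_SERVICE_FOO = 7 };

static bool fw_bit_set(const uint32_t *in, int bit)
{
        /* the kernel reads __le32 words; assume a little-endian host here */
        return (in[bit / 32] >> (bit % 32)) & 1;
}

static void svcmap(const uint32_t *in, uint32_t *out, int fw_bit, int host_bit)
{
        if (fw_bit_set(in, fw_bit))
                out[host_bit / 32] |= 1u << (host_bit % 32);
}

int main(void)
{
        uint32_t fw_bitmap[4] = { 1u << FW_SERVICE_FOO };
        uint32_t host_bitmap[1] = { 0 };

        svcmap(fw_bitmap, host_bitmap, FW_SERVICE_FOO, HOST_SERVICE_FOO);
        printf("host bit set: %u\n",
               (host_bitmap[0] >> HOST_SERVICE_FOO) & 1);
        return 0;
}
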
index a6b179f88d36343f9fdd5b61fe08c9d8638567f7..af444dfecae9840e04f55653b674ada2e1950ea6 100644 (file)
@@ -374,6 +374,8 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
                goto cleanup;
        }
 
+       ath10k_mac_wait_tx_complete(ar);
+
        ret = ath10k_wow_enable(ar);
        if (ret) {
                ath10k_warn(ar, "failed to start wow: %d\n", ret);
index e01faf641288fbac6e7412c18025fac5537c52c9..94f70047d3fcb2131946c59d241ac2e0a89c9539 100644 (file)
@@ -1028,8 +1028,6 @@ ath5k_debug_dump_bands(struct ath5k_hw *ah)
        if (likely(!(ah->debug.level & ATH5K_DEBUG_DUMPBANDS)))
                return;
 
-       BUG_ON(!ah->sbands);
-
        for (b = 0; b < NUM_NL80211_BANDS; b++) {
                struct ieee80211_supported_band *band = &ah->sbands[b];
                char bname[6];
index 58fb227a849f68e3aaa918b80ba2a0e1596bbfc3..54132af700943a48f0935ef05bd08734b8d31e27 100644 (file)
@@ -710,8 +710,8 @@ static bool check_device_tree(struct ath6kl *ar)
        for_each_compatible_node(node, NULL, "atheros,ath6kl") {
                board_id = of_get_property(node, board_id_prop, NULL);
                if (board_id == NULL) {
-                       ath6kl_warn("No \"%s\" property on %s node.\n",
-                                   board_id_prop, node->name);
+                       ath6kl_warn("No \"%s\" property on %pOFn node.\n",
+                                   board_id_prop, node);
                        continue;
                }
                snprintf(board_filename, sizeof(board_filename),
index 0c61dbaa62a410529dcba1d49f0ade9c90012622..cb59016c723b47f96c325f5fd5caea89d23d093a 100644 (file)
@@ -638,7 +638,7 @@ void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
        memcpy(vif->bssid, bssid, sizeof(vif->bssid));
        vif->bss_ch = channel;
 
-       if ((vif->nw_type == INFRA_NETWORK)) {
+       if (vif->nw_type == INFRA_NETWORK) {
                ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
                                              vif->listen_intvl_t, 0);
                ath6kl_check_ch_switch(ar, channel);
index ef2dd68d3f779fb26de576e429f0ce98f75dc33e..11d6f975c87dfd232fc5440e2528a7a6a4a9d0b3 100644 (file)
 #define AR5008_11NG_HT_SS_SHIFT                12
 #define AR5008_11NG_HT_DS_SHIFT                20
 
-static const int firstep_table[] =
-/* level:  0   1   2   3   4   5   6   7   8  */
-       { -4, -2,  0,  2,  4,  6,  8, 10, 12 }; /* lvl 0-8, default 2 */
-
 /*
  * register values to turn OFDM weak signal detection OFF
  */
index 239429f103781c096c0c55fee90e5f94bad98967..53ca4b063eb9e7e10984dc7af516780b5032619d 100644 (file)
@@ -144,6 +144,8 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
        RXS_ERR("BEACONS", rx_beacons);
        RXS_ERR("FRAGS", rx_frags);
        RXS_ERR("SPECTRAL", rx_spectral);
+       RXS_ERR("SPECTRAL SMPL GOOD", rx_spectral_sample_good);
+       RXS_ERR("SPECTRAL SMPL ERR", rx_spectral_sample_err);
 
        RXS_ERR("CRC ERR", crc_err);
        RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err);
index 3376990d3a24803cf190c736f1e0fefd1660d3d9..2938b5b96b074983cb0ceb04f71d68f4d0c65eac 100644 (file)
@@ -39,6 +39,8 @@
  * @rx_beacons:  No. of beacons received.
  * @rx_frags:  No. of rx-fragments received.
  * @rx_spectral: No. of spectral packets received.
+ * @rx_spectral_sample_good: No. of good spectral samples
+ * @rx_spectral_sample_err: No. of spectral samples with errors
  */
 struct ath_rx_stats {
        u32 rx_pkts_all;
@@ -58,6 +60,8 @@ struct ath_rx_stats {
        u32 rx_beacons;
        u32 rx_frags;
        u32 rx_spectral;
+       u32 rx_spectral_sample_good;
+       u32 rx_spectral_sample_err;
 };
 
 #ifdef CONFIG_ATH9K_COMMON_DEBUG
index 440e16e641e4a6b774b0d6b715f1b059c5727198..6a43d26276e529332bb065baf4523ceb8722221d 100644 (file)
@@ -59,8 +59,7 @@ ath_cmn_max_idx_verify_ht20_fft(u8 *sample_end, int bytes_read)
 
        sample = sample_end - SPECTRAL_HT20_SAMPLE_LEN + 1;
 
-       max_index = spectral_max_index(mag_info->all_bins,
-                                      SPECTRAL_HT20_NUM_BINS);
+       max_index = spectral_max_index_ht20(mag_info->all_bins);
        max_magnitude = spectral_max_magnitude(mag_info->all_bins);
 
        max_exp = mag_info->max_exp & 0xf;
@@ -72,7 +71,7 @@ ath_cmn_max_idx_verify_ht20_fft(u8 *sample_end, int bytes_read)
        if (bytes_read < SPECTRAL_HT20_SAMPLE_LEN && max_index < 1)
                return -1;
 
-       if (sample[max_index] != (max_magnitude >> max_exp))
+       if ((sample[max_index] & 0xf8) != ((max_magnitude >> max_exp) & 0xf8))
                return -1;
        else
                return 0;
@@ -100,12 +99,10 @@ ath_cmn_max_idx_verify_ht20_40_fft(u8 *sample_end, int bytes_read)
        sample = sample_end - SPECTRAL_HT20_40_SAMPLE_LEN + 1;
 
        lower_mag = spectral_max_magnitude(mag_info->lower_bins);
-       lower_max_index = spectral_max_index(mag_info->lower_bins,
-                                            SPECTRAL_HT20_40_NUM_BINS);
+       lower_max_index = spectral_max_index_ht40(mag_info->lower_bins);
 
        upper_mag = spectral_max_magnitude(mag_info->upper_bins);
-       upper_max_index = spectral_max_index(mag_info->upper_bins,
-                                            SPECTRAL_HT20_40_NUM_BINS);
+       upper_max_index = spectral_max_index_ht40(mag_info->upper_bins);
 
        max_exp = mag_info->max_exp & 0xf;
 
@@ -117,19 +114,10 @@ ath_cmn_max_idx_verify_ht20_40_fft(u8 *sample_end, int bytes_read)
           ((upper_max_index < 1) || (lower_max_index < 1)))
                return -1;
 
-       /* Some time hardware messes up the index and adds
-        * the index of the middle point (dc_pos). Try to fix it.
-        */
-       if ((upper_max_index - dc_pos > 0) &&
-          (sample[upper_max_index] == (upper_mag >> max_exp)))
-               upper_max_index -= dc_pos;
-
-       if ((lower_max_index - dc_pos > 0) &&
-          (sample[lower_max_index - dc_pos] == (lower_mag >> max_exp)))
-               lower_max_index -= dc_pos;
-
-       if ((sample[upper_max_index + dc_pos] != (upper_mag >> max_exp)) ||
-          (sample[lower_max_index] != (lower_mag >> max_exp)))
+       if (((sample[upper_max_index + dc_pos] & 0xf8) !=
+            ((upper_mag >> max_exp) & 0xf8)) ||
+           ((sample[lower_max_index] & 0xf8) !=
+            ((lower_mag >> max_exp) & 0xf8)))
                return -1;
        else
                return 0;
@@ -169,8 +157,7 @@ ath_cmn_process_ht20_fft(struct ath_rx_status *rs,
        magnitude = spectral_max_magnitude(mag_info->all_bins);
        fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
 
-       max_index = spectral_max_index(mag_info->all_bins,
-                                       SPECTRAL_HT20_NUM_BINS);
+       max_index = spectral_max_index_ht20(mag_info->all_bins);
        fft_sample_20.max_index = max_index;
 
        bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
@@ -188,7 +175,8 @@ ath_cmn_process_ht20_fft(struct ath_rx_status *rs,
                                        magnitude >> max_exp,
                                        max_index);
 
-       if (fft_sample_20.data[max_index] != (magnitude >> max_exp)) {
+       if ((fft_sample_20.data[max_index] & 0xf8) !=
+           ((magnitude >> max_exp) & 0xf8)) {
                ath_dbg(common, SPECTRAL_SCAN, "Magnitude mismatch !\n");
                ret = -1;
        }
@@ -302,12 +290,10 @@ ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs,
        upper_mag = spectral_max_magnitude(mag_info->upper_bins);
        fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
 
-       lower_max_index = spectral_max_index(mag_info->lower_bins,
-                                       SPECTRAL_HT20_40_NUM_BINS);
+       lower_max_index = spectral_max_index_ht40(mag_info->lower_bins);
        fft_sample_40.lower_max_index = lower_max_index;
 
-       upper_max_index = spectral_max_index(mag_info->upper_bins,
-                                       SPECTRAL_HT20_40_NUM_BINS);
+       upper_max_index = spectral_max_index_ht40(mag_info->upper_bins);
        fft_sample_40.upper_max_index = upper_max_index;
 
        lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
@@ -331,29 +317,13 @@ ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs,
                                        upper_mag >> max_exp,
                                        upper_max_index);
 
-       /* Some time hardware messes up the index and adds
-        * the index of the middle point (dc_pos). Try to fix it.
-        */
-       if ((upper_max_index - dc_pos > 0) &&
-          (fft_sample_40.data[upper_max_index] == (upper_mag >> max_exp))) {
-               upper_max_index -= dc_pos;
-               fft_sample_40.upper_max_index = upper_max_index;
-       }
-
-       if ((lower_max_index - dc_pos > 0) &&
-          (fft_sample_40.data[lower_max_index - dc_pos] ==
-          (lower_mag >> max_exp))) {
-               lower_max_index -= dc_pos;
-               fft_sample_40.lower_max_index = lower_max_index;
-       }
-
        /* Check if we got the expected magnitude values at
         * the expected bins
         */
-       if ((fft_sample_40.data[upper_max_index + dc_pos]
-           != (upper_mag >> max_exp)) ||
-          (fft_sample_40.data[lower_max_index]
-           != (lower_mag >> max_exp))) {
+       if (((fft_sample_40.data[upper_max_index + dc_pos] & 0xf8)
+           != ((upper_mag >> max_exp) & 0xf8)) ||
+          ((fft_sample_40.data[lower_max_index] & 0xf8)
+           != ((lower_mag >> max_exp) & 0xf8))) {
                ath_dbg(common, SPECTRAL_SCAN, "Magnitude mismatch !\n");
                ret = -1;
        }
@@ -411,7 +381,7 @@ ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs,
 
                ath_dbg(common, SPECTRAL_SCAN,
                        "Calculated new upper max 0x%X at %i\n",
-                       tmp_mag, i);
+                       tmp_mag, fft_sample_40.upper_max_index);
        } else
        for (i = dc_pos; i < SPECTRAL_HT20_40_NUM_BINS; i++) {
                if (fft_sample_40.data[i] == (upper_mag >> max_exp))
@@ -501,6 +471,7 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
        u8 sample_buf[SPECTRAL_SAMPLE_MAX_LEN] = {0};
        struct ath_hw *ah = spec_priv->ah;
        struct ath_common *common = ath9k_hw_common(spec_priv->ah);
+       struct ath_softc *sc = (struct ath_softc *)common->priv;
        u8 num_bins, *vdata = (u8 *)hdr;
        struct ath_radar_info *radar_info;
        int len = rs->rs_datalen;
@@ -649,8 +620,13 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
                                                       sample_buf, sample_len,
                                                       sample_bytes);
 
-                               fft_handler(rs, spec_priv, sample_buf,
-                                           tsf, freq, chan_type);
+                               ret = fft_handler(rs, spec_priv, sample_buf,
+                                                 tsf, freq, chan_type);
+
+                               if (ret == 0)
+                                       RX_STAT_INC(rx_spectral_sample_good);
+                               else
+                                       RX_STAT_INC(rx_spectral_sample_err);
 
                                memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN);
 
@@ -665,6 +641,11 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
                                ret = fft_handler(rs, spec_priv, sample_start,
                                                  tsf, freq, chan_type);
 
+                               if (ret == 0)
+                                       RX_STAT_INC(rx_spectral_sample_good);
+                               else
+                                       RX_STAT_INC(rx_spectral_sample_err);
+
                                /* Mix the received bins to the /dev/random
                                 * pool
                                 */
@@ -675,7 +656,7 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
                         * loop.
                         */
                        if (len <= fft_len + 2)
-                               break;
+                               return 1;
 
                        sample_start = &vdata[i + 1];
 
index 303ab470ce34b6d92efaade5492ddd2515ccfdd2..011d8ab8b9744610d89dc809449ea5b99cd86fae 100644 (file)
@@ -145,6 +145,23 @@ static inline u8 spectral_max_index(u8 *bins, int num_bins)
        return m;
 }
 
+static inline u8 spectral_max_index_ht40(u8 *bins)
+{
+       u8 idx;
+
+       idx = spectral_max_index(bins, SPECTRAL_HT20_40_NUM_BINS);
+
+       /* Positive values and zero start at the beginning
+        * of the data field.
+        */
+       return idx % (SPECTRAL_HT20_40_NUM_BINS / 2);
+}
+
+static inline u8 spectral_max_index_ht20(u8 *bins)
+{
+       return spectral_max_index(bins, SPECTRAL_HT20_NUM_BINS);
+}
+
 /* return the bitmap weight from the all/upper/lower bins */
 static inline u8 spectral_bitmap_weight(u8 *bins)
 {
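
The new spectral_max_index_ht40() above folds an index taken over the
combined lower+upper HT40 bins back into a single half, since each half's
values index from the start of its own data field. A sketch with a
placeholder bin count (the real constant is SPECTRAL_HT20_40_NUM_BINS in
this header):

#include <stdio.h>

#define NUM_BINS 128 /* placeholder value, for illustration only */

/* mirror of the helper: fold a combined-bins index into one half */
static unsigned int max_index_ht40(unsigned int idx)
{
        return idx % (NUM_BINS / 2);
}

int main(void)
{
        printf("%u\n", max_index_ht40(10)); /* lower half: stays 10 */
        printf("%u\n", max_index_ht40(74)); /* upper half: 74 % 64 = 10 */
        return 0;
}
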
index 0a6eb8a8c1ed01ada9d6b10fb861f449f27a637e..c871b7ec5011f1b1e4504739afce9cc445c96266 100644 (file)
@@ -990,19 +990,6 @@ static int read_file_dump_nfcal(struct seq_file *file, void *data)
        return 0;
 }
 
-static int open_file_dump_nfcal(struct inode *inode, struct file *f)
-{
-       return single_open(f, read_file_dump_nfcal, inode->i_private);
-}
-
-static const struct file_operations fops_dump_nfcal = {
-       .read = seq_read,
-       .open = open_file_dump_nfcal,
-       .owner = THIS_MODULE,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
 static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
index a6f45f1bb5bb2f24abdcb7311218bd55abd720f2..e8fcd3e1c47057ae5b8f7626b2fbdc27284ef27c 100644 (file)
@@ -116,7 +116,7 @@ void ath_debug_rate_stats(struct ath_softc *sc,
                if (rxs->rate_idx >= ARRAY_SIZE(rstats->ht_stats))
                        goto exit;
 
-               if ((rxs->bw == RATE_INFO_BW_40))
+               if (rxs->bw == RATE_INFO_BW_40)
                        rstats->ht_stats[rxs->rate_idx].ht40_cnt++;
                else
                        rstats->ht_stats[rxs->rate_idx].ht20_cnt++;
@@ -286,9 +286,25 @@ static ssize_t read_airtime(struct file *file, char __user *user_buf,
        return retval;
 }
 
+static ssize_t
+write_airtime_reset_stub(struct file *file, const char __user *ubuf,
+                  size_t count, loff_t *ppos)
+{
+       struct ath_node *an = file->private_data;
+       struct ath_airtime_stats *astats;
+       int i;
+
+       astats = &an->airtime_stats;
+       astats->rx_airtime = 0;
+       astats->tx_airtime = 0;
+       for (i = 0; i < 4; i++)
+               an->airtime_deficit[i] = ATH_AIRTIME_QUANTUM;
+       return count;
+}
 
 static const struct file_operations fops_airtime = {
        .read = read_airtime,
+       .write = write_airtime_reset_stub,
        .open = simple_open,
        .owner = THIS_MODULE,
        .llseek = default_llseek,
@@ -304,5 +320,5 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
 
        debugfs_create_file("node_aggr", 0444, dir, an, &fops_node_aggr);
        debugfs_create_file("node_recv", 0444, dir, an, &fops_node_recv);
-       debugfs_create_file("airtime", 0444, dir, an, &fops_airtime);
+       debugfs_create_file("airtime", 0644, dir, an, &fops_airtime);
 }
index 1049773378f274e2f8c4ccf1050cb58f50858366..c85f613e8ceb5e0a8ff3ba92a20e170a55ed3825 100644 (file)
@@ -1251,8 +1251,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
        struct ath_vif *avp = (void *)vif->drv_priv;
        struct ath_node *an = &avp->mcast_node;
 
-       mutex_lock(&sc->mutex);
-
        if (IS_ENABLED(CONFIG_ATH9K_TX99)) {
                if (sc->cur_chan->nvifs >= 1) {
                        mutex_unlock(&sc->mutex);
@@ -1261,6 +1259,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
                sc->tx99_vif = vif;
        }
 
+       mutex_lock(&sc->mutex);
+
        ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
        sc->cur_chan->nvifs++;
 
index ce50d8f5835e03cf22cbb7f19c2bb1bc0b61296c..95544ce05acf9d35f7dc64b8f6dd5fb7c1e32b25 100644 (file)
@@ -56,11 +56,6 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
        struct sk_buff *skb;
        struct ath_vif *avp;
 
-       if (!sc->tx99_vif)
-               return NULL;
-
-       avp = (struct ath_vif *)sc->tx99_vif->drv_priv;
-
        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return NULL;
@@ -77,7 +72,10 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
        memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
        memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
 
-       hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
+       if (sc->tx99_vif) {
+               avp = (struct ath_vif *) sc->tx99_vif->drv_priv;
+               hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
+       }
 
        tx_info = IEEE80211_SKB_CB(skb);
        memset(tx_info, 0, sizeof(*tx_info));
index 0cb5b58925dc4d55026580b560f4824486fc19df..8c75651ede6c3b5727ec01b9c50fad5077d567b5 100644 (file)
@@ -246,8 +246,8 @@ static void carl9170_release_dev_space(struct ar9170 *ar, struct sk_buff *skb)
         *    of available memory blocks, so the number can
         *    never exceed the mem_blocks count.
         */
-       if (unlikely(WARN_ON_ONCE(cookie == 0) ||
-           WARN_ON_ONCE(cookie > ar->fw.mem_blocks)))
+       if (WARN_ON_ONCE(cookie == 0) ||
+           WARN_ON_ONCE(cookie > ar->fw.mem_blocks))
                return;
 
        atomic_add(DIV_ROUND_UP(skb->len, ar->fw.mem_block_size),
index 06cfe8d311f39ba3bcdc8a0d7756d05693a5d367..5ab3e31c9ffadab87a57617701650117bc1fd28d 100644 (file)
@@ -174,13 +174,12 @@ static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn
        int i;
 
        size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
-       wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
-                                             GFP_KERNEL);
+       wcn_ch->cpu_addr = dma_zalloc_coherent(dev, size,
+                                              &wcn_ch->dma_addr,
+                                              GFP_KERNEL);
        if (!wcn_ch->cpu_addr)
                return -ENOMEM;
 
-       memset(wcn_ch->cpu_addr, 0, size);
-
        cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
        cur_ctl = wcn_ch->head_blk_ctl;
 
@@ -628,13 +627,13 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
                16 - (WCN36XX_BD_CHUNK_SIZE % 8);
 
        s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
-       cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->mgmt_mem_pool.phy_addr,
-                                     GFP_KERNEL);
+       cpu_addr = dma_zalloc_coherent(wcn->dev, s,
+                                      &wcn->mgmt_mem_pool.phy_addr,
+                                      GFP_KERNEL);
        if (!cpu_addr)
                goto out_err;
 
        wcn->mgmt_mem_pool.virt_addr = cpu_addr;
-       memset(cpu_addr, 0, s);
 
        /* Allocate BD headers for DATA frames */
 
@@ -643,13 +642,13 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
                16 - (WCN36XX_BD_CHUNK_SIZE % 8);
 
        s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
-       cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->data_mem_pool.phy_addr,
-                                     GFP_KERNEL);
+       cpu_addr = dma_zalloc_coherent(wcn->dev, s,
+                                      &wcn->data_mem_pool.phy_addr,
+                                      GFP_KERNEL);
        if (!cpu_addr)
                goto out_err;
 
        wcn->data_mem_pool.virt_addr = cpu_addr;
-       memset(cpu_addr, 0, s);
 
        return 0;
 
index 00098f24116dea042ad5e57526e4a4cf6b64183d..1d2d698fb77933a8eab236b1b31dc6db240bd484 100644 (file)
@@ -792,10 +792,10 @@ static int wcn36xx_smd_process_ptt_msg_rsp(void *buf, size_t len,
                         rsp->header.len - sizeof(rsp->ptt_msg_resp_status));
 
        if (rsp->header.len > 0) {
-               *p_ptt_rsp_msg = kmalloc(rsp->header.len, GFP_ATOMIC);
+               *p_ptt_rsp_msg = kmemdup(rsp->ptt_msg, rsp->header.len,
+                                        GFP_ATOMIC);
                if (!*p_ptt_rsp_msg)
                        return -ENOMEM;
-               memcpy(*p_ptt_rsp_msg, rsp->ptt_msg, rsp->header.len);
        }
        return ret;
 }
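
The change above is a pure simplification: kmemdup() allocates and copies in
one call, replacing the kmalloc()+memcpy() pair while keeping the same
failure behaviour. Its userspace analogue is roughly:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* userspace sketch of kmemdup(src, len, gfp): allocate and copy in
 * one step, returning NULL on allocation failure
 */
static void *memdup(const void *src, size_t len)
{
        void *p = malloc(len);

        if (p)
                memcpy(p, src, len);
        return p;
}

int main(void)
{
        const char msg[] = "ptt response";
        char *copy = memdup(msg, sizeof(msg));

        if (!copy)
                return 1;
        puts(copy);
        free(copy);
        return 0;
}
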
index f79c337105cb465f6895f39e7d37d7d1d624db74..d18e81fae5f10abe87a84d7e81f52b20a49791c2 100644 (file)
@@ -48,9 +48,29 @@ static struct ieee80211_channel wil_60ghz_channels[] = {
        CHAN60G(1, 0),
        CHAN60G(2, 0),
        CHAN60G(3, 0),
-/* channel 4 not supported yet */
+       CHAN60G(4, 0),
 };
 
+static int wil_num_supported_channels(struct wil6210_priv *wil)
+{
+       int num_channels = ARRAY_SIZE(wil_60ghz_channels);
+
+       if (!test_bit(WMI_FW_CAPABILITY_CHANNEL_4, wil->fw_capabilities))
+               num_channels--;
+
+       return num_channels;
+}
+
+void update_supported_bands(struct wil6210_priv *wil)
+{
+       struct wiphy *wiphy = wil_to_wiphy(wil);
+
+       wil_dbg_misc(wil, "update supported bands");
+
+       wiphy->bands[NL80211_BAND_60GHZ]->n_channels =
+                                               wil_num_supported_channels(wil);
+}
+
 /* Vendor id to be used in vendor specific command and events
  * to user space.
  * NOTE: The authoritative place for definition of QCA_NL80211_VENDOR_ID,
@@ -199,7 +219,9 @@ wil_mgmt_stypes[NUM_NL80211_IFTYPES] = {
                .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
                BIT(IEEE80211_STYPE_PROBE_RESP >> 4) |
                BIT(IEEE80211_STYPE_ASSOC_RESP >> 4) |
-               BIT(IEEE80211_STYPE_DISASSOC >> 4),
+               BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+               BIT(IEEE80211_STYPE_AUTH >> 4) |
+               BIT(IEEE80211_STYPE_REASSOC_RESP >> 4),
                .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
                BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
                BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
@@ -871,6 +893,26 @@ static void wil_print_crypto(struct wil6210_priv *wil,
                     c->control_port_no_encrypt);
 }
 
+static const char *
+wil_get_auth_type_name(enum nl80211_auth_type auth_type)
+{
+       switch (auth_type) {
+       case NL80211_AUTHTYPE_OPEN_SYSTEM:
+               return "OPEN_SYSTEM";
+       case NL80211_AUTHTYPE_SHARED_KEY:
+               return "SHARED_KEY";
+       case NL80211_AUTHTYPE_FT:
+               return "FT";
+       case NL80211_AUTHTYPE_NETWORK_EAP:
+               return "NETWORK_EAP";
+       case NL80211_AUTHTYPE_SAE:
+               return "SAE";
+       case NL80211_AUTHTYPE_AUTOMATIC:
+               return "AUTOMATIC";
+       default:
+               return "unknown";
+       }
+}
+
 static void wil_print_connect_params(struct wil6210_priv *wil,
                                     struct cfg80211_connect_params *sme)
 {
@@ -884,11 +926,73 @@ static void wil_print_connect_params(struct wil6210_priv *wil,
        if (sme->ssid)
                print_hex_dump(KERN_INFO, "  SSID: ", DUMP_PREFIX_OFFSET,
                               16, 1, sme->ssid, sme->ssid_len, true);
+       if (sme->prev_bssid)
+               wil_info(wil, "  Previous BSSID=%pM\n", sme->prev_bssid);
+       wil_info(wil, "  Auth Type: %s\n",
+                wil_get_auth_type_name(sme->auth_type));
        wil_info(wil, "  Privacy: %s\n", sme->privacy ? "secure" : "open");
        wil_info(wil, "  PBSS: %d\n", sme->pbss);
        wil_print_crypto(wil, &sme->crypto);
 }
 
+static int wil_ft_connect(struct wiphy *wiphy,
+                         struct net_device *ndev,
+                         struct cfg80211_connect_params *sme)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       struct wil6210_vif *vif = ndev_to_vif(ndev);
+       struct wmi_ft_auth_cmd auth_cmd;
+       int rc;
+
+       if (!test_bit(WMI_FW_CAPABILITY_FT_ROAMING, wil->fw_capabilities)) {
+               wil_err(wil, "FT: FW does not support FT roaming\n");
+               return -EOPNOTSUPP;
+       }
+
+       if (!sme->prev_bssid) {
+               wil_err(wil, "FT: prev_bssid was not set\n");
+               return -EINVAL;
+       }
+
+       if (ether_addr_equal(sme->prev_bssid, sme->bssid)) {
+               wil_err(wil, "FT: can not roam to same AP\n");
+               return -EINVAL;
+       }
+
+       if (!test_bit(wil_vif_fwconnected, vif->status)) {
+               wil_err(wil, "FT: roam while not connected\n");
+               return -EINVAL;
+       }
+
+       if (vif->privacy != sme->privacy) {
+               wil_err(wil, "FT: privacy mismatch, current (%d) roam (%d)\n",
+                       vif->privacy, sme->privacy);
+               return -EINVAL;
+       }
+
+       if (sme->pbss) {
+               wil_err(wil, "FT: roam is not valid for PBSS\n");
+               return -EINVAL;
+       }
+
+       memset(&auth_cmd, 0, sizeof(auth_cmd));
+       auth_cmd.channel = sme->channel->hw_value - 1;
+       ether_addr_copy(auth_cmd.bssid, sme->bssid);
+
+       wil_info(wil, "FT: roaming\n");
+
+       set_bit(wil_vif_ft_roam, vif->status);
+       rc = wmi_send(wil, WMI_FT_AUTH_CMDID, vif->mid,
+                     &auth_cmd, sizeof(auth_cmd));
+       if (rc == 0)
+               mod_timer(&vif->connect_timer,
+                         jiffies + msecs_to_jiffies(5000));
+       else
+               clear_bit(wil_vif_ft_roam, vif->status);
+
+       return rc;
+}
+
 static int wil_cfg80211_connect(struct wiphy *wiphy,
                                struct net_device *ndev,
                                struct cfg80211_connect_params *sme)
@@ -901,14 +1005,23 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
        const u8 *rsn_eid;
        int ch;
        int rc = 0;
+       bool is_ft_roam = false;
+       u8 network_type;
        enum ieee80211_bss_type bss_type = IEEE80211_BSS_TYPE_ESS;
 
        wil_dbg_misc(wil, "connect, mid=%d\n", vif->mid);
        wil_print_connect_params(wil, sme);
 
-       if (test_bit(wil_vif_fwconnecting, vif->status) ||
+       if (sme->auth_type == NL80211_AUTHTYPE_FT)
+               is_ft_roam = true;
+       if (sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC &&
            test_bit(wil_vif_fwconnected, vif->status))
-               return -EALREADY;
+               is_ft_roam = true;
+
+       if (!is_ft_roam)
+               if (test_bit(wil_vif_fwconnecting, vif->status) ||
+                   test_bit(wil_vif_fwconnected, vif->status))
+                       return -EALREADY;
 
        if (sme->ie_len > WMI_MAX_IE_LEN) {
                wil_err(wil, "IE too large (%td bytes)\n", sme->ie_len);
@@ -918,8 +1031,13 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
        rsn_eid = sme->ie ?
                        cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
                        NULL;
-       if (sme->privacy && !rsn_eid)
+       if (sme->privacy && !rsn_eid) {
                wil_info(wil, "WSC connection\n");
+               if (is_ft_roam) {
+                       wil_err(wil, "No WSC with FT roam\n");
+                       return -EINVAL;
+               }
+       }
 
        if (sme->pbss)
                bss_type = IEEE80211_BSS_TYPE_PBSS;
@@ -941,6 +1059,45 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
        vif->privacy = sme->privacy;
        vif->pbss = sme->pbss;
 
+       rc = wmi_set_ie(vif, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie);
+       if (rc)
+               goto out;
+
+       switch (bss->capability & WLAN_CAPABILITY_DMG_TYPE_MASK) {
+       case WLAN_CAPABILITY_DMG_TYPE_AP:
+               network_type = WMI_NETTYPE_INFRA;
+               break;
+       case WLAN_CAPABILITY_DMG_TYPE_PBSS:
+               network_type = WMI_NETTYPE_P2P;
+               break;
+       default:
+               wil_err(wil, "Unsupported BSS type, capability= 0x%04x\n",
+                       bss->capability);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       ch = bss->channel->hw_value;
+       if (ch == 0) {
+               wil_err(wil, "BSS at unknown frequency %dMhz\n",
+                       bss->channel->center_freq);
+               rc = -EOPNOTSUPP;
+               goto out;
+       }
+
+       if (is_ft_roam) {
+               if (network_type != WMI_NETTYPE_INFRA) {
+                       wil_err(wil, "FT: Unsupported BSS type, capability= 0x%04x\n",
+                               bss->capability);
+                       rc = -EINVAL;
+                       goto out;
+               }
+               rc = wil_ft_connect(wiphy, ndev, sme);
+               if (rc == 0)
+                       vif->bss = bss;
+               goto out;
+       }
+
        if (vif->privacy) {
                /* For secure assoc, remove old keys */
                rc = wmi_del_cipher_key(vif, 0, bss->bssid,
@@ -957,28 +1114,9 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
                }
        }
 
-       /* WMI_SET_APPIE_CMD. ie may contain rsn info as well as other info
-        * elements. Send it also in case it's empty, to erase previously set
-        * ies in FW.
-        */
-       rc = wmi_set_ie(vif, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie);
-       if (rc)
-               goto out;
-
        /* WMI_CONNECT_CMD */
        memset(&conn, 0, sizeof(conn));
-       switch (bss->capability & WLAN_CAPABILITY_DMG_TYPE_MASK) {
-       case WLAN_CAPABILITY_DMG_TYPE_AP:
-               conn.network_type = WMI_NETTYPE_INFRA;
-               break;
-       case WLAN_CAPABILITY_DMG_TYPE_PBSS:
-               conn.network_type = WMI_NETTYPE_P2P;
-               break;
-       default:
-               wil_err(wil, "Unsupported BSS type, capability= 0x%04x\n",
-                       bss->capability);
-               goto out;
-       }
+       conn.network_type = network_type;
        if (vif->privacy) {
                if (rsn_eid) { /* regular secure connection */
                        conn.dot11_auth_mode = WMI_AUTH11_SHARED;
@@ -998,14 +1136,6 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
 
        conn.ssid_len = min_t(u8, ssid_eid[1], 32);
        memcpy(conn.ssid, ssid_eid+2, conn.ssid_len);
-
-       ch = bss->channel->hw_value;
-       if (ch == 0) {
-               wil_err(wil, "BSS at unknown frequency %dMhz\n",
-                       bss->channel->center_freq);
-               rc = -EOPNOTSUPP;
-               goto out;
-       }
        conn.channel = ch - 1;
 
        ether_addr_copy(conn.bssid, bss->bssid);
@@ -1201,9 +1331,9 @@ wil_find_sta_by_key_usage(struct wil6210_priv *wil, u8 mid,
        return &wil->sta[cid];
 }
 
-static void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage,
-                             struct wil_sta_info *cs,
-                             struct key_params *params)
+void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage,
+                      struct wil_sta_info *cs,
+                      struct key_params *params)
 {
        struct wil_tid_crypto_rx_single *cc;
        int tid;
@@ -1286,13 +1416,19 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
                     params->seq_len, params->seq);
 
        if (IS_ERR(cs)) {
-               wil_err(wil, "Not connected, %pM %s[%d] PN %*phN\n",
-                       mac_addr, key_usage_str[key_usage], key_index,
-                       params->seq_len, params->seq);
-               return -EINVAL;
+               /* in FT, sta info may not be available as add_key may be
+                * sent by host before FW sends WMI_CONNECT_EVENT
+                */
+               if (!test_bit(wil_vif_ft_roam, vif->status)) {
+                       wil_err(wil, "Not connected, %pM %s[%d] PN %*phN\n",
+                               mac_addr, key_usage_str[key_usage], key_index,
+                               params->seq_len, params->seq);
+                       return -EINVAL;
+               }
        }
 
-       wil_del_rx_key(key_index, key_usage, cs);
+       if (!IS_ERR(cs))
+               wil_del_rx_key(key_index, key_usage, cs);
 
        if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) {
                wil_err(wil,
@@ -1305,7 +1441,10 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
 
        rc = wmi_add_cipher_key(vif, key_index, mac_addr, params->key_len,
                                params->key, key_usage);
-       if (!rc)
+       if (!rc && !IS_ERR(cs))
+               /* in FT, crypto setup will take place upon receiving
+                * the WMI_RING_EN_EVENTID event
+                */
                wil_set_crypto_rx(key_index, key_usage, cs, params);
 
        return rc;
@@ -1468,21 +1607,36 @@ static void wil_print_bcon_data(struct cfg80211_beacon_data *b)
 }
 
 /* internal functions for device reset and starting AP */
-static int _wil_cfg80211_set_ies(struct wil6210_vif *vif,
-                                struct cfg80211_beacon_data *bcon)
+static u8 *
+_wil_cfg80211_get_proberesp_ies(const u8 *proberesp, u16 proberesp_len,
+                               u16 *ies_len)
 {
-       int rc;
-       u16 len = 0, proberesp_len = 0;
-       u8 *ies = NULL, *proberesp = NULL;
+       u8 *ies = NULL;
 
-       if (bcon->probe_resp) {
+       if (proberesp) {
                struct ieee80211_mgmt *f =
-                       (struct ieee80211_mgmt *)bcon->probe_resp;
+                       (struct ieee80211_mgmt *)proberesp;
                size_t hlen = offsetof(struct ieee80211_mgmt,
                                       u.probe_resp.variable);
-               proberesp = f->u.probe_resp.variable;
-               proberesp_len = bcon->probe_resp_len - hlen;
+
+               ies = f->u.probe_resp.variable;
+               if (ies_len)
+                       *ies_len = proberesp_len - hlen;
        }
+
+       return ies;
+}
+
+static int _wil_cfg80211_set_ies(struct wil6210_vif *vif,
+                                struct cfg80211_beacon_data *bcon)
+{
+       int rc;
+       u16 len = 0, proberesp_len = 0;
+       u8 *ies = NULL, *proberesp;
+
+       proberesp = _wil_cfg80211_get_proberesp_ies(bcon->probe_resp,
+                                                   bcon->probe_resp_len,
+                                                   &proberesp_len);
        rc = _wil_cfg80211_merge_extra_ies(proberesp,
                                           proberesp_len,
                                           bcon->proberesp_ies,
@@ -1526,6 +1680,9 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
        struct wireless_dev *wdev = ndev->ieee80211_ptr;
        u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
        u8 is_go = (wdev->iftype == NL80211_IFTYPE_P2P_GO);
+       u16 proberesp_len = 0;
+       u8 *proberesp;
+       bool ft = false;
 
        if (pbss)
                wmi_nettype = WMI_NETTYPE_P2P;
@@ -1538,6 +1695,25 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
 
        wil_set_recovery_state(wil, fw_recovery_idle);
 
+       proberesp = _wil_cfg80211_get_proberesp_ies(bcon->probe_resp,
+                                                   bcon->probe_resp_len,
+                                                   &proberesp_len);
+       /* check whether the probe response IEs contain an MDE */
+       if (proberesp && proberesp_len > 0 &&
+           cfg80211_find_ie(WLAN_EID_MOBILITY_DOMAIN,
+                            proberesp,
+                            proberesp_len))
+               ft = true;
+
+       if (ft) {
+               if (!test_bit(WMI_FW_CAPABILITY_FT_ROAMING,
+                             wil->fw_capabilities)) {
+                       wil_err(wil, "FW does not support FT roaming\n");
+                       return -ENOTSUPP;
+               }
+               set_bit(wil_vif_ft_roam, vif->status);
+       }
+
        mutex_lock(&wil->mutex);
 
        if (!wil_has_other_active_ifaces(wil, ndev, true, false)) {
@@ -1699,6 +1875,7 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
        mutex_lock(&wil->mutex);
 
        wmi_pcp_stop(vif);
+       clear_bit(wil_vif_ft_roam, vif->status);
 
        if (last)
                __wil_down(wil);
@@ -1718,8 +1895,9 @@ static int wil_cfg80211_add_station(struct wiphy *wiphy,
        struct wil6210_vif *vif = ndev_to_vif(dev);
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 
-       wil_dbg_misc(wil, "add station %pM aid %d mid %d\n",
-                    mac, params->aid, vif->mid);
+       wil_dbg_misc(wil, "add station %pM aid %d mid %d mask 0x%x set 0x%x\n",
+                    mac, params->aid, vif->mid,
+                    params->sta_flags_mask, params->sta_flags_set);
 
        if (!disable_ap_sme) {
                wil_err(wil, "not supported with AP SME enabled\n");
@@ -2040,6 +2218,54 @@ wil_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev,
        return 0;
 }
 
+static int
+wil_cfg80211_update_ft_ies(struct wiphy *wiphy, struct net_device *dev,
+                          struct cfg80211_update_ft_ies_params *ftie)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       struct wil6210_vif *vif = ndev_to_vif(dev);
+       struct cfg80211_bss *bss;
+       struct wmi_ft_reassoc_cmd reassoc;
+       int rc = 0;
+
+       wil_dbg_misc(wil, "update ft ies, mid=%d\n", vif->mid);
+       wil_hex_dump_misc("FT IE ", DUMP_PREFIX_OFFSET, 16, 1,
+                         ftie->ie, ftie->ie_len, true);
+
+       if (!test_bit(WMI_FW_CAPABILITY_FT_ROAMING, wil->fw_capabilities)) {
+               wil_err(wil, "FW does not support FT roaming\n");
+               return -EOPNOTSUPP;
+       }
+
+       rc = wmi_update_ft_ies(vif, ftie->ie_len, ftie->ie);
+       if (rc)
+               return rc;
+
+       if (!test_bit(wil_vif_ft_roam, vif->status))
+               /* vif is not roaming */
+               return 0;
+
+       /* wil_vif_ft_roam is set. wil_cfg80211_update_ft_ies is used as
+        * a trigger for reassoc
+        */
+
+       bss = vif->bss;
+       if (!bss) {
+               wil_err(wil, "FT: bss is NULL\n");
+               return -EINVAL;
+       }
+
+       memset(&reassoc, 0, sizeof(reassoc));
+       ether_addr_copy(reassoc.bssid, bss->bssid);
+
+       rc = wmi_send(wil, WMI_FT_REASSOC_CMDID, vif->mid,
+                     &reassoc, sizeof(reassoc));
+       if (rc)
+               wil_err(wil, "FT: reassoc failed (%d)\n", rc);
+
+       return rc;
+}
+
 static const struct cfg80211_ops wil_cfg80211_ops = {
        .add_virtual_intf = wil_cfg80211_add_iface,
        .del_virtual_intf = wil_cfg80211_del_iface,
@@ -2075,6 +2301,7 @@ static const struct cfg80211_ops wil_cfg80211_ops = {
        .resume = wil_cfg80211_resume,
        .sched_scan_start = wil_cfg80211_sched_scan_start,
        .sched_scan_stop = wil_cfg80211_sched_scan_stop,
+       .update_ft_ies = wil_cfg80211_update_ft_ies,
 };
 
 static void wil_wiphy_init(struct wiphy *wiphy)
index 49533f88499397f49964e71d904dbd6bf72712cd..66ffae2de86e654bbedd8947488f7c94f0fcb3b1 100644 (file)
@@ -725,32 +725,6 @@ struct dentry *wil_debugfs_create_ioblob(const char *name,
        return debugfs_create_file(name, mode, parent, wil_blob, &fops_ioblob);
 }
 
-/*---reset---*/
-static ssize_t wil_write_file_reset(struct file *file, const char __user *buf,
-                                   size_t len, loff_t *ppos)
-{
-       struct wil6210_priv *wil = file->private_data;
-       struct net_device *ndev = wil->main_ndev;
-
-       /**
-        * BUG:
-        * this code does NOT sync device state with the rest of system
-        * use with care, debug only!!!
-        */
-       rtnl_lock();
-       dev_close(ndev);
-       ndev->flags &= ~IFF_UP;
-       rtnl_unlock();
-       wil_reset(wil, true);
-
-       return len;
-}
-
-static const struct file_operations fops_reset = {
-       .write = wil_write_file_reset,
-       .open  = simple_open,
-};
-
 /*---write channel 1..4 to rxon for it, 0 to rxoff---*/
 static ssize_t wil_write_file_rxon(struct file *file, const char __user *buf,
                                   size_t len, loff_t *ppos)
@@ -1263,6 +1237,9 @@ static int wil_rx_buff_mgmt_debugfs_show(struct seq_file *s, void *data)
        int num_active;
        int num_free;
 
+       if (!rbm->buff_arr)
+               return -EINVAL;
+
        seq_printf(s, "  size = %zu\n", rbm->size);
        seq_printf(s, "  free_list_empty_cnt = %lu\n",
                   rbm->free_list_empty_cnt);
@@ -1695,6 +1672,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
                char *status = "unknown";
                u8 aid = 0;
                u8 mid;
+               bool sta_connected = false;
 
                switch (p->status) {
                case wil_sta_unused:
@@ -1709,8 +1687,20 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
                        break;
                }
                mid = (p->status != wil_sta_unused) ? p->mid : U8_MAX;
-               seq_printf(s, "[%d] %pM %s MID %d AID %d\n", i, p->addr, status,
-                          mid, aid);
+               if (mid < wil->max_vifs) {
+                       struct wil6210_vif *vif = wil->vifs[mid];
+
+                       if (vif->wdev.iftype == NL80211_IFTYPE_STATION &&
+                           p->status == wil_sta_connected)
+                               sta_connected = true;
+               }
+               /* print roam counter only for connected stations */
+               if (sta_connected)
+                       seq_printf(s, "[%d] %pM connected (roam counter %d) MID %d AID %d\n",
+                                  i, p->addr, p->stats.ft_roams, mid, aid);
+               else
+                       seq_printf(s, "[%d] %pM %s MID %d AID %d\n", i,
+                                  p->addr, status, mid, aid);
 
                if (p->status == wil_sta_connected) {
                        spin_lock_bh(&p->tid_rx_lock);
@@ -2451,7 +2441,6 @@ static const struct {
        {"desc",        0444,           &fops_txdesc},
        {"bf",          0444,           &fops_bf},
        {"mem_val",     0644,           &fops_memread},
-       {"reset",       0244,           &fops_reset},
        {"rxon",        0244,           &fops_rxon},
        {"tx_mgmt",     0244,           &fops_txmgmt},
        {"wmi_send", 0244,              &fops_wmi},
index 7debed6bec06b4dcbbd66dc6e91a9392c9857387..398900a1c29e2bef9dd015e09e263b8861554c86 100644 (file)
@@ -223,6 +223,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
        struct net_device *ndev = vif_to_ndev(vif);
        struct wireless_dev *wdev = vif_to_wdev(vif);
        struct wil_sta_info *sta = &wil->sta[cid];
+       int min_ring_id = wil_get_min_tx_ring_id(wil);
 
        might_sleep();
        wil_dbg_misc(wil, "disconnect_cid: CID %d, MID %d, status %d\n",
@@ -273,7 +274,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
        memset(sta->tid_crypto_rx, 0, sizeof(sta->tid_crypto_rx));
        memset(&sta->group_crypto_rx, 0, sizeof(sta->group_crypto_rx));
        /* release vrings */
-       for (i = 0; i < ARRAY_SIZE(wil->ring_tx); i++) {
+       for (i = min_ring_id; i < ARRAY_SIZE(wil->ring_tx); i++) {
                if (wil->ring2cid_tid[i][0] == cid)
                        wil_ring_fini_tx(wil, i);
        }
@@ -360,6 +361,8 @@ static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid,
                        vif->bss = NULL;
                }
                clear_bit(wil_vif_fwconnecting, vif->status);
+               clear_bit(wil_vif_ft_roam, vif->status);
+
                break;
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_P2P_GO:
@@ -604,8 +607,10 @@ int wil_priv_init(struct wil6210_priv *wil)
                wil->sta[i].mid = U8_MAX;
        }
 
-       for (i = 0; i < WIL6210_MAX_TX_RINGS; i++)
+       for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
                spin_lock_init(&wil->ring_tx_data[i].lock);
+               wil->ring2cid_tid[i][0] = WIL6210_MAX_CID;
+       }
 
        mutex_init(&wil->mutex);
        mutex_init(&wil->vif_mutex);
@@ -653,8 +658,6 @@ int wil_priv_init(struct wil6210_priv *wil)
 
        /* edma configuration can be updated via debugfs before allocation */
        wil->num_rx_status_rings = WIL_DEFAULT_NUM_RX_STATUS_RINGS;
-       wil->use_compressed_rx_status = true;
-       wil->use_rx_hw_reordering = true;
        wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;
 
        /* Rx status ring size should be bigger than the number of RX buffers
@@ -1154,6 +1157,8 @@ void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
                wil->max_agg_wsize = WIL_MAX_AGG_WSIZE;
                wil->max_ampdu_size = WIL_MAX_AMPDU_SIZE;
        }
+
+       update_supported_bands(wil);
 }
 
 void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
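Note on the WIL6210_MAX_CID initialization above: it turns ring2cid_tid[i][0] into an "unused ring" sentinel, so scans such as the disconnect loop earlier in this diff can compare against a station's cid without matching a freshly zeroed entry (CID 0 is a valid station). A minimal sketch of the resulting invariant:

        /* sketch: a TX ring is bound to a station iff its CID is in range */
        static bool wil_ring_is_bound(struct wil6210_priv *wil, int i)
        {
                return wil->ring2cid_tid[i][0] < WIL6210_MAX_CID;
        }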
index 89119e7facd00c661600bdd4d7648570b733621c..c8c6613371d1bcbd2d0a8e079053bc08b2c41d89 100644 (file)
@@ -108,6 +108,7 @@ int wil_set_capabilities(struct wil6210_priv *wil)
                set_bit(hw_capa_no_flash, wil->hw_capa);
                wil->use_enhanced_dma_hw = true;
                wil->use_rx_hw_reordering = true;
+               wil->use_compressed_rx_status = true;
                wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN :
                              WIL_FW_NAME_TALYN;
                if (wil_fw_verify_file_exists(wil, wil_fw_name))
index 3a4194779ddf6371da82b934565bbf6c69eb2c27..75fe9323547ca58d53475bd27bebf262be6e8e93 100644 (file)
@@ -190,7 +190,7 @@ out:
 static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
 {
        int rc = 0;
-       unsigned long start, data_comp_to;
+       unsigned long data_comp_to;
 
        wil_dbg_pm(wil, "suspend keep radio on\n");
 
@@ -232,7 +232,6 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
        }
 
        /* Wait for completion of the pending RX packets */
-       start = jiffies;
        data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS);
        if (test_bit(wil_status_napi_en, wil->status)) {
                while (!wil->txrx_ops.is_rx_idle(wil)) {
index b608aa16b4f1edf5fa772c6be4cb920b860b6a01..983bd001b53bf5405ed1e6d82d6890e0e4105e74 100644 (file)
@@ -382,11 +382,13 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
        }
 
        /* apply */
-       r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);
-       spin_lock_bh(&sta->tid_rx_lock);
-       wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
-       sta->tid_rx[tid] = r;
-       spin_unlock_bh(&sta->tid_rx_lock);
+       if (!wil->use_rx_hw_reordering) {
+               r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);
+               spin_lock_bh(&sta->tid_rx_lock);
+               wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
+               sta->tid_rx[tid] = r;
+               spin_unlock_bh(&sta->tid_rx_lock);
+       }
 
 out:
        return rc;
index 6a7943e487fb11ba0fc62966da01d5de66826d0a..cc5f263cc965324285394ee85960fcfb6a3a1e25 100644 (file)
@@ -77,8 +77,9 @@ bool wil_is_tx_idle(struct wil6210_priv *wil)
 {
        int i;
        unsigned long data_comp_to;
+       int min_ring_id = wil_get_min_tx_ring_id(wil);
 
-       for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+       for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
                struct wil_ring *vring = &wil->ring_tx[i];
                int vring_index = vring - wil->ring_tx;
                struct wil_ring_tx_data *txdata =
@@ -765,7 +766,14 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
                return;
        }
 
-       if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
+       if (wdev->iftype == NL80211_IFTYPE_STATION) {
+               if (mcast && ether_addr_equal(eth->h_source, ndev->dev_addr)) {
+                       /* mcast packet looped back to us */
+                       rc = GRO_DROP;
+                       dev_kfree_skb(skb);
+                       goto stats;
+               }
+       } else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
                if (mcast) {
                        /* send multicast frames both to higher layers in
                         * local net stack and back to the wireless medium
@@ -1051,6 +1059,88 @@ static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
        return rc;
 }
 
+static int wil_tx_vring_modify(struct wil6210_vif *vif, int ring_id, int cid,
+                              int tid)
+{
+       struct wil6210_priv *wil = vif_to_wil(vif);
+       int rc;
+       struct wmi_vring_cfg_cmd cmd = {
+               .action = cpu_to_le32(WMI_VRING_CMD_MODIFY),
+               .vring_cfg = {
+                       .tx_sw_ring = {
+                               .max_mpdu_size =
+                                       cpu_to_le16(wil_mtu2macbuf(mtu_max)),
+                               .ring_size = 0,
+                       },
+                       .ringid = ring_id,
+                       .cidxtid = mk_cidxtid(cid, tid),
+                       .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+                       .mac_ctrl = 0,
+                       .to_resolution = 0,
+                       .agg_max_wsize = 0,
+                       .schd_params = {
+                               .priority = cpu_to_le16(0),
+                               .timeslot_us = cpu_to_le16(0xfff),
+                       },
+               },
+       };
+       struct {
+               struct wmi_cmd_hdr wmi;
+               struct wmi_vring_cfg_done_event cmd;
+       } __packed reply = {
+               .cmd = {.status = WMI_FW_STATUS_FAILURE},
+       };
+       struct wil_ring *vring = &wil->ring_tx[ring_id];
+       struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+
+       wil_dbg_misc(wil, "vring_modify: ring %d cid %d tid %d\n", ring_id,
+                    cid, tid);
+       lockdep_assert_held(&wil->mutex);
+
+       if (!vring->va) {
+               wil_err(wil, "Tx ring [%d] not allocated\n", ring_id);
+               return -EINVAL;
+       }
+
+       if (wil->ring2cid_tid[ring_id][0] != cid ||
+           wil->ring2cid_tid[ring_id][1] != tid) {
+               wil_err(wil, "ring info does not match cid=%u tid=%u\n",
+                       wil->ring2cid_tid[ring_id][0],
+                       wil->ring2cid_tid[ring_id][1]);
+       }
+
+       cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+
+       rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
+                     WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
+       if (rc)
+               goto fail;
+
+       if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
+               wil_err(wil, "Tx modify failed, status 0x%02x\n",
+                       reply.cmd.status);
+               rc = -EINVAL;
+               goto fail;
+       }
+
+       /* set BA aggregation window size to 0 to force a new BA with the
+        * new AP
+        */
+       txdata->agg_wsize = 0;
+       if (txdata->dot1x_open && agg_wsize >= 0)
+               wil_addba_tx_request(wil, ring_id, agg_wsize);
+
+       return 0;
+fail:
+       spin_lock_bh(&txdata->lock);
+       txdata->dot1x_open = false;
+       txdata->enabled = 0;
+       spin_unlock_bh(&txdata->lock);
+       wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID;
+       wil->ring2cid_tid[ring_id][1] = 0;
+       return rc;
+}
+
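wil_tx_vring_modify() reuses WMI_VRING_CFG_CMDID with WMI_VRING_CMD_MODIFY so an existing TX ring can be re-pointed at the roamed AP without being torn down, and zeroing txdata->agg_wsize forces a fresh BlockAck negotiation with that AP. The function is reached through the ops table; this is how the reassoc handler later in this diff dispatches it:

        /* dispatch as used by wmi_evt_reassoc_status() below */
        rc = wil->txrx_ops.tx_ring_modify(vif, ringid, cid, 0);
        if (rc)
                wil_err(wil, "modify TX ring %d failed (%d)\n", ringid, rc);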
 int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
 {
        struct wil6210_priv *wil = vif_to_wil(vif);
@@ -1935,6 +2025,7 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
                                           bool check_stop)
 {
        int i;
+       int min_ring_id = wil_get_min_tx_ring_id(wil);
 
        if (unlikely(!vif))
                return;
@@ -1967,7 +2058,7 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
                return;
 
        /* check wake */
-       for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+       for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
                struct wil_ring *cur_ring = &wil->ring_tx[i];
                struct wil_ring_tx_data  *txdata = &wil->ring_tx_data[i];
 
@@ -2272,6 +2363,7 @@ void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil)
        wil->txrx_ops.ring_init_bcast = wil_vring_init_bcast;
        wil->txrx_ops.tx_init = wil_tx_init;
        wil->txrx_ops.tx_fini = wil_tx_fini;
+       wil->txrx_ops.tx_ring_modify = wil_tx_vring_modify;
        /* RX ops */
        wil->txrx_ops.rx_init = wil_rx_init;
        wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp;
index bca61cb44c37542ca43ddc58f1d1b9a7fbfac103..2bbae75b9a846373d91835cf5d5dbcc070b3cd8e 100644 (file)
@@ -279,9 +279,6 @@ static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
                u16 buff_id;
 
                *d = *_d;
-               pa = wil_rx_desc_get_addr_edma(&d->dma);
-               dmalen = le16_to_cpu(d->dma.length);
-               dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
 
                /* Extract the SKB from the rx_buff management array */
                buff_id = __le16_to_cpu(d->mac.buff_id);
@@ -291,10 +288,15 @@ static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
                }
                skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
                wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
-               if (unlikely(!skb))
+               if (unlikely(!skb)) {
                        wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
-               else
+               } else {
+                       pa = wil_rx_desc_get_addr_edma(&d->dma);
+                       dmalen = le16_to_cpu(d->dma.length);
+                       dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
+
                        kfree_skb(skb);
+               }
 
                /* Move the buffer from the active to the free list */
                list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
@@ -745,6 +747,16 @@ static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id,
        return rc;
 }
 
+static int wil_tx_ring_modify_edma(struct wil6210_vif *vif, int ring_id,
+                                  int cid, int tid)
+{
+       struct wil6210_priv *wil = vif_to_wil(vif);
+
+       wil_err(wil, "ring modify is not supported for EDMA\n");
+
+       return -EOPNOTSUPP;
+}
+
 /* This function is used only for RX SW reorder */
 static int wil_check_bar(struct wil6210_priv *wil, void *msg, int cid,
                         struct sk_buff *skb, struct wil_net_stats *stats)
@@ -906,6 +918,9 @@ again:
        wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
        if (!skb) {
                wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
+               /* Move the buffer from the active list to the free list */
+               list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
+                         &wil->rx_buff_mgmt.free);
                goto again;
        }
 
@@ -1595,6 +1610,7 @@ void wil_init_txrx_ops_edma(struct wil6210_priv *wil)
        wil->txrx_ops.tx_desc_map = wil_tx_desc_map_edma;
        wil->txrx_ops.tx_desc_unmap = wil_tx_desc_unmap_edma;
        wil->txrx_ops.tx_ring_tso = __wil_tx_ring_tso_edma;
+       wil->txrx_ops.tx_ring_modify = wil_tx_ring_modify_edma;
        /* RX ops */
        wil->txrx_ops.rx_init = wil_rx_init_edma;
        wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp_edma;
index 17c294b1ead13b3bfdca6328481a0a1a7db1a5ae..abb82018d3b4344121759ee5f11c100992cbe9d0 100644 (file)
@@ -449,6 +449,15 @@ static inline void parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid)
        *tid = (cidxtid >> 4) & 0xf;
 }
 
+/**
+ * wil_cid_valid - check whether a cid is in the valid range
+ * @cid: CID value
+ */
+static inline bool wil_cid_valid(u8 cid)
+{
+       return cid < WIL6210_MAX_CID;
+}
+
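For reference, the mk_cidxtid() helper used by the vring-modify code packs the pair that parse_cidxtid() above unpacks; given that the TID occupies the high nibble there, the packing is presumably:

        /* presumed inverse of parse_cidxtid(): CID in the low nibble,
         * TID in the high nibble
         */
        static inline u8 mk_cidxtid(u8 cid, u8 tid)
        {
                return (cid & 0xf) | ((tid & 0xf) << 4);
        }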
 struct wil6210_mbox_ring {
        u32 base;
        u16 entry_size; /* max. size of mbox entry, incl. all headers */
@@ -577,6 +586,7 @@ struct wil_net_stats {
        unsigned long   rx_csum_err;
        u16 last_mcs_rx;
        u64 rx_per_mcs[WIL_MCS_MAX + 1];
+       u32 ft_roams; /* relevant in STA mode */
 };
 
 /**
@@ -599,6 +609,8 @@ struct wil_txrx_ops {
                              struct wil_ctx *ctx);
        int (*tx_ring_tso)(struct wil6210_priv *wil, struct wil6210_vif *vif,
                           struct wil_ring *ring, struct sk_buff *skb);
+       int (*tx_ring_modify)(struct wil6210_vif *vif, int ring_id,
+                             int cid, int tid);
        irqreturn_t (*irq_tx)(int irq, void *cookie);
        /* RX ops */
        int (*rx_init)(struct wil6210_priv *wil, u16 ring_size);
@@ -821,6 +833,7 @@ extern u8 led_polarity;
 enum wil6210_vif_status {
        wil_vif_fwconnecting,
        wil_vif_fwconnected,
+       wil_vif_ft_roam,
        wil_vif_status_last /* keep last */
 };
 
@@ -1204,6 +1217,7 @@ int wmi_add_cipher_key(struct wil6210_vif *vif, u8 key_index,
 int wmi_echo(struct wil6210_priv *wil);
 int wmi_set_ie(struct wil6210_vif *vif, u8 type, u16 ie_len, const void *ie);
 int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring);
+int wmi_update_ft_ies(struct wil6210_vif *vif, u16 ie_len, const void *ie);
 int wmi_rxon(struct wil6210_priv *wil, bool on);
 int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
 int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac,
@@ -1319,6 +1333,9 @@ void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil);
 void wil_rx_handle(struct wil6210_priv *wil, int *quota);
 void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
 void wil6210_unmask_irq_rx_edma(struct wil6210_priv *wil);
+void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage,
+                      struct wil_sta_info *cs,
+                      struct key_params *params);
 
 int wil_iftype_nl2wmi(enum nl80211_iftype type);
 
@@ -1370,4 +1387,6 @@ int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid,
                           u8 tid, u8 token, u16 status, bool amsdu,
                           u16 agg_wsize, u16 timeout);
 
+void update_supported_bands(struct wil6210_priv *wil);
+
 #endif /* __WIL6210_H__ */
index 42c02a20ec97cafa5336c63ccae019c5735021ae..4859f0e43658ce00d8f4c7990dc6ef23d0357dc0 100644 (file)
@@ -227,6 +227,14 @@ struct blink_on_off_time led_blink_time[] = {
        {WIL_LED_BLINK_ON_FAST_MS, WIL_LED_BLINK_OFF_FAST_MS},
 };
 
+struct auth_no_hdr {
+       __le16 auth_alg;
+       __le16 auth_transaction;
+       __le16 status_code;
+       /* possibly followed by Challenge text */
+       u8 variable[0];
+} __packed;
+
 u8 led_polarity = LED_POLARITY_LOW_ACTIVE;
 
 /**
@@ -468,6 +476,12 @@ static const char *cmdid2name(u16 cmdid)
                return "WMI_LINK_STATS_CMD";
        case WMI_SW_TX_REQ_EXT_CMDID:
                return "WMI_SW_TX_REQ_EXT_CMDID";
+       case WMI_FT_AUTH_CMDID:
+               return "WMI_FT_AUTH_CMD";
+       case WMI_FT_REASSOC_CMDID:
+               return "WMI_FT_REASSOC_CMD";
+       case WMI_UPDATE_FT_IES_CMDID:
+               return "WMI_UPDATE_FT_IES_CMD";
        default:
                return "Untracked CMD";
        }
@@ -606,6 +620,12 @@ static const char *eventid2name(u16 eventid)
                return "WMI_LINK_STATS_CONFIG_DONE_EVENT";
        case WMI_LINK_STATS_EVENTID:
                return "WMI_LINK_STATS_EVENT";
+       case WMI_COMMAND_NOT_SUPPORTED_EVENTID:
+               return "WMI_COMMAND_NOT_SUPPORTED_EVENT";
+       case WMI_FT_AUTH_STATUS_EVENTID:
+               return "WMI_FT_AUTH_STATUS_EVENT";
+       case WMI_FT_REASSOC_STATUS_EVENTID:
+               return "WMI_FT_REASSOC_STATUS_EVENT";
        default:
                return "Untracked EVENT";
        }
@@ -1156,6 +1176,9 @@ static void wmi_evt_ring_en(struct wil6210_vif *vif, int id, void *d, int len)
        struct wmi_ring_en_event *evt = d;
        u8 vri = evt->ring_index;
        struct wireless_dev *wdev = vif_to_wdev(vif);
+       struct wil_sta_info *sta;
+       u8 cid;
+       struct key_params params;
 
        wil_dbg_wmi(wil, "Enable vring %d MID %d\n", vri, vif->mid);
 
@@ -1164,13 +1187,33 @@ static void wmi_evt_ring_en(struct wil6210_vif *vif, int id, void *d, int len)
                return;
        }
 
-       if (wdev->iftype != NL80211_IFTYPE_AP || !disable_ap_sme)
-               /* in AP mode with disable_ap_sme, this is done by
-                * wil_cfg80211_change_station()
+       if (wdev->iftype != NL80211_IFTYPE_AP || !disable_ap_sme ||
+           test_bit(wil_vif_ft_roam, vif->status))
+               /* in AP mode with disable_ap_sme, when not FT-roaming,
+                * this is done by wil_cfg80211_change_station()
                 */
                wil->ring_tx_data[vri].dot1x_open = true;
        if (vri == vif->bcast_ring) /* no BA for bcast */
                return;
+
+       cid = wil->ring2cid_tid[vri][0];
+       if (!wil_cid_valid(cid)) {
+               wil_err(wil, "invalid cid %d for vring %d\n", cid, vri);
+               return;
+       }
+
+       /* In FT mode we get the key but do not store it, as it arrives
+        * before the WMI_CONNECT_EVENT is received from FW.
+        * wil_set_crypto_rx is called here to reset the security PN.
+        */
+       sta = &wil->sta[cid];
+       if (test_bit(wil_vif_ft_roam, vif->status)) {
+               memset(&params, 0, sizeof(params));
+               wil_set_crypto_rx(0, WMI_KEY_USE_PAIRWISE, sta, &params);
+               if (wdev->iftype != NL80211_IFTYPE_AP)
+                       clear_bit(wil_vif_ft_roam, vif->status);
+       }
+
        if (agg_wsize >= 0)
                wil_addba_tx_request(wil, vri, agg_wsize);
 }
@@ -1461,6 +1504,271 @@ wmi_evt_link_stats(struct wil6210_vif *vif, int id, void *d, int len)
                             evt->payload, payload_size);
 }
 
+/**
+ * find the cid and ringid for the station vif
+ *
+ * return an error if the interface type is not station/P2P client
+ * or no matching ring was found
+ */
+static int wil_find_cid_ringid_sta(struct wil6210_priv *wil,
+                                  struct wil6210_vif *vif,
+                                  int *cid,
+                                  int *ringid)
+{
+       struct wil_ring *ring;
+       struct wil_ring_tx_data *txdata;
+       int min_ring_id = wil_get_min_tx_ring_id(wil);
+       int i;
+       u8 lcid;
+
+       if (!(vif->wdev.iftype == NL80211_IFTYPE_STATION ||
+             vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)) {
+               wil_err(wil, "invalid interface type %d\n", vif->wdev.iftype);
+               return -EINVAL;
+       }
+
+       /* In STA mode we expect a single TX ring, toward the AP we
+        * are connected to. Find it and return the cid associated
+        * with it.
+        */
+       for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
+               ring = &wil->ring_tx[i];
+               txdata = &wil->ring_tx_data[i];
+               if (!ring->va || !txdata->enabled || txdata->mid != vif->mid)
+                       continue;
+
+               lcid = wil->ring2cid_tid[i][0];
+               if (lcid >= WIL6210_MAX_CID) /* skip BCAST */
+                       continue;
+
+               wil_dbg_wmi(wil, "find sta -> ringid %d cid %d\n", i, lcid);
+               *cid = lcid;
+               *ringid = i;
+               return 0;
+       }
+
+       wil_dbg_wmi(wil, "find sta cid while no rings active?\n");
+
+       return -ENOENT;
+}
+
+static void
+wmi_evt_auth_status(struct wil6210_vif *vif, int id, void *d, int len)
+{
+       struct wil6210_priv *wil = vif_to_wil(vif);
+       struct net_device *ndev = vif_to_ndev(vif);
+       struct wmi_ft_auth_status_event *data = d;
+       int ie_len = len - offsetof(struct wmi_ft_auth_status_event, ie_info);
+       int rc, cid = 0, ringid = 0;
+       struct cfg80211_ft_event_params ft;
+       u16 d_len;
+       /* auth_alg(u16) + auth_transaction(u16) + status_code(u16) */
+       const size_t auth_ie_offset = sizeof(u16) * 3;
+       struct auth_no_hdr *auth = (struct auth_no_hdr *)data->ie_info;
+
+       /* check the status */
+       if (ie_len >= 0 && data->status != WMI_FW_STATUS_SUCCESS) {
+               wil_err(wil, "FT: auth failed. status %d\n", data->status);
+               goto fail;
+       }
+
+       if (ie_len < auth_ie_offset) {
+               wil_err(wil, "FT: auth event too short, len %d\n", len);
+               goto fail;
+       }
+
+       d_len = le16_to_cpu(data->ie_len);
+       if (d_len != ie_len) {
+               wil_err(wil,
+                       "FT: auth ie length mismatch, d_len %d should be %d\n",
+                       d_len, ie_len);
+               goto fail;
+       }
+
+       if (!test_bit(wil_vif_ft_roam, wil->status)) {
+               wil_err(wil, "FT: Not in roaming state\n");
+               goto fail;
+       }
+
+       if (le16_to_cpu(auth->auth_transaction) != 2) {
+               wil_err(wil, "FT: auth error. auth_transaction %d\n",
+                       le16_to_cpu(auth->auth_transaction));
+               goto fail;
+       }
+
+       if (le16_to_cpu(auth->auth_alg) != WLAN_AUTH_FT) {
+               wil_err(wil, "FT: auth error. auth_alg %d\n",
+                       le16_to_cpu(auth->auth_alg));
+               goto fail;
+       }
+
+       wil_dbg_wmi(wil, "FT: Auth to %pM successfully\n", data->mac_addr);
+       wil_hex_dump_wmi("FT Auth ies : ", DUMP_PREFIX_OFFSET, 16, 1,
+                        data->ie_info, d_len, true);
+
+       /* find cid and ringid */
+       rc = wil_find_cid_ringid_sta(wil, vif, &cid, &ringid);
+       if (rc) {
+               wil_err(wil, "No valid cid found\n");
+               goto fail;
+       }
+
+       if (vif->privacy) {
+               /* For secure assoc, remove old keys */
+               rc = wmi_del_cipher_key(vif, 0, wil->sta[cid].addr,
+                                       WMI_KEY_USE_PAIRWISE);
+               if (rc) {
+                       wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(PTK) failed\n");
+                       goto fail;
+               }
+               rc = wmi_del_cipher_key(vif, 0, wil->sta[cid].addr,
+                                       WMI_KEY_USE_RX_GROUP);
+               if (rc) {
+                       wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(GTK) failed\n");
+                       goto fail;
+               }
+       }
+
+       memset(&ft, 0, sizeof(ft));
+       ft.ies = data->ie_info + auth_ie_offset;
+       ft.ies_len = d_len - auth_ie_offset;
+       ft.target_ap = data->mac_addr;
+       cfg80211_ft_event(ndev, &ft);
+
+       return;
+
+fail:
+       wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID, false);
+}
+
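Worked example of the offset arithmetic in wmi_evt_auth_status(): the fixed authentication fields (auth_alg, auth_transaction, status_code) are three little-endian u16s, so auth_ie_offset is 6 bytes and cfg80211_ft_event() is handed the IEs starting at ie_info + 6 with length d_len - 6. The assumption can be checked against struct auth_no_hdr at compile time (inside any function):

        /* sanity sketch: the variable IEs start right after the three
         * fixed u16 fields of struct auth_no_hdr
         */
        BUILD_BUG_ON(offsetof(struct auth_no_hdr, variable) != 3 * sizeof(u16));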
+static void
+wmi_evt_reassoc_status(struct wil6210_vif *vif, int id, void *d, int len)
+{
+       struct wil6210_priv *wil = vif_to_wil(vif);
+       struct net_device *ndev = vif_to_ndev(vif);
+       struct wiphy *wiphy = wil_to_wiphy(wil);
+       struct wmi_ft_reassoc_status_event *data = d;
+       int ies_len = len - offsetof(struct wmi_ft_reassoc_status_event,
+                                    ie_info);
+       int rc = -ENOENT, cid = 0, ringid = 0;
+       int ch; /* channel number (primary) */
+       size_t assoc_req_ie_len = 0, assoc_resp_ie_len = 0;
+       u8 *assoc_req_ie = NULL, *assoc_resp_ie = NULL;
+       /* capinfo(u16) + listen_interval(u16) + current_ap mac addr + IEs */
+       const size_t assoc_req_ie_offset = sizeof(u16) * 2 + ETH_ALEN;
+       /* capinfo(u16) + status_code(u16) + associd(u16) + IEs */
+       const size_t assoc_resp_ie_offset = sizeof(u16) * 3;
+       u16 d_len;
+       int freq;
+       struct cfg80211_roam_info info;
+
+       if (ies_len < 0) {
+               wil_err(wil, "ft reassoc event too short, len %d\n", len);
+               goto fail;
+       }
+
+       wil_dbg_wmi(wil, "Reasoc Status event: status=%d, aid=%d",
+                   data->status, data->aid);
+       wil_dbg_wmi(wil, "    mac_addr=%pM, beacon_ie_len=%d",
+                   data->mac_addr, data->beacon_ie_len);
+       wil_dbg_wmi(wil, "    reassoc_req_ie_len=%d, reassoc_resp_ie_len=%d",
+                   le16_to_cpu(data->reassoc_req_ie_len),
+                   le16_to_cpu(data->reassoc_resp_ie_len));
+
+       d_len = le16_to_cpu(data->beacon_ie_len) +
+               le16_to_cpu(data->reassoc_req_ie_len) +
+               le16_to_cpu(data->reassoc_resp_ie_len);
+       if (d_len != ies_len) {
+               wil_err(wil,
+                       "ft reassoc ie length mismatch, d_len %d should be %d\n",
+                       d_len, ies_len);
+               goto fail;
+       }
+
+       /* check the status */
+       if (data->status != WMI_FW_STATUS_SUCCESS) {
+               wil_err(wil, "ft reassoc failed. status %d\n", data->status);
+               goto fail;
+       }
+
+       /* find cid and ringid */
+       rc = wil_find_cid_ringid_sta(wil, vif, &cid, &ringid);
+       if (rc) {
+               wil_err(wil, "No valid cid found\n");
+               goto fail;
+       }
+
+       ch = data->channel + 1;
+       wil_info(wil, "FT: Roam %pM channel [%d] cid %d aid %d\n",
+                data->mac_addr, ch, cid, data->aid);
+
+       wil_hex_dump_wmi("reassoc AI : ", DUMP_PREFIX_OFFSET, 16, 1,
+                        data->ie_info, len - sizeof(*data), true);
+
+       /* figure out the IEs */
+       if (le16_to_cpu(data->reassoc_req_ie_len) > assoc_req_ie_offset) {
+               assoc_req_ie = &data->ie_info[assoc_req_ie_offset];
+               assoc_req_ie_len = le16_to_cpu(data->reassoc_req_ie_len) -
+                       assoc_req_ie_offset;
+       }
+       if (le16_to_cpu(data->reassoc_resp_ie_len) <= assoc_resp_ie_offset) {
+               wil_err(wil, "FT: reassoc resp ie len is too short, len %d\n",
+                       le16_to_cpu(data->reassoc_resp_ie_len));
+               goto fail;
+       }
+
+       assoc_resp_ie = &data->ie_info[le16_to_cpu(data->reassoc_req_ie_len) +
+               assoc_resp_ie_offset];
+       assoc_resp_ie_len = le16_to_cpu(data->reassoc_resp_ie_len) -
+               assoc_resp_ie_offset;
+
+       if (test_bit(wil_status_resetting, wil->status) ||
+           !test_bit(wil_status_fwready, wil->status)) {
+               wil_err(wil, "FT: status_resetting, cancel reassoc event\n");
+               /* no need for cleanup, wil_reset will do that */
+               return;
+       }
+
+       mutex_lock(&wil->mutex);
+
+       /* modify the TX ring configuration for the roamed AP */
+       wil_dbg_wmi(wil,
+                   "ft modify tx config for connection CID %d ring %d\n",
+                   cid, ringid);
+
+       rc = wil->txrx_ops.tx_ring_modify(vif, ringid, cid, 0);
+       if (rc) {
+               wil_err(wil, "modify TX for CID %d MID %d ring %d failed (%d)\n",
+                       cid, vif->mid, ringid, rc);
+               mutex_unlock(&wil->mutex);
+               goto fail;
+       }
+
+       /* Update the driver STA members with the new bss */
+       wil->sta[cid].aid = data->aid;
+       wil->sta[cid].stats.ft_roams++;
+       ether_addr_copy(wil->sta[cid].addr, vif->bss->bssid);
+       mutex_unlock(&wil->mutex);
+       del_timer_sync(&vif->connect_timer);
+
+       cfg80211_ref_bss(wiphy, vif->bss);
+       freq = ieee80211_channel_to_frequency(ch, NL80211_BAND_60GHZ);
+
+       memset(&info, 0, sizeof(info));
+       info.channel = ieee80211_get_channel(wiphy, freq);
+       info.bss = vif->bss;
+       info.req_ie = assoc_req_ie;
+       info.req_ie_len = assoc_req_ie_len;
+       info.resp_ie = assoc_resp_ie;
+       info.resp_ie_len = assoc_resp_ie_len;
+       cfg80211_roamed(ndev, &info, GFP_KERNEL);
+       vif->bss = NULL;
+
+       return;
+
+fail:
+       wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID, false);
+}
+
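Worked example of the IE slicing in wmi_evt_reassoc_status(): the request block is taken from the start of ie_info at offset 10 (capinfo u16 + listen_interval u16 + 6-byte current-AP MAC) and the response block at reassoc_req_ie_len + 6 (capinfo + status_code + associd), which implies the event carries the reassoc request bytes first, then the response. With hypothetical lengths reassoc_req_ie_len = 40 and reassoc_resp_ie_len = 30:

        /* hypothetical lengths, illustrating the slicing only */
        assoc_req_ie      = &data->ie_info[10];     /* 40 - 10 = 30 bytes of req IEs  */
        assoc_req_ie_len  = 40 - 10;
        assoc_resp_ie     = &data->ie_info[40 + 6]; /* 30 - 6  = 24 bytes of resp IEs */
        assoc_resp_ie_len = 30 - 6;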
 /**
  * Some events are ignored on purpose and need not be interpreted as
  * "unhandled events"
@@ -1492,6 +1800,8 @@ static const struct {
        {WMI_DATA_PORT_OPEN_EVENTID,            wmi_evt_ignore},
        {WMI_SCHED_SCAN_RESULT_EVENTID,         wmi_evt_sched_scan_result},
        {WMI_LINK_STATS_EVENTID,                wmi_evt_link_stats},
+       {WMI_FT_AUTH_STATUS_EVENTID,            wmi_evt_auth_status},
+       {WMI_FT_REASSOC_STATUS_EVENTID,         wmi_evt_reassoc_status},
 };
 
 /*
@@ -2086,6 +2396,40 @@ out:
        return rc;
 }
 
+int wmi_update_ft_ies(struct wil6210_vif *vif, u16 ie_len, const void *ie)
+{
+       struct wil6210_priv *wil = vif_to_wil(vif);
+       u16 len;
+       struct wmi_update_ft_ies_cmd *cmd;
+       int rc;
+
+       if (!ie)
+               ie_len = 0;
+
+       len = sizeof(struct wmi_update_ft_ies_cmd) + ie_len;
+       if (len < ie_len) {
+               wil_err(wil, "wraparound. ie len %d\n", ie_len);
+               return -EINVAL;
+       }
+
+       cmd = kzalloc(len, GFP_KERNEL);
+       if (!cmd) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       cmd->ie_len = cpu_to_le16(ie_len);
+       memcpy(cmd->ie_info, ie, ie_len);
+       rc = wmi_send(wil, WMI_UPDATE_FT_IES_CMDID, vif->mid, cmd, len);
+       kfree(cmd);
+
+out:
+       if (rc)
+               wil_err(wil, "update ft ies failed : %d\n", rc);
+
+       return rc;
+}
+
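The len < ie_len test in wmi_update_ft_ies() is a u16 overflow guard: len is a u16, so if the fixed command header plus ie_len exceeds 65535 the stored sum wraps and comes out smaller than ie_len. With a hypothetical 4-byte header:

        /* hypothetical 4-byte header: ie_len = 65534 stores
         * len = (4 + 65534) & 0xffff = 2, and 2 < 65534 trips the check
         */
        u16 len = sizeof(struct wmi_update_ft_ies_cmd) + ie_len;
        if (len < ie_len)
                return -EINVAL; /* length wrapped around */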
 /**
  * wmi_rxon - turn radio on/off
  * @on:                turn on if true, off otherwise
index 139acb2caf92fcac911d5212f7ac1e26c698dd07..b668758da994f8d66493615039efe0d4d38a12d0 100644 (file)
@@ -103,6 +103,7 @@ enum wmi_fw_capability {
        WMI_FW_CAPABILITY_AMSDU                         = 23,
        WMI_FW_CAPABILITY_RAW_MODE                      = 24,
        WMI_FW_CAPABILITY_TX_REQ_EXT                    = 25,
+       WMI_FW_CAPABILITY_CHANNEL_4                     = 26,
        WMI_FW_CAPABILITY_MAX,
 };
 
@@ -2369,6 +2370,7 @@ struct wmi_ft_reassoc_status_event {
        __le16 beacon_ie_len;
        __le16 reassoc_req_ie_len;
        __le16 reassoc_resp_ie_len;
+       u8 reserved[4];
        u8 ie_info[0];
 } __packed;
 
index b77d1a904f7e68f29fc73f34ef8defdf4fb71c56..9fc7c088a539e3728631ef41bf49004e60fa17f5 100644 (file)
@@ -909,7 +909,7 @@ struct b43_wl {
 
        /* Set this if we call ieee80211_register_hw() and check if we call
         * ieee80211_unregister_hw(). */
-       bool hw_registred;
+       bool hw_registered;
 
        /* We can only have one operating interface (802.11 core)
         * at a time. General information about this interface follows.
index 6b0e1ec346cb60aacd8076600033cf9ee554c462..dfc4c34298d4270a62117461b9c22073b71d10a3 100644 (file)
@@ -1432,7 +1432,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
                goto out;
        }
 
-       if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
+       if (WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME)) {
                /* If we get here, we have a real error with the queue
                 * full, but queues not stopped. */
                b43err(dev->wl, "DMA queue overflow\n");
@@ -1518,13 +1518,15 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
                        }
                } else {
                        /* More than a single header/data pair were missed.
-                        * Report this error, and reset the controller to
+                        * Report this error. If running with open-source
+                        * firmware, then reset the controller to
                         * revive operation.
                         */
                        b43dbg(dev->wl,
                               "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
                               ring->index, firstused, slot);
-                       b43_controller_restart(dev, "Out of order TX");
+                       if (dev->fw.opensource)
+                               b43_controller_restart(dev, "Out of order TX");
                        return;
                }
        }
index b37e7391f55defb4ded9635bdc15f55117789ba2..74be3c809225a19570c9ce07c92088655a3f04c1 100644 (file)
@@ -2611,7 +2611,7 @@ start_ieee80211:
        err = ieee80211_register_hw(wl->hw);
        if (err)
                goto err_one_core_detach;
-       wl->hw_registred = true;
+       wl->hw_registered = true;
        b43_leds_register(wl->current_dev);
 
        /* Register HW RNG driver */
@@ -5493,13 +5493,11 @@ err_powerdown:
 static void b43_one_core_detach(struct b43_bus_dev *dev)
 {
        struct b43_wldev *wldev;
-       struct b43_wl *wl;
 
        /* Do not cancel ieee80211-workqueue based work here.
         * See comment in b43_remove(). */
 
        wldev = b43_bus_get_wldev(dev);
-       wl = wldev->wl;
        b43_debugfs_remove_device(wldev);
        b43_wireless_core_detach(wldev);
        list_del(&wldev->list);
@@ -5610,7 +5608,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
 
        wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
 
-       wl->hw_registred = false;
+       wl->hw_registered = false;
        hw->max_rates = 2;
        SET_IEEE80211_DEV(hw, dev->dev);
        if (is_valid_ether_addr(sprom->et1mac))
@@ -5693,7 +5691,7 @@ static void b43_bcma_remove(struct bcma_device *core)
        B43_WARN_ON(!wl);
        if (!wldev->fw.ucode.data)
                return;                 /* NULL if firmware never loaded */
-       if (wl->current_dev == wldev && wl->hw_registred) {
+       if (wl->current_dev == wldev && wl->hw_registered) {
                b43_leds_stop(wldev);
                ieee80211_unregister_hw(wl->hw);
        }
@@ -5776,7 +5774,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
        B43_WARN_ON(!wl);
        if (!wldev->fw.ucode.data)
                return;                 /* NULL if firmware never loaded */
-       if (wl->current_dev == wldev && wl->hw_registred) {
+       if (wl->current_dev == wldev && wl->hw_registered) {
                b43_leds_stop(wldev);
                ieee80211_unregister_hw(wl->hw);
        }
index 2f0c64cef65f36d2da7cd93bb17f709faeb44320..1b1da7d83652ec14a2e4c58dce1fd9f5dff8d324 100644 (file)
@@ -1149,7 +1149,7 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
                return -ENOSPC;
        }
 
-       if (unlikely(WARN_ON(free_slots(ring) < SLOTS_PER_PACKET))) {
+       if (WARN_ON(free_slots(ring) < SLOTS_PER_PACKET)) {
                /* If we get here, we have a real error with the queue
                 * full, but queues not stopped. */
                b43legacyerr(dev->wl, "DMA queue overflow\n");
index 5444e6213d4592e6ac35dd1889c82e4a8d62db97..230a378c26fcfdbd7e4ed14795a20420bf0ee596 100644 (file)
@@ -1649,6 +1649,14 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
                case WLAN_AKM_SUITE_PSK:
                        val = WPA2_AUTH_PSK;
                        break;
+               case WLAN_AKM_SUITE_FT_8021X:
+                       val = WPA2_AUTH_UNSPECIFIED | WPA2_AUTH_FT;
+                       if (sme->want_1x)
+                               profile->use_fwsup = BRCMF_PROFILE_FWSUP_1X;
+                       break;
+               case WLAN_AKM_SUITE_FT_PSK:
+                       val = WPA2_AUTH_PSK | WPA2_AUTH_FT;
+                       break;
                default:
                        brcmf_err("invalid cipher group (%d)\n",
                                  sme->crypto.cipher_group);
index cd3651069d0c4e5bbe1810b0a91a9e433f27217d..94044a7a602164f830cfb82eebfd5530826d65c2 100644 (file)
@@ -296,9 +296,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
                /* Replace all newline/linefeed characters with space
                 * character
                 */
-               ptr = clmver;
-               while ((ptr = strnchr(ptr, '\n', sizeof(buf))) != NULL)
-                       *ptr = ' ';
+               strreplace(clmver, '\n', ' ');
 
                brcmf_dbg(INFO, "CLM version = %s\n", clmver);
        }
index 8347da632a5b0de85f9a68d9eedfd0436d2da915..4c5a3995dc352282e3243bd8a3ba608c55e17bf2 100644 (file)
@@ -178,7 +178,7 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,
        ifp->fwil_fwerr = false;
 }
 
-#define MAX_CAPS_BUFFER_SIZE   512
+#define MAX_CAPS_BUFFER_SIZE   768
 static void brcmf_feat_firmware_capabilities(struct brcmf_if *ifp)
 {
        char caps[MAX_CAPS_BUFFER_SIZE];
index 3e9c4f2f5dd12673e8c96e6dcacbea50cf7bc45e..456a1bf008b3d62242b386f9547dc1899e555b34 100644 (file)
@@ -74,7 +74,7 @@
 #define P2P_AF_MAX_WAIT_TIME           msecs_to_jiffies(2000)
 #define P2P_INVALID_CHANNEL            -1
 #define P2P_CHANNEL_SYNC_RETRY         5
-#define P2P_AF_FRM_SCAN_MAX_WAIT       msecs_to_jiffies(1500)
+#define P2P_AF_FRM_SCAN_MAX_WAIT       msecs_to_jiffies(450)
 #define P2P_DEFAULT_SLEEP_TIME_VSDB    200
 
 /* WiFi P2P Public Action Frame OUI Subtypes */
@@ -1134,7 +1134,6 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
 {
        struct afx_hdl *afx_hdl = &p2p->afx_hdl;
        struct brcmf_cfg80211_vif *pri_vif;
-       unsigned long duration;
        s32 retry;
 
        brcmf_dbg(TRACE, "Enter\n");
@@ -1150,7 +1149,6 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
         * pending action frame tx is cancelled.
         */
        retry = 0;
-       duration = msecs_to_jiffies(P2P_AF_FRM_SCAN_MAX_WAIT);
        while ((retry < P2P_CHANNEL_SYNC_RETRY) &&
               (afx_hdl->peer_chan == P2P_INVALID_CHANNEL)) {
                afx_hdl->is_listen = false;
@@ -1158,7 +1156,8 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
                          retry);
                /* search peer on peer's listen channel */
                schedule_work(&afx_hdl->afx_work);
-               wait_for_completion_timeout(&afx_hdl->act_frm_scan, duration);
+               wait_for_completion_timeout(&afx_hdl->act_frm_scan,
+                                           P2P_AF_FRM_SCAN_MAX_WAIT);
                if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) ||
                    (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
                               &p2p->status)))
@@ -1171,7 +1170,7 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
                        afx_hdl->is_listen = true;
                        schedule_work(&afx_hdl->afx_work);
                        wait_for_completion_timeout(&afx_hdl->act_frm_scan,
-                                                   duration);
+                                                   P2P_AF_FRM_SCAN_MAX_WAIT);
                }
                if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) ||
                    (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
@@ -1458,10 +1457,12 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
                return 0;
 
        if (e->event_code == BRCMF_E_ACTION_FRAME_COMPLETE) {
-               if (e->status == BRCMF_E_STATUS_SUCCESS)
+               if (e->status == BRCMF_E_STATUS_SUCCESS) {
                        set_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED,
                                &p2p->status);
-               else {
+                       if (!p2p->wait_for_offchan_complete)
+                               complete(&p2p->send_af_done);
+               } else {
                        set_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
                        /* If there is no ack, we don't need to wait for
                         * WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE event
@@ -1512,6 +1513,17 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
        p2p->af_sent_channel = le32_to_cpu(af_params->channel);
        p2p->af_tx_sent_jiffies = jiffies;
 
+       if (test_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status) &&
+           p2p->af_sent_channel ==
+           ieee80211_frequency_to_channel(p2p->remain_on_channel.center_freq))
+               p2p->wait_for_offchan_complete = false;
+       else
+               p2p->wait_for_offchan_complete = true;
+
+       brcmf_dbg(TRACE, "Waiting for %s tx completion event\n",
+                 (p2p->wait_for_offchan_complete) ?
+                  "off-channel" : "on-channel");
+
        timeout = wait_for_completion_timeout(&p2p->send_af_done,
                                              P2P_AF_MAX_WAIT_TIME);
 
index 0e8b34d2d85cb1b3dbc0ab9716e93c78628b132c..39f0d0218088236f20cb64eec06875836c1a4f96 100644 (file)
@@ -124,6 +124,7 @@ struct afx_hdl {
  * @gon_req_action: about to send go negotiation request frame.
  * @block_gon_req_tx: drop tx go negotiation request frame.
  * @p2pdev_dynamically: is p2p device if created by module param or supplicant.
+ * @wait_for_offchan_complete: wait for off-channel tx completion event.
  */
 struct brcmf_p2p_info {
        struct brcmf_cfg80211_info *cfg;
@@ -144,6 +145,7 @@ struct brcmf_p2p_info {
        bool gon_req_action;
        bool block_gon_req_tx;
        bool p2pdev_dynamically;
+       bool wait_for_offchan_complete;
 };
 
 s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced);
index 4fffa6988087b8bca0d6c834bb62a75d4a422c02..5dea569d63ed84568b7b7a3f302fdc678dff92fc 100644 (file)
@@ -2017,6 +2017,7 @@ static const struct dev_pm_ops brcmf_pciedrvr_pm = {
 
 static const struct pci_device_id brcmf_pcie_devid_table[] = {
        BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
+       BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
index 1e2fd289323aa210be3c2d1af6cb065ff199d5a4..b2e1ab5adb649523537b7d94edbeb16899b5fce3 100644 (file)
@@ -1463,7 +1463,7 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
        struct sk_buff *pfirst, *pnext;
 
        int errcode;
-       u8 doff, sfdoff;
+       u8 doff;
 
        struct brcmf_sdio_hdrinfo rd_new;
 
@@ -1597,7 +1597,6 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 
                /* Remove superframe header, remember offset */
                skb_pull(pfirst, rd_new.dat_offset);
-               sfdoff = rd_new.dat_offset;
                num = 0;
 
                /* Validate all the subframe headers */
@@ -3405,7 +3404,6 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
        struct brcmf_sdio *bus = sdiodev->bus;
        struct brcmf_core *core = bus->sdio_core;
-       uint pad_size;
        u32 value;
        int err;
 
@@ -3448,7 +3446,6 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
        if (sdiodev->sg_support) {
                bus->txglom = false;
                value = 1;
-               pad_size = bus->sdiodev->func2->cur_blksize << 1;
                err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom",
                                           &value, sizeof(u32));
                if (err < 0) {
index 2fe1f686327822e48cab3d1d6e0539bb43eb0b41..3bd54f125776217cec982a3d23c1cc490dd0b44e 100644 (file)
@@ -62,8 +62,7 @@ int brcms_debugfs_attach(struct brcms_pub *drvr)
 
 void brcms_debugfs_detach(struct brcms_pub *drvr)
 {
-       if (!IS_ERR_OR_NULL(drvr->dbgfs_dir))
-               debugfs_remove_recursive(drvr->dbgfs_dir);
+       debugfs_remove_recursive(drvr->dbgfs_dir);
 }
 
 struct dentry *brcms_debugfs_get_devdir(struct brcms_pub *drvr)
index ecc89e718b9c146865b6e17917e3fb51023614aa..6255fb6d97a7087dfcbf11cc9182afafbd4c4982 100644 (file)
@@ -1578,10 +1578,10 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx)
                        if (le32_to_cpu(hdr->idx) == idx) {
                                pdata = wl->fw.fw_bin[i]->data +
                                        le32_to_cpu(hdr->offset);
-                               *pbuf = kmemdup(pdata, len, GFP_KERNEL);
+                               *pbuf = kvmalloc(len, GFP_KERNEL);
                                if (*pbuf == NULL)
                                        goto fail;
-
+                               memcpy(*pbuf, pdata, len);
                                return 0;
                        }
                }
@@ -1629,7 +1629,7 @@ int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes, u32 idx)
  */
 void brcms_ucode_free_buf(void *p)
 {
-       kfree(p);
+       kvfree(p);
 }
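Note on the kmemdup() -> kvmalloc() change above: kmemdup() is backed by kmalloc() and can fail for large, physically contiguous firmware images, while kvmalloc() falls back to vmalloc() when contiguous pages are scarce. Such buffers must then be released with kvfree(), which is why brcms_ucode_free_buf() changed in the same patch. The pairing, schematically:

        /* sketch of the pairing; len and pdata as in the driver above */
        void *buf = kvmalloc(len, GFP_KERNEL); /* kmalloc, vmalloc fallback */
        if (!buf)
                return -ENOMEM;
        memcpy(buf, pdata, len);
        /* ... use buf ... */
        kvfree(buf); /* handles both kmalloc- and vmalloc-backed memory */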
 
 /*
index bedec1606caa40010f51c74c421fd9c5eaa8246a..a57f2711f3c02296885ef6494414ef5b099becde 100644 (file)
@@ -25453,12 +25453,12 @@ void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype)
                        (pi->cal_type_override ==
                         PHY_PERICAL_FULL) ? true : false;
 
-       if ((pi->mphase_cal_phase_id > MPHASE_CAL_STATE_INIT)) {
+       if (pi->mphase_cal_phase_id > MPHASE_CAL_STATE_INIT) {
                if (pi->nphy_txiqlocal_chanspec != pi->radio_chanspec)
                        wlc_phy_cal_perical_mphase_restart(pi);
        }
 
-       if ((pi->mphase_cal_phase_id == MPHASE_CAL_STATE_RXCAL))
+       if (pi->mphase_cal_phase_id == MPHASE_CAL_STATE_RXCAL)
                wlapi_bmac_write_shm(pi->sh->physhim, M_CTS_DURATION, 10000);
 
        wlapi_suspend_mac_and_wait(pi->sh->physhim);
index d8b79cb72b58d3153c095c5e93896a0ba99f539a..e7584b842dce4b8cc8fcd08504714e64eefc7f3a 100644 (file)
@@ -77,6 +77,8 @@ static u16 d11ac_bw(enum brcmu_chan_bw bw)
                return BRCMU_CHSPEC_D11AC_BW_40;
        case BRCMU_CHAN_BW_80:
                return BRCMU_CHSPEC_D11AC_BW_80;
+       case BRCMU_CHAN_BW_160:
+               return BRCMU_CHSPEC_D11AC_BW_160;
        default:
                WARN_ON(1);
        }
@@ -190,8 +192,38 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
                        break;
                }
                break;
-       case BRCMU_CHSPEC_D11AC_BW_8080:
        case BRCMU_CHSPEC_D11AC_BW_160:
+               switch (ch->sb) {
+               case BRCMU_CHAN_SB_LLL:
+                       ch->control_ch_num -= CH_70MHZ_APART;
+                       break;
+               case BRCMU_CHAN_SB_LLU:
+                       ch->control_ch_num -= CH_50MHZ_APART;
+                       break;
+               case BRCMU_CHAN_SB_LUL:
+                       ch->control_ch_num -= CH_30MHZ_APART;
+                       break;
+               case BRCMU_CHAN_SB_LUU:
+                       ch->control_ch_num -= CH_10MHZ_APART;
+                       break;
+               case BRCMU_CHAN_SB_ULL:
+                       ch->control_ch_num += CH_10MHZ_APART;
+                       break;
+               case BRCMU_CHAN_SB_ULU:
+                       ch->control_ch_num += CH_30MHZ_APART;
+                       break;
+               case BRCMU_CHAN_SB_UUL:
+                       ch->control_ch_num += CH_50MHZ_APART;
+                       break;
+               case BRCMU_CHAN_SB_UUU:
+                       ch->control_ch_num += CH_70MHZ_APART;
+                       break;
+               default:
+                       WARN_ON_ONCE(1);
+                       break;
+               }
+               break;
+       case BRCMU_CHSPEC_D11AC_BW_8080:
        default:
                WARN_ON_ONCE(1);
                break;
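Worked example for the new 160 MHz sideband decode: 5 GHz channel numbers are spaced 5 MHz apart, so CH_70MHZ_APART = 14, CH_50MHZ_APART = 10, CH_30MHZ_APART = 6 and CH_10MHZ_APART = 2 channel numbers. For a 160 MHz channel centered on channel 50 (spanning channels 36-64), the eight 20 MHz control-channel positions decode as:

        sideband   offset     control channel
        LLL        -70 MHz    50 - 14 = 36
        LLU        -50 MHz    50 - 10 = 40
        LUL        -30 MHz    50 -  6 = 44
        LUU        -10 MHz    50 -  2 = 48
        ULL        +10 MHz    50 +  2 = 52
        ULU        +30 MHz    50 +  6 = 56
        UUL        +50 MHz    50 + 10 = 60
        UUU        +70 MHz    50 + 14 = 64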
index 7b9a77981df16bd7f78dbb7ef2abdf2b1dfedf8c..dddebaa60352556c1af038fd119e67c05a9e2b2b 100644 (file)
@@ -29,6 +29,8 @@
 #define CH_UPPER_SB                    0x01
 #define CH_LOWER_SB                    0x02
 #define CH_EWA_VALID                   0x04
+#define CH_70MHZ_APART                 14
+#define CH_50MHZ_APART                 10
 #define CH_30MHZ_APART                 6
 #define CH_20MHZ_APART                 4
 #define CH_10MHZ_APART                 2
@@ -237,6 +239,7 @@ static inline bool ac_bitmap_tst(u8 bitmap, int prec)
 #define WPA2_AUTH_RESERVED4    0x0400
 #define WPA2_AUTH_RESERVED5    0x0800
 #define WPA2_AUTH_1X_SHA256    0x1000  /* 1X with SHA256 key derivation */
+#define WPA2_AUTH_FT           0x4000  /* Fast BSS Transition */
 #define WPA2_AUTH_PSK_SHA256   0x8000  /* PSK with SHA256 key derivation */
 
 #define DOT11_DEFAULT_RTS_LEN          2347
index 9644e7b93645f510394b1deb3accf6976fc87a78..bbdca13c5a9f8540195f693b3c45819881041f1d 100644 (file)
@@ -5652,7 +5652,7 @@ static void ipw_merge_adhoc_network(struct work_struct *work)
                }
 
                mutex_lock(&priv->mutex);
-               if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
+               if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
                        IPW_DEBUG_MERGE("remove network %*pE\n",
                                        priv->essid_len, priv->essid);
                        ipw_remove_current_network(priv);
index 5916879849621dc079a1cb0cd61848201057527d..76b5ddb202481a6caa6899a76828aa0156140152 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
@@ -51,6 +47,7 @@
 
 static const struct iwl_base_params iwl1000_base_params = {
        .num_of_queues = IWLAGN_NUM_QUEUES,
+       .max_tfd_queue_size = 256,
        .eeprom_size = OTP_LOW_IMAGE_SIZE,
        .pll_cfg = true,
        .max_ll_items = OTP_MAX_LL_ITEMS_1000,
index fedb108db68f1b4f0ea6f5123e27d52e8b1d06c6..e7e45846dd0736929321dc0b5e56aa31cc80f800 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index 91ca77c7571cebb2a6dd3265b1713fb6d3202bda..da5d5f9b2573f29606ca2834bdcf2008582ac169 100644 (file)
@@ -56,7 +56,7 @@
 #include "iwl-config.h"
 
 /* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX        38
+#define IWL_22000_UCODE_API_MAX        41
 
 /* Lowest firmware API version supported */
 #define IWL_22000_UCODE_API_MIN        39
 #define IWL_22000_HR_FW_PRE            "iwlwifi-Qu-a0-hr-a0-"
 #define IWL_22000_HR_CDB_FW_PRE                "iwlwifi-QuIcp-z0-hrcdb-a0-"
 #define IWL_22000_HR_A_F0_FW_PRE       "iwlwifi-QuQnj-f0-hr-a0-"
-#define IWL_22000_HR_B_FW_PRE          "iwlwifi-Qu-b0-hr-b0-"
+#define IWL_22000_HR_B_F0_FW_PRE       "iwlwifi-Qu-b0-hr-b0-"
+#define IWL_22000_QU_B_HR_B_FW_PRE     "iwlwifi-Qu-b0-hr-b0-"
+#define IWL_22000_HR_B_FW_PRE          "iwlwifi-QuQnj-b0-hr-b0-"
 #define IWL_22000_JF_B0_FW_PRE         "iwlwifi-QuQnj-a0-jf-b0-"
 #define IWL_22000_HR_A0_FW_PRE         "iwlwifi-QuQnj-a0-hr-a0-"
 #define IWL_22000_SU_Z0_FW_PRE         "iwlwifi-su-z0-"
+#define IWL_QU_B_JF_B_FW_PRE           "iwlwifi-Qu-b0-jf-b0-"
 
 #define IWL_22000_HR_MODULE_FIRMWARE(api) \
        IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
        IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \
        IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
+#define IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(api) \
+       IWL_22000_HR_B_F0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(api) \
+       IWL_22000_QU_B_HR_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api)        \
        IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
        IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode"
        IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
        IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
+#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
+       IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
 
 #define NVM_HW_SECTION_NUM_FAMILY_22000                10
 
@@ -134,7 +143,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
        .ucode_api_min = IWL_22000_UCODE_API_MIN,                       \
        .led_mode = IWL_LED_RF_STATE,                                   \
        .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000,          \
-       .non_shared_ant = ANT_A,                                        \
+       .non_shared_ant = ANT_B,                                        \
        .dccm_offset = IWL_22000_DCCM_OFFSET,                           \
        .dccm_len = IWL_22000_DCCM_LEN,                                 \
        .dccm2_offset = IWL_22000_DCCM2_OFFSET,                         \
@@ -155,7 +164,9 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
        .gen2 = true,                                                   \
        .nvm_type = IWL_NVM_EXT,                                        \
        .dbgc_supported = true,                                         \
-       .min_umac_error_event_table = 0x400000
+       .min_umac_error_event_table = 0x400000,                         \
+       .d3_debug_data_base_addr = 0x401000,                            \
+       .d3_debug_data_length = 60 * 1024
 
 #define IWL_DEVICE_22500                                               \
        IWL_DEVICE_22000_COMMON,                                        \
@@ -190,7 +201,54 @@ const struct iwl_cfg iwl22000_2ac_cfg_jf = {
 
 const struct iwl_cfg iwl22000_2ax_cfg_hr = {
        .name = "Intel(R) Dual Band Wireless AX 22000",
-       .fw_name_pre = IWL_22000_HR_FW_PRE,
+       .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
+       IWL_DEVICE_22500,
+       /*
+        * This device doesn't support receiving BlockAck with a large bitmap
+        * so we need to restrict the size of transmitted aggregation to the
+        * HT size; mac80211 would otherwise pick the HE max (256) by default.
+        */
+       .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
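On the max_tx_agg_size cap above: the HT BlockAck window holds 64 frames while HE allows 256, and with HE enabled mac80211 would otherwise default the transmit aggregation size to the HE maximum, which this device cannot handle on receive. Assuming the usual ieee80211.h values of this era:

        /* assumed mac80211 constants from include/linux/ieee80211.h */
        #define IEEE80211_MAX_AMPDU_BUF_HT 0x40  /* 64-frame HT BA window  */
        #define IEEE80211_MAX_AMPDU_BUF    0x100 /* 256-frame HE BA window */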
+/*
+ * All JF radio modules are part of the 9000 series, but the MAC part
+ * looks more like 22000.  That's why this device is here, but called
+ * 9560 nevertheless.
+ */
+const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0 = {
+       .name = "Intel(R) Wireless-AC 9461",
+       .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+       IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0 = {
+       .name = "Intel(R) Wireless-AC 9462",
+       .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+       IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0 = {
+       .name = "Intel(R) Wireless-AC 9560",
+       .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+       IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = {
+       .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
+       .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+       IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
+       .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
+       .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+       IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_jf = {
+       .name = "Intel(R) Dual Band Wireless AX 22000",
+       .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
        IWL_DEVICE_22500,
        /*
         * This device doesn't support receiving BlockAck with a large bitmap
@@ -264,7 +322,10 @@ const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = {
 MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
index 36151e61a26f018f6c0ee1bc13a5931b08a6cffb..575a7022d045be7ad37243b1ae4c454efebd24dc 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index b5d8274761d8d776613376034232b87f9467e385..30e62a7c9d5212c7949f6472d577ede41eb4e409 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index a62c8346f13a47691a6dfe5bff657a90c2440f1c..c973bfaa341490e661eada64f5e2f208f4635b55 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index c46fa712985b7e70568461736957c7e09b063a20..348c40fcddcbfc218f4c774498301b0e287c9a85 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 24b2f7cbb308c95ca5ea378151d36e12f3c9b892..d55fd23cafe6f021ff8942c8debbae52d656697f 100644 (file)
@@ -57,7 +57,7 @@
 #include "fw/file.h"
 
 /* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX  38
+#define IWL9000_UCODE_API_MAX  41
 
 /* Lowest firmware API version supported */
 #define IWL9000_UCODE_API_MIN  30
@@ -155,7 +155,9 @@ static const struct iwl_tt_params iwl9000_tt_params = {
        .nvm_type = IWL_NVM_EXT,                                        \
        .dbgc_supported = true,                                         \
        .min_umac_error_event_table = 0x800000,                         \
-       .csr = &iwl_csr_v1
+       .csr = &iwl_csr_v1,                                             \
+       .d3_debug_data_base_addr = 0x401000,                            \
+       .d3_debug_data_length = 92 * 1024
 
 const struct iwl_cfg iwl9160_2ac_cfg = {
        .name = "Intel(R) Dual Band Wireless AC 9160",
index b79e38734f2f64b241c7c7d42598b49cbbaaf760..431e13c6ee3540a171fe79b3f8a411ba9c842edc 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index c96f9b1d948ab3662c0e9db5f8a7752b47b54265..588b15697710db58926d73a6b15ee9170bcdb127 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 099e3ce80ffcc373c53da80b3c40f988e99e445b..c43ba94bfa8b5c41ff80932ea2df5b111f53edc1 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index f89736d60a3dd70b1290f4f9d63c66a16dc147e1..0f4be4be181cf0dbd6cdbec35abdd98be12307cd 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 096a07c5a33fbda5d8265980cd98af5bc6f18ba6..3d2e44a642de300d4eb101cfbc23ca08133f5e67 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index cceb4cd8e501bca99f46cddb976200beb34df8fb..c5b8376d827fc30cb00dfb73b25f2bf277376b0e 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index f21732ec3b2529017b151b374f2627e5b257e956..3dd7d8c45dab68a291944f0bc0615370bc0bfabb 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index 1bbd17ada974723623acc6b1da53b77131f6121b..04c236e9399b4f8aa19c5e067468ee8e32970a06 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index 75f74edd018fd58cefdcf274dd6adc382178709b..8f93a3246dee00048f33cd885e90ec90f1f6b55d 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index 2b6ffbc46fa54c5c85025543057dcafe59fdb4ee..b2f172d4f78abaf0313de2b1f8a79ade30c03707 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 82caae02dd0940926cfd88ef0964545aba0ffe1e..49b71dbf849066c8d960dcf852d2c8caa2b0cdd6 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index 030482b357a3c3038bcf456f2c782b04747fa70f..1088ff036e13e6ede3e7718dee5f8b4162b44fe6 100644 (file)
@@ -2,6 +2,7 @@
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
@@ -1651,7 +1648,6 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv)
                        priv->status, table.valid);
        }
 
-       trace_iwlwifi_dev_ucode_error(trans->dev, &table, 0, table.brd_ver);
        IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
                desc_lookup(table.error_id));
        IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
index 0ad557c895140a72d17637b16cdb41d2be89f0f9..8c25e3aefb2b2983c26d92519d44159d1e7edaf9 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index 2fd9b43adafda94ff27671921ea8c250018e2e31..a04fd4d375c6f0d6c3ea0fc42dd8d12dbdda6905 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index 98050d7be41129dabcf1fe71889d7d2886d98edc..ef4b9de256f75b5231ed191fc7afa24394765ba3 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index 50c1e951dd2de1b2088077fb77cb54c65c44a8bf..b2df3a8cc464d6a00bce98cc62da2df5c7bd6ce4 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index c942830af2b58ed8517c529ba4deb70c14e923f6..6f17a5e24e828268ce2810446dc481642de107f8 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index 8f3e5586eda9fb964428852cfa7d96d0d5b2fe3b..eee1d48d453a17a21ef11137ad614975dfe13d35 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index 17e6a32384d3cf5103acad0b9e4e7eb1f5bcf66b..8d7aafb4d9e93ee4e39db4751fbffcd627faecb7 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index de6ec9b7ace4587f778c2658dc5f7696debd8a4d..b1792de095949cf057332dbe63df8cb9803593a7 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index 6524533d723c5a48d8bf68bcb3fabf9daa4c9ab9..4de2727ac63eaec188538fab7d2114a55d97f181 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index d324e9be9cbfc597e84965da55b921663e742b5f..6388c09603c6556ad7310d417441e23361842f0e 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index fb40ddfced999ca3b4516bec1f6f4133b6b61f1a..4ff323a3a4e5ad40123e4ed3f25f7b8f78d38684 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index d6013bfe991cde5f28b694ff72a0bb46812b0081..3bf57085b976a028f86a595819c2d5c23d01f974 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 75cae54ea7decbc8c7111ddb6e8a27bdb2cb6026..32d000cffe9f6ad69fa02c2d4ba794e6b790ee44 100644 (file)
@@ -16,9 +16,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program;
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index cb5f32c1d7057a4e54d8a9de0088c7b542c4280e..2439e98431eefe4f023ec9c0b38a72010b17e4d1 100644 (file)
@@ -16,9 +16,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program;
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 87c1ddea75aef33544114018269bee97185fe9c4..68060085010f8ce0a92f777829acd061c635e46c 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2017        Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
  * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2017        Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -203,6 +205,7 @@ enum iwl_bt_activity_grading {
        BT_ON_NO_CONNECTION     = 1,
        BT_LOW_TRAFFIC          = 2,
        BT_HIGH_TRAFFIC         = 3,
+       BT_VERY_HIGH_TRAFFIC    = 4,
 
        BT_MAX_AG,
 }; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */
index 6dad748e5cdc9538d1d3c84442f3dea2e227c349..8b4922bbe139dff6c3551c54f87aa015210771bf 100644 (file)
@@ -436,7 +436,8 @@ enum iwl_legacy_cmds {
 
        /**
         * @REDUCE_TX_POWER_CMD:
-        * &struct iwl_dev_tx_power_cmd_v3 or &struct iwl_dev_tx_power_cmd
+        * &struct iwl_dev_tx_power_cmd_v3 or &struct iwl_dev_tx_power_cmd_v4
+        * or &struct iwl_dev_tx_power_cmd
         */
        REDUCE_TX_POWER_CMD = 0x9f,
 
index 57f4bc242023211843c491b4623abdaa0d543e12..6fae02fa4cadc907cda522faf1e54b55ef354c12 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -374,7 +376,7 @@ enum iwl_wowlan_wakeup_reason {
 
 }; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
 
-struct iwl_wowlan_gtk_status {
+struct iwl_wowlan_gtk_status_v1 {
        u8 key_index;
        u8 reserved[3];
        u8 decrypt_key[16];
@@ -382,9 +384,84 @@ struct iwl_wowlan_gtk_status {
        struct iwl_wowlan_rsc_tsc_params_cmd rsc;
 } __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */
 
+#define WOWLAN_KEY_MAX_SIZE    32
+#define WOWLAN_GTK_KEYS_NUM     2
+#define WOWLAN_IGTK_KEYS_NUM   2
+
+/**
+ * struct iwl_wowlan_gtk_status - GTK status
+ * @key: GTK material
+ * @key_len: GTK length, if set to 0, the key is not available
+ * @key_flags: information about the key:
+ *     bits[0:1]:  key index assigned by the AP
+ *     bits[2:6]:  GTK index of the key in the internal DB
+ *     bit[7]:     Set iff this is the currently used GTK
+ * @reserved: padding
+ * @tkip_mic_key: TKIP RX MIC key
+ * @rsc: TSC RSC counters
+ */
+struct iwl_wowlan_gtk_status {
+       u8 key[WOWLAN_KEY_MAX_SIZE];
+       u8 key_len;
+       u8 key_flags;
+       u8 reserved[2];
+       u8 tkip_mic_key[8];
+       struct iwl_wowlan_rsc_tsc_params_cmd rsc;
+} __packed; /* WOWLAN_GTK_MATERIAL_VER_2 */
+
+#define IWL_WOWLAN_GTK_IDX_MASK                (BIT(0) | BIT(1))
+
+/**
+ * struct iwl_wowlan_igtk_status - IGTK status
+ * @key: IGTK material
+ * @ipn: the IGTK packet number (replay counter)
+ * @key_len: IGTK length, if set to 0, the key is not available
+ * @key_flags: information about the key:
+ *     bits[0]:    key index assigned by the AP (0: index 4, 1: index 5)
+ *     bits[1:5]:  IGTK index of the key in the internal DB
+ *     bit[6]:     Set iff this is the currently used IGTK
+ */
+struct iwl_wowlan_igtk_status {
+       u8 key[WOWLAN_KEY_MAX_SIZE];
+       u8 ipn[6];
+       u8 key_len;
+       u8 key_flags;
+} __packed; /* WOWLAN_IGTK_MATERIAL_VER_1 */
+
+/**
+ * struct iwl_wowlan_status_v6 - WoWLAN status
+ * @gtk: GTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched pattern
+ * @non_qos_seq_ctr: non-QoS sequence counter to use next
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @transmitted_ndps: number of transmitted neighbor discovery packets
+ * @received_beacons: number of received beacons
+ * @wake_packet_length: wakeup packet length
+ * @wake_packet_bufsize: wakeup packet buffer size
+ * @wake_packet: wakeup packet
+ */
+struct iwl_wowlan_status_v6 {
+       struct iwl_wowlan_gtk_status_v1 gtk;
+       __le64 replay_ctr;
+       __le16 pattern_number;
+       __le16 non_qos_seq_ctr;
+       __le16 qos_seq_ctr[8];
+       __le32 wakeup_reasons;
+       __le32 num_of_gtk_rekeys;
+       __le32 transmitted_ndps;
+       __le32 received_beacons;
+       __le32 wake_packet_length;
+       __le32 wake_packet_bufsize;
+       u8 wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed; /* WOWLAN_STATUSES_API_S_VER_6 */
+
 /**
  * struct iwl_wowlan_status - WoWLAN status
  * @gtk: GTK data
+ * @igtk: IGTK data
  * @replay_ctr: GTK rekey replay counter
  * @pattern_number: number of the matched pattern
  * @non_qos_seq_ctr: non-QoS sequence counter to use next
@@ -398,7 +475,8 @@ struct iwl_wowlan_gtk_status {
  * @wake_packet: wakeup packet
  */
 struct iwl_wowlan_status {
-       struct iwl_wowlan_gtk_status gtk;
+       struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
+       struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
        __le64 replay_ctr;
        __le16 pattern_number;
        __le16 non_qos_seq_ctr;
@@ -410,7 +488,12 @@ struct iwl_wowlan_status {
        __le32 wake_packet_length;
        __le32 wake_packet_bufsize;
        u8 wake_packet[]; /* can be truncated from _length to _bufsize */
-} __packed; /* WOWLAN_STATUSES_API_S_VER_6 */
+} __packed; /* WOWLAN_STATUSES_API_S_VER_7 */
+
+static inline u8 iwlmvm_wowlan_gtk_idx(struct iwl_wowlan_gtk_status *gtk)
+{
+       return gtk->key_flags & IWL_WOWLAN_GTK_IDX_MASK;
+}
 
 #define IWL_WOWLAN_TCP_MAX_PACKET_LEN          64
 #define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN  128
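The key_flags layout documented above (bits [0:1] hold the AP-assigned key index, bit 7 marks the key in use) is what iwlmvm_wowlan_gtk_idx() decodes. A minimal sketch of picking the active GTK out of the new two-entry array; find_active_gtk() is a hypothetical helper, not from this patch:

/* Illustrative: return the GTK slot marked "currently used" (bit 7 of
 * key_flags per the kernel-doc above); key_len == 0 means the slot is
 * empty. */
static const struct iwl_wowlan_gtk_status *
find_active_gtk(const struct iwl_wowlan_status *status)
{
	int i;

	for (i = 0; i < WOWLAN_GTK_KEYS_NUM; i++) {
		const struct iwl_wowlan_gtk_status *gtk = &status->gtk[i];

		if (gtk->key_len && (gtk->key_flags & BIT(7)))
			return gtk; /* AP key index: iwlmvm_wowlan_gtk_idx(gtk) */
	}
	return NULL;
}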
index 59b3c6e8f37b609cf269c8915cdbe3bf7867079b..eff3249af48ad6a291f07c149b7490421f4e8008 100644 (file)
@@ -99,6 +99,11 @@ enum iwl_data_path_subcmd_ids {
         */
        TLC_MNG_CONFIG_CMD = 0xF,
 
+       /**
+        * @HE_AIR_SNIFFER_CONFIG_CMD: &struct iwl_he_monitor_cmd
+        */
+       HE_AIR_SNIFFER_CONFIG_CMD = 0x13,
+
        /**
         * @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif
         */
index 106782341544f0c0591866cfd79a1b3467d16fa7..dc1fa377087a60c12659722f769b5a609df1b599 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -336,6 +338,9 @@ struct iwl_dbg_mem_access_rsp {
 #define CONT_REC_COMMAND_SIZE  80
 #define ENABLE_CONT_RECORDING  0x15
 #define DISABLE_CONT_RECORDING 0x16
+#define BUFFER_ALLOCATION      0x27
+#define START_DEBUG_RECORDING  0x29
+#define STOP_DEBUG_RECORDING   0x2A
 
 /*
  * struct iwl_continuous_record_mode - recording mode
@@ -353,4 +358,31 @@ struct iwl_continuous_record_cmd {
                sizeof(struct iwl_continuous_record_mode)];
 } __packed;
 
+/* maximum fragments to be allocated per target of allocationId */
+#define IWL_BUFFER_LOCATION_MAX_FRAGS  2
+
+/**
+ * struct iwl_fragment_data - single fragment structure
+ * @address: 64bit start address
+ * @size: size in bytes
+ */
+struct iwl_fragment_data {
+       __le64 address;
+       __le32 size;
+} __packed; /* FRAGMENT_STRUCTURE_API_S_VER_1 */
+
+/**
+ * struct iwl_buffer_allocation_cmd - buffer allocation command structure
+ * @allocation_id: id of the allocation
+ * @buffer_location: location of the buffer
+ * @num_frags: number of fragments
+ * @fragments: memory fragments
+ */
+struct iwl_buffer_allocation_cmd {
+       __le32 allocation_id;
+       __le32 buffer_location;
+       __le32 num_frags;
+       struct iwl_fragment_data fragments[IWL_BUFFER_LOCATION_MAX_FRAGS];
+} __packed; /* BUFFER_ALLOCATION_CMD_API_S_VER_1 */
+
 #endif /* __iwl_fw_api_debug_h__ */
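A hedged sketch of populating the new buffer allocation command; the allocation/location values are placeholders, and the command-ID group encoding a real caller would need is simplified away:

/* Illustrative: describe a single DMA fragment to the firmware. */
static int send_buffer_allocation_sketch(struct iwl_trans *trans,
					 dma_addr_t addr, u32 size)
{
	struct iwl_buffer_allocation_cmd cmd = {
		.allocation_id = cpu_to_le32(1),   /* placeholder */
		.buffer_location = cpu_to_le32(0), /* placeholder */
		.num_frags = cpu_to_le32(1),
		.fragments[0] = {
			.address = cpu_to_le64(addr),
			.size = cpu_to_le32(size),
		},
	};
	struct iwl_host_cmd hcmd = {
		.id = BUFFER_ALLOCATION, /* real code must encode the group */
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};

	return iwl_trans_send_cmd(trans, &hcmd);
}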
index 17c7ef1662a9d6319f34cb1d2e03ca10b5a9407c..ca49db786ed616ee2db015f5fd765bd924315849 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -71,12 +73,59 @@ enum iwl_mac_conf_subcmd_ids {
         * @LOW_LATENCY_CMD: &struct iwl_mac_low_latency_cmd
         */
        LOW_LATENCY_CMD = 0x3,
+       /**
+        * @PROBE_RESPONSE_DATA_NOTIF: &struct iwl_probe_resp_data_notif
+        */
+       PROBE_RESPONSE_DATA_NOTIF = 0xFC,
+
        /**
         * @CHANNEL_SWITCH_NOA_NOTIF: &struct iwl_channel_switch_noa_notif
         */
        CHANNEL_SWITCH_NOA_NOTIF = 0xFF,
 };
 
+#define IWL_P2P_NOA_DESC_COUNT (2)
+
+/**
+ * struct iwl_p2p_noa_attr - NOA attr contained in probe resp FW notification
+ *
+ * @id: attribute id
+ * @len_low: length low half
+ * @len_high: length high half
+ * @idx: instance of NoA timing
+ * @ctwin: GO's CT window and power save capability
+ * @desc: NoA descriptor
+ * @reserved: reserved for alignment purposes
+ */
+struct iwl_p2p_noa_attr {
+       u8 id;
+       u8 len_low;
+       u8 len_high;
+       u8 idx;
+       u8 ctwin;
+       struct ieee80211_p2p_noa_desc desc[IWL_P2P_NOA_DESC_COUNT];
+       u8 reserved;
+} __packed;
+
+#define IWL_PROBE_RESP_DATA_NO_CSA (0xff)
+
+/**
+ * struct iwl_probe_resp_data_notif - notification with NOA and CSA counter
+ *
+ * @mac_id: the mac which should send the probe response
+ * @noa_active: indicates whether the NoA attribute should be handled
+ * @noa_attr: P2P NOA attribute
+ * @csa_counter: current csa counter
+ * @reserved: reserved for alignment purposes
+ */
+struct iwl_probe_resp_data_notif {
+       __le32 mac_id;
+       __le32 noa_active;
+       struct iwl_p2p_noa_attr noa_attr;
+       u8 csa_counter;
+       u8 reserved[3];
+} __packed; /* PROBE_RESPONSE_DATA_NTFY_API_S_VER_1 */
+
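Since the attribute length in iwl_p2p_noa_attr arrives split across two bytes, a one-line sketch of reassembling it (illustrative, not from this patch):

/* Illustrative: rebuild the 16-bit NoA attribute length from its halves. */
static inline u16 iwl_p2p_noa_attr_len_sketch(const struct iwl_p2p_noa_attr *attr)
{
	return attr->len_low | ((u16)attr->len_high << 8);
}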
 /**
  * struct iwl_channel_switch_noa_notif - Channel switch NOA notification
  *
index 55594c93b014cfe9c94ed689dd7482d835217bfe..1dd23f846fb9c4c89f8a0efa73a71efa83f0f08c 100644 (file)
@@ -578,4 +578,18 @@ struct iwl_he_sta_context_cmd {
        struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
 } __packed; /* STA_CONTEXT_DOT11AX_API_S */
 
+/**
+ * struct iwl_he_monitor_cmd - configure air sniffer for HE
+ * @bssid: the BSSID to sniff for
+ * @reserved1: reserved for dword alignment
+ * @aid: the AID to track on for HE MU
+ * @reserved2: reserved for future use
+ */
+struct iwl_he_monitor_cmd {
+       u8 bssid[6];
+       __le16 reserved1;
+       __le16 aid;
+       u8 reserved2[6];
+} __packed; /* HE_AIR_SNIFFER_CONFIG_CMD_API_S_VER_1 */
+
 #endif /* __iwl_fw_api_mac_h__ */
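A minimal sketch of filling the new HE sniffer command; the BSSID and AID sources are placeholders:

/* Illustrative: configure the HE air sniffer for one BSSID/AID pair.
 * ETH_ALEN (6) matches the bssid[6] field above. */
static void fill_he_monitor_cmd_sketch(struct iwl_he_monitor_cmd *cmd,
				       const u8 *bssid, u16 aid)
{
	memset(cmd, 0, sizeof(*cmd));
	memcpy(cmd->bssid, bssid, ETH_ALEN);
	cmd->aid = cpu_to_le16(aid);
}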
index 6c53383647942bc915bf09766a24276f4f22b5e2..93b392f0c6a4d1d7cd311852768f0c09ca30f969 100644 (file)
@@ -165,7 +165,7 @@ struct iwl_nvm_access_resp {
  */
 struct iwl_nvm_get_info {
        __le32 reserved;
-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_S_VER_1 */
+} __packed; /* REGULATORY_NVM_GET_INFO_CMD_API_S_VER_1 */
 
 /**
  * enum iwl_nvm_info_general_flags - flags in NVM_GET_INFO resp
@@ -180,14 +180,14 @@ enum iwl_nvm_info_general_flags {
  * @flags: bit 0: 1 - empty, 0 - non-empty
  * @nvm_version: nvm version
  * @board_type: board type
- * @reserved: reserved
+ * @n_hw_addrs: number of reserved MAC addresses
  */
 struct iwl_nvm_get_info_general {
        __le32 flags;
        __le16 nvm_version;
        u8 board_type;
-       u8 reserved;
-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_GENERAL_S_VER_1 */
+       u8 n_hw_addrs;
+} __packed; /* REGULATORY_NVM_GET_INFO_GENERAL_S_VER_2 */
 
 /**
  * enum iwl_nvm_mac_sku_flags - flags in &iwl_nvm_get_info_sku
@@ -231,7 +231,7 @@ struct iwl_nvm_get_info_sku {
 struct iwl_nvm_get_info_phy {
        __le32 tx_chains;
        __le32 rx_chains;
-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
+} __packed; /* REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
 
 #define IWL_NUM_CHANNELS (51)
 
@@ -245,7 +245,7 @@ struct iwl_nvm_get_info_regulatory {
        __le32 lar_enabled;
        __le16 channel_profile[IWL_NUM_CHANNELS];
        __le16 reserved;
-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
+} __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
 
 /**
  * struct iwl_nvm_get_info_rsp - response to get NVM data
@@ -259,7 +259,7 @@ struct iwl_nvm_get_info_rsp {
        struct iwl_nvm_get_info_sku mac_sku;
        struct iwl_nvm_get_info_phy phy_sku;
        struct iwl_nvm_get_info_regulatory regulatory;
-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_2 */
+} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_3 */
 
 /**
  * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed
@@ -269,22 +269,6 @@ struct iwl_nvm_access_complete_cmd {
        __le32 reserved;
 } __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */
 
-/**
- * struct iwl_mcc_update_cmd_v1 - Request the device to update geographic
- * regulatory profile according to the given MCC (Mobile Country Code).
- * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain.
- * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
- * MCC in the cmd response will be the relevant MCC in the NVM.
- * @mcc: given mobile country code
- * @source_id: the source from where we got the MCC, see iwl_mcc_source
- * @reserved: reserved for alignment
- */
-struct iwl_mcc_update_cmd_v1 {
-       __le16 mcc;
-       u8 source_id;
-       u8 reserved;
-} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_1 */
-
 /**
  * struct iwl_mcc_update_cmd - Request the device to update geographic
  * regulatory profile according to the given MCC (Mobile Country Code).
@@ -306,7 +290,18 @@ struct iwl_mcc_update_cmd {
 } __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */
 
 /**
- * struct iwl_mcc_update_resp_v1  - response to MCC_UPDATE_CMD.
+ * enum iwl_geo_information - geographic information.
+ * @GEO_NO_INFO: no special info for this geo profile.
+ * @GEO_WMM_ETSI_5GHZ_INFO: this geo profile limits the WMM params
+ *     for the 5 GHz band.
+ */
+enum iwl_geo_information {
+       GEO_NO_INFO =                   0,
+       GEO_WMM_ETSI_5GHZ_INFO =        BIT(0),
+};
+
+/**
+ * struct iwl_mcc_update_resp_v3 - response to MCC_UPDATE_CMD.
  * Contains the new channel control profile map, if changed, and the new MCC
  * (mobile country code).
  * The new MCC may be different than what was requested in MCC_UPDATE_CMD.
@@ -314,30 +309,23 @@ struct iwl_mcc_update_cmd {
  * @mcc: the new applied MCC
  * @cap: capabilities for all channels which matches the MCC
  * @source_id: the MCC source, see iwl_mcc_source
- * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51
- *             channels, depending on platform)
+ * @time: time elapsed from the MCC test start (in units of 30 seconds)
+ * @geo_info: geographic specific profile information
+ *     see &enum iwl_geo_information.
+ * @n_channels: number of channels in @channels_data.
  * @channels: channel control data map, DWORD for each channel. Only the first
  *     16bits are used.
  */
-struct iwl_mcc_update_resp_v1 {
+struct iwl_mcc_update_resp_v3 {
        __le32 status;
        __le16 mcc;
        u8 cap;
        u8 source_id;
+       __le16 time;
+       __le16 geo_info;
        __le32 n_channels;
        __le32 channels[0];
-} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_1 */
-
-/**
- * enum iwl_geo_information - geographic information.
- * @GEO_NO_INFO: no special info for this geo profile.
- * @GEO_WMM_ETSI_5GHZ_INFO: this geo profile limits the WMM params
- *     for the 5 GHz band.
- */
-enum iwl_geo_information {
-       GEO_NO_INFO =                   0,
-       GEO_WMM_ETSI_5GHZ_INFO =        BIT(0),
-};
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */
 
 /**
  * struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD.
@@ -347,25 +335,26 @@ enum iwl_geo_information {
  * @status: see &enum iwl_mcc_update_status
  * @mcc: the new applied MCC
  * @cap: capabilities for all channels which matches the MCC
- * @source_id: the MCC source, see iwl_mcc_source
- * @time: time elapsed from the MCC test start (in 30 seconds TU)
+ * @time: time elapsed from the MCC test start (in units of 30 seconds)
  * @geo_info: geographic specific profile information
  *     see &enum iwl_geo_information.
- * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51
- *             channels, depending on platform)
+ * @source_id: the MCC source, see iwl_mcc_source
+ * @reserved: reserved for 4-byte alignment.
+ * @n_channels: number of channels in @channels_data.
  * @channels: channel control data map, DWORD for each channel. Only the first
  *     16bits are used.
  */
 struct iwl_mcc_update_resp {
        __le32 status;
        __le16 mcc;
-       u8 cap;
-       u8 source_id;
+       __le16 cap;
        __le16 time;
        __le16 geo_info;
+       u8 source_id;
+       u8 reserved[3];
        __le32 n_channels;
        __le32 channels[0];
-} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_4 */
 
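Because channels[] is a flexible array, the response size depends on n_channels; a sketch of validating it against the received packet length (illustrative, with a caller-supplied length):

/* Illustrative: reject a notification too short for the channel map it
 * claims to carry; only the low 16 bits of each entry are meaningful. */
static bool mcc_resp_valid_sketch(const struct iwl_mcc_update_resp *resp,
				  u32 pkt_len)
{
	u32 n = le32_to_cpu(resp->n_channels);

	if (n > IWL_NUM_CHANNELS)
		return false;
	return pkt_len >= sizeof(*resp) + n * sizeof(resp->channels[0]);
}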
 /**
  * struct iwl_mcc_chub_notif - chub notifies of mcc change
index a3c77e01863b086cde85ee132130678c6085ae13..286a22da232dca4fcebb0c9bb4151556ef0beb7c 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -316,7 +318,9 @@ enum iwl_dev_tx_power_cmd_mode {
        IWL_TX_POWER_MODE_SET_DEVICE = 1,
        IWL_TX_POWER_MODE_SET_CHAINS = 2,
        IWL_TX_POWER_MODE_SET_ACK = 3,
-}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_4 */;
+       IWL_TX_POWER_MODE_SET_SAR_TIMER = 4,
+       IWL_TX_POWER_MODE_SET_SAR_TIMER_DEFAULT_TABLE = 5,
+}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_5 */;
 
 #define IWL_NUM_CHAIN_LIMITS   2
 #define IWL_NUM_SUB_BANDS      5
@@ -350,13 +354,35 @@ struct iwl_dev_tx_power_cmd_v3 {
  *     reduction.
  * @reserved: reserved (padding)
  */
-struct iwl_dev_tx_power_cmd {
+struct iwl_dev_tx_power_cmd_v4 {
        /* v4 is just an extension of v3 - keep this here */
        struct iwl_dev_tx_power_cmd_v3 v3;
        u8 enable_ack_reduction;
        u8 reserved[3];
 } __packed; /* TX_REDUCED_POWER_API_S_VER_4 */
 
+/**
+ * struct iwl_dev_tx_power_cmd - TX power reduction command
+ * @v3: version 3 of the command, embedded here for easier software handling
+ * @enable_ack_reduction: enable or disable close range ack TX power
+ *     reduction.
+ * @per_chain_restriction_changed: whether per_chain_restriction has changed
+ *     since the last command. Used if set_mode is
+ *     IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ *     Note: if not changed, the command is used for keep-alive only.
+ * @reserved: reserved (padding)
+ * @timer_period: timer in milliseconds. On expiry the FW reverts to the
+ *     default BIOS values. Relevant if set_mode is
+ *     IWL_TX_POWER_MODE_SET_SAR_TIMER
+ */
+struct iwl_dev_tx_power_cmd {
+       /* v5 is just an extension of v3 - keep this here */
+       struct iwl_dev_tx_power_cmd_v3 v3;
+       u8 enable_ack_reduction;
+       u8 per_chain_restriction_changed;
+       u8 reserved[2];
+       __le32 timer_period;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_5 */
+
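A sketch of the keep-alive use spelled out in the kernel-doc above: with per_chain_restriction_changed left at zero, only the timer matters (illustrative, assuming the v3 sub-struct's set_mode field, which this hunk does not show):

/* Illustrative: SAR-timer keep-alive; per the kernel-doc above, leaving
 * per_chain_restriction_changed at 0 makes this keep-alive only. */
static void fill_sar_keepalive_sketch(struct iwl_dev_tx_power_cmd *cmd,
				      u32 period_ms)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_SAR_TIMER);
	cmd->timer_period = cpu_to_le32(period_ms);
}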
 #define IWL_NUM_GEO_PROFILES   3
 
 /**
index 087fae91baeff2fcbaff253e33f04ace4b07c91b..9eddc4dc2ae6f2db28e440714f7a7df0ddc83db2 100644 (file)
 
 /**
  * enum iwl_tlc_mng_cfg_flags_enum - options for TLC config flags
- * @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC
+ * @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC. For HE this enables STBC for
+ *                                 bandwidths <= 80MHz
  * @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC
+ * @IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK: enable STBC in HE at 160MHz
+ *                                           bandwidth
+ * @IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK: enable HE Dual Carrier Modulation
+ *                                         for BPSK (MCS 0) with 1 spatial
+ *                                         stream
+ * @IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK: enable HE Dual Carrier Modulation
+ *                                         for BPSK (MCS 0) with 2 spatial
+ *                                         streams
  */
 enum iwl_tlc_mng_cfg_flags {
-       IWL_TLC_MNG_CFG_FLAGS_STBC_MSK          = BIT(0),
-       IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK          = BIT(1),
+       IWL_TLC_MNG_CFG_FLAGS_STBC_MSK                  = BIT(0),
+       IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK                  = BIT(1),
+       IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK        = BIT(2),
+       IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK          = BIT(3),
+       IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK          = BIT(4),
 };
 
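A one-liner showing how the new HE flags combine when building a TLC configuration (illustrative; the width of the flags field in the actual config command is not shown in this hunk):

/* Illustrative: request STBC plus single-stream DCM for an HE peer. */
u32 tlc_flags_sketch = IWL_TLC_MNG_CFG_FLAGS_STBC_MSK |
		       IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;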
 /**
@@ -217,66 +229,6 @@ struct iwl_tlc_update_notif {
        __le32 amsdu_enabled;
 } __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */
 
-/**
- * enum iwl_tlc_debug_flags - debug options
- * @IWL_TLC_DEBUG_FIXED_RATE: set fixed rate for rate scaling
- * @IWL_TLC_DEBUG_STATS_TH: threshold for sending statistics to the driver, in
- *     frames
- * @IWL_TLC_DEBUG_STATS_TIME_TH: threshold for sending statistics to the
- *     driver, in msec
- * @IWL_TLC_DEBUG_AGG_TIME_LIM: time limit for a BA session
- * @IWL_TLC_DEBUG_AGG_DIS_START_TH: frame with try-count greater than this
- *     threshold should not start an aggregation session
- * @IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM: set max number of frames in an aggregation
- * @IWL_TLC_DEBUG_RENEW_ADDBA_DELAY: delay between retries of ADD BA
- * @IWL_TLC_DEBUG_START_AC_RATE_IDX: frames per second to start a BA session
- * @IWL_TLC_DEBUG_NO_FAR_RANGE_TWEAK: disable BW scaling
- */
-enum iwl_tlc_debug_flags {
-       IWL_TLC_DEBUG_FIXED_RATE,
-       IWL_TLC_DEBUG_STATS_TH,
-       IWL_TLC_DEBUG_STATS_TIME_TH,
-       IWL_TLC_DEBUG_AGG_TIME_LIM,
-       IWL_TLC_DEBUG_AGG_DIS_START_TH,
-       IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM,
-       IWL_TLC_DEBUG_RENEW_ADDBA_DELAY,
-       IWL_TLC_DEBUG_START_AC_RATE_IDX,
-       IWL_TLC_DEBUG_NO_FAR_RANGE_TWEAK,
-}; /* TLC_MNG_DEBUG_FLAGS_API_E_VER_1 */
-
-/**
- * struct iwl_dhc_tlc_dbg - fixed debug config
- * @sta_id: bit 0 - enable/disable, bits 1 - 7 hold station id
- * @reserved1: reserved
- * @flags: bitmap of %IWL_TLC_DEBUG_\*
- * @fixed_rate: rate value
- * @stats_threshold: if number of tx-ed frames is greater, send statistics
- * @time_threshold: statistics threshold in usec
- * @agg_time_lim: max agg time
- * @agg_dis_start_threshold: frames with try-cont greater than this count will
- *                          not be aggregated
- * @agg_frame_count_lim: agg size
- * @addba_retry_delay: delay between retries of ADD BA
- * @start_ac_rate_idx: frames per second to start a BA session
- * @no_far_range_tweak: disable BW scaling
- * @reserved2: reserved
- */
-struct iwl_dhc_tlc_cmd {
-       u8 sta_id;
-       u8 reserved1[3];
-       __le32 flags;
-       __le32 fixed_rate;
-       __le16 stats_threshold;
-       __le16 time_threshold;
-       __le16 agg_time_lim;
-       __le16 agg_dis_start_threshold;
-       __le16 agg_frame_count_lim;
-       __le16 addba_retry_delay;
-       u8 start_ac_rate_idx[IEEE80211_NUM_ACS];
-       u8 no_far_range_tweak;
-       u8 reserved2[3];
-} __packed;
-
 /*
  * These serve as indexes into
  * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT];
index 2f599353c8856b4c9604e4953722771c7fef4837..0537496b6eb19f58ad0796495356a4953bf4da3c 100644 (file)
@@ -362,18 +362,49 @@ enum iwl_rx_he_phy {
        /* 6 bits reserved */
        IWL_RX_HE_PHY_DELIM_EOF                 = BIT(31),
 
-       /* second dword - MU data */
-       IWL_RX_HE_PHY_SIGB_COMPRESSION          = BIT_ULL(32 + 0),
-       IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK = 0x1e00000000ULL,
+       /* second dword - common data */
        IWL_RX_HE_PHY_HE_LTF_NUM_MASK           = 0xe000000000ULL,
        IWL_RX_HE_PHY_RU_ALLOC_SEC80            = BIT_ULL(32 + 8),
        /* trigger encoded */
        IWL_RX_HE_PHY_RU_ALLOC_MASK             = 0xfe0000000000ULL,
-       IWL_RX_HE_PHY_SIGB_MCS_MASK             = 0xf000000000000ULL,
-       /* 1 bit reserved */
-       IWL_RX_HE_PHY_SIGB_DCM                  = BIT_ULL(32 + 21),
-       IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK   = 0xc0000000000000ULL,
-       /* 8 bits reserved */
+       IWL_RX_HE_PHY_INFO_TYPE_MASK            = 0xf000000000000000ULL,
+       IWL_RX_HE_PHY_INFO_TYPE_SU              = 0x0, /* TSF low valid (first DW) */
+       IWL_RX_HE_PHY_INFO_TYPE_MU              = 0x1, /* TSF low/high valid (both DWs) */
+       IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO     = 0x2, /* same + SIGB-common0/1/2 valid */
+       IWL_RX_HE_PHY_INFO_TYPE_TB              = 0x3, /* TSF low/high valid (both DWs) */
+
+       /* second dword - MU data */
+       IWL_RX_HE_PHY_MU_SIGB_COMPRESSION               = BIT_ULL(32 + 0),
+       IWL_RX_HE_PHY_MU_SIBG_SYM_OR_USER_NUM_MASK      = 0x1e00000000ULL,
+       IWL_RX_HE_PHY_MU_SIGB_MCS_MASK                  = 0xf000000000000ULL,
+       IWL_RX_HE_PHY_MU_SIGB_DCM                       = BIT_ULL(32 + 21),
+       IWL_RX_HE_PHY_MU_PREAMBLE_PUNC_TYPE_MASK        = 0xc0000000000000ULL,
+
+       /* second dword - TB data */
+       IWL_RX_HE_PHY_TB_PILOT_TYPE                     = BIT_ULL(32 + 0),
+       IWL_RX_HE_PHY_TB_LOW_SS_MASK                    = 0xe00000000ULL
+};
+
+enum iwl_rx_he_sigb_common0 {
+       /* the a1/a2/... is what the PHY/firmware calls the values */
+       IWL_RX_HE_SIGB_COMMON0_CH1_RU0          = 0x000000ff, /* a1 */
+       IWL_RX_HE_SIGB_COMMON0_CH1_RU2          = 0x0000ff00, /* a2 */
+       IWL_RX_HE_SIGB_COMMON0_CH2_RU0          = 0x00ff0000, /* b1 */
+       IWL_RX_HE_SIGB_COMMON0_CH2_RU2          = 0xff000000, /* b2 */
+};
+
+enum iwl_rx_he_sigb_common1 {
+       IWL_RX_HE_SIGB_COMMON1_CH1_RU1          = 0x000000ff, /* c1 */
+       IWL_RX_HE_SIGB_COMMON1_CH1_RU3          = 0x0000ff00, /* c2 */
+       IWL_RX_HE_SIGB_COMMON1_CH2_RU1          = 0x00ff0000, /* d1 */
+       IWL_RX_HE_SIGB_COMMON1_CH2_RU3          = 0xff000000, /* d2 */
+};
+
+enum iwl_rx_he_sigb_common2 {
+       IWL_RX_HE_SIGB_COMMON2_CH1_CTR_RU       = 0x0001,
+       IWL_RX_HE_SIGB_COMMON2_CH2_CTR_RU       = 0x0002,
+       IWL_RX_HE_SIGB_COMMON2_CH1_CRC_OK       = 0x0004,
+       IWL_RX_HE_SIGB_COMMON2_CH2_CRC_OK       = 0x0008,
 };
 
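A sketch of unpacking the four RU-allocation bytes carried in the HE-SIG-B common0 word; the shifts mirror the masks defined above:

/* Illustrative: split sigb_common0 into the a1/a2/b1/b2 bytes named in
 * the comments above. */
static void decode_sigb_common0_sketch(__le32 sigb_common0, u8 ru[4])
{
	u32 v = le32_to_cpu(sigb_common0);

	ru[0] = v & 0xff;         /* CH1_RU0, "a1" */
	ru[1] = (v >> 8) & 0xff;  /* CH1_RU2, "a2" */
	ru[2] = (v >> 16) & 0xff; /* CH2_RU0, "b1" */
	ru[3] = (v >> 24) & 0xff; /* CH2_RU2, "b2" */
}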
 /**
@@ -381,15 +412,31 @@ enum iwl_rx_he_phy {
  */
 struct iwl_rx_mpdu_desc_v1 {
        /* DW7 - carries rss_hash only when rpa_en == 1 */
-       /**
-        * @rss_hash: RSS hash value
-        */
-       __le32 rss_hash;
+       union {
+               /**
+                * @rss_hash: RSS hash value
+                */
+               __le32 rss_hash;
+
+               /**
+                * @sigb_common0: for HE sniffer, HE-SIG-B common part 0
+                */
+               __le32 sigb_common0;
+       };
+
        /* DW8 - carries filter_match only when rpa_en == 1 */
-       /**
-        * @filter_match: filter match value
-        */
-       __le32 filter_match;
+       union {
+               /**
+                * @filter_match: filter match value
+                */
+               __le32 filter_match;
+
+               /**
+                * @sigb_common1: for HE sniffer, HE-SIG-B common part 1
+                */
+               __le32 sigb_common1;
+       };
+
        /* DW9 */
        /**
         * @rate_n_flags: RX rate/flags encoding
@@ -439,15 +486,30 @@ struct iwl_rx_mpdu_desc_v1 {
  */
 struct iwl_rx_mpdu_desc_v3 {
        /* DW7 - carries filter_match only when rpa_en == 1 */
-       /**
-        * @filter_match: filter match value
-        */
-       __le32 filter_match;
+       union {
+               /**
+                * @filter_match: filter match value
+                */
+               __le32 filter_match;
+
+               /**
+                * @sigb_common0: for HE sniffer, HE-SIG-B common part 0
+                */
+               __le32 sigb_common0;
+       };
+
        /* DW8 - carries rss_hash only when rpa_en == 1 */
-       /**
-        * @rss_hash: RSS hash value
-        */
-       __le32 rss_hash;
+       union {
+               /**
+                * @rss_hash: RSS hash value
+                */
+               __le32 rss_hash;
+
+               /**
+                * @sigb_common1: for HE sniffer, HE-SIG-B common part 1
+                */
+               __le32 sigb_common1;
+       };
        /* DW9 */
        /**
         * @partial_hash: 31:0 ip/tcp header hash
@@ -543,10 +605,18 @@ struct iwl_rx_mpdu_desc {
         * @raw_csum: raw checksum (allegedly unreliable)
         */
        __le16 raw_csum;
-       /**
-        * @l3l4_flags: &enum iwl_rx_l3l4_flags
-        */
-       __le16 l3l4_flags;
+
+       union {
+               /**
+                * @l3l4_flags: &enum iwl_rx_l3l4_flags
+                */
+               __le16 l3l4_flags;
+
+               /**
+                * @sigb_common2: for HE sniffer, HE-SIG-B common part 2
+                */
+               __le16 sigb_common2;
+       };
        /* DW5 */
        /**
         * @status: &enum iwl_rx_mpdu_status
@@ -574,6 +644,69 @@ struct iwl_rx_mpdu_desc {
 
 #define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1)
 
+#define IWL_CD_STTS_OPTIMIZED_POS      0
+#define IWL_CD_STTS_OPTIMIZED_MSK      0x01
+#define IWL_CD_STTS_TRANSFER_STATUS_POS        1
+#define IWL_CD_STTS_TRANSFER_STATUS_MSK        0x0E
+#define IWL_CD_STTS_WIFI_STATUS_POS    4
+#define IWL_CD_STTS_WIFI_STATUS_MSK    0xF0
+
+/**
+ * enum iwl_completion_desc_transfer_status -  transfer status (bits 1-3)
+ * @IWL_CD_STTS_UNUSED: unused
+ * @IWL_CD_STTS_UNUSED_2: unused
+ * @IWL_CD_STTS_END_TRANSFER: successful transfer complete.
+ *     In sniffer mode, when split is used, set in last CD completion. (RX)
+ * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for
+ *     all CD completion. (RX)
+ * @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX)
+ * @IWL_CD_STTS_ERROR: general error (RX)
+ */
+enum iwl_completion_desc_transfer_status {
+       IWL_CD_STTS_UNUSED,
+       IWL_CD_STTS_UNUSED_2,
+       IWL_CD_STTS_END_TRANSFER,
+       IWL_CD_STTS_OVERFLOW,
+       IWL_CD_STTS_ABORTED,
+       IWL_CD_STTS_ERROR,
+};
+
+/**
+ * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7)
+ * @IWL_CD_STTS_VALID: the packet is valid (RX)
+ * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX)
+ * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX)
+ * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX)
+ * @IWL_CD_STTS_DUP: duplicate packet (RX)
+ * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX)
+ * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX)
+ * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX)
+ * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX)
+ * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX)
+ * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX)
+ * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX)
+ * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX)
+ * @IWL_CD_STTS_NOT_USED: completed but not used (RX)
+ * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX)
+ */
+enum iwl_completion_desc_wifi_status {
+       IWL_CD_STTS_VALID,
+       IWL_CD_STTS_FCS_ERR,
+       IWL_CD_STTS_SEC_KEY_ERR,
+       IWL_CD_STTS_DECRYPTION_ERR,
+       IWL_CD_STTS_DUP,
+       IWL_CD_STTS_ICV_MIC_ERR,
+       IWL_CD_STTS_INTERNAL_SNAP_ERR,
+       IWL_CD_STTS_SEC_PORT_FAIL,
+       IWL_CD_STTS_BA_OLD_SN,
+       IWL_CD_STTS_QOS_NULL,
+       IWL_CD_STTS_MAC_HDR_ERR,
+       IWL_CD_STTS_MAX_RETRANS,
+       IWL_CD_STTS_EX_LIFETIME,
+       IWL_CD_STTS_NOT_USED,
+       IWL_CD_STTS_REPLAY_ERR,
+};
+
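A sketch of splitting a completion-descriptor status byte with the POS/MSK pairs defined above (illustrative):

/* Illustrative: extract the transfer and wifi status fields. */
static void parse_cd_status_sketch(u8 status, u8 *transfer, u8 *wifi)
{
	*transfer = (status & IWL_CD_STTS_TRANSFER_STATUS_MSK) >>
		    IWL_CD_STTS_TRANSFER_STATUS_POS;
	*wifi = (status & IWL_CD_STTS_WIFI_STATUS_MSK) >>
		IWL_CD_STTS_WIFI_STATUS_POS;
}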
 struct iwl_frame_release {
        u8 baid;
        u8 reserved;
index a17c4a79b8d468460fa3d62911e568e5e2f05668..18741889ec309e4f7edc9bf3fa0d5c21f143f675 100644 (file)
@@ -262,6 +262,7 @@ enum iwl_scan_channel_flags {
        IWL_SCAN_CHANNEL_FLAG_EBS               = BIT(0),
        IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE      = BIT(1),
        IWL_SCAN_CHANNEL_FLAG_CACHE_ADD         = BIT(2),
+       IWL_SCAN_CHANNEL_FLAG_EBS_FRAG          = BIT(3),
 };
 
 /* struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
@@ -595,9 +596,12 @@ enum iwl_umac_scan_general_flags {
  * enum iwl_umac_scan_general_flags2 - UMAC scan general flags #2
  * @IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL: Whether to send a complete
  *     notification per channel or not.
+ * @IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER: Whether to allow channel
+ *     reorder optimization or not.
  */
 enum iwl_umac_scan_general_flags2 {
-       IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = BIT(0),
+       IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL         = BIT(0),
+       IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER     = BIT(1),
 };
 
 /**
index dc40cbd52f9208953214a2248fbf4da4605e057f..450227f81706277b876869665ceeff54005bd583 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -391,7 +393,7 @@ enum iwl_sta_type {
  * @tfd_queue_msk: tfd queues used by this station.
  *     Obsolete for new TX API (9 and above).
  * @rx_ba_window: aggregation window size
- * @sp_length: the size of the SP as it appears in the WME IE
+ * @sp_length: the size of the SP in actual number of frames
  * @uapsd_acs:  4 LS bits are trigger enabled ACs, 4 MS bits are the deliver
  *     enabled ACs.
  *
index 514b86123d3d366fd368e9d49f7d10222f77bae5..358bdf051e837c519a26cebad875e1a0af52cb45 100644 (file)
@@ -186,7 +186,7 @@ enum iwl_tx_cmd_sec_ctrl {
 /*
  * TID for non QoS frames - to be written in tid_tspec
  */
-#define IWL_TID_NON_QOS        IWL_MAX_TID_COUNT
+#define IWL_TID_NON_QOS        0
 
 /*
  * Limits on the retransmissions - to be written in {data,rts}_retry_limit
@@ -747,9 +747,9 @@ enum iwl_mvm_ba_resp_flags {
  * @tfd_cnt: number of TFD-Q elements
  * @ra_tid_cnt: number of RATID-Q elements
  * @tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd
- *     for details.
+ *     for details. Length in @tfd_cnt.
  * @ra_tid: array of RA-TID queue status updates. For debug purposes only. See
- *     &iwl_mvm_compressed_ba_ratid for more details.
+ *     &iwl_mvm_compressed_ba_ratid for more details. Length in @ra_tid_cnt.
  */
 struct iwl_mvm_compressed_ba_notif {
        __le32 flags;
@@ -766,7 +766,7 @@ struct iwl_mvm_compressed_ba_notif {
        __le32 tx_rate;
        __le16 tfd_cnt;
        __le16 ra_tid_cnt;
-       struct iwl_mvm_compressed_ba_tfd tfd[1];
+       struct iwl_mvm_compressed_ba_tfd tfd[0];
        struct iwl_mvm_compressed_ba_ratid ra_tid[0];
 } __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */
 
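With tfd[] now a true zero-length tail, a sketch of iterating it; process_tfd_update() is a hypothetical helper, not from this patch:

/* Illustrative: walk the variable-length TFD queue updates; tfd_cnt
 * gives the element count per the kernel-doc above. */
static void walk_ba_tfds_sketch(const struct iwl_mvm_compressed_ba_notif *ba)
{
	u16 i, tfd_cnt = le16_to_cpu(ba->tfd_cnt);

	for (i = 0; i < tfd_cnt; i++)
		process_tfd_update(&ba->tfd[i]); /* hypothetical helper */
}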
index a31a42e673c46fffec58ac95acbda96a442d803c..f44c716b113015a16911af216d203676e305a08e 100644 (file)
@@ -19,9 +19,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program;
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -243,7 +240,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
        if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
                return;
 
-       if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
+       if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
                /* Pull RXF1 */
                iwl_fwrt_dump_rxf(fwrt, dump_data,
                                  cfg->lmac[0].rxfifo1_size, 0, 0);
@@ -257,7 +254,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
                                          LMAC2_PRPH_OFFSET, 2);
        }
 
-       if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
+       if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
                /* Pull TXF data from LMAC1 */
                for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
                        /* Mark the number of TXF we're pulling now */
@@ -282,7 +279,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
                }
        }
 
-       if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
+       if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
            fw_has_capa(&fwrt->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
                /* Pull UMAC internal TXF data from all TXFs */
@@ -458,8 +455,8 @@ static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
        { .start = 0x00a02400, .end = 0x00a02758 },
 };
 
-static void _iwl_read_prph_block(struct iwl_trans *trans, u32 start,
-                                u32 len_bytes, __le32 *data)
+static void iwl_read_prph_block(struct iwl_trans *trans, u32 start,
+                               u32 len_bytes, __le32 *data)
 {
        u32 i;
 
@@ -467,21 +464,6 @@ static void _iwl_read_prph_block(struct iwl_trans *trans, u32 start,
                *data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i));
 }
 
-static bool iwl_read_prph_block(struct iwl_trans *trans, u32 start,
-                               u32 len_bytes, __le32 *data)
-{
-       unsigned long flags;
-       bool success = false;
-
-       if (iwl_trans_grab_nic_access(trans, &flags)) {
-               success = true;
-               _iwl_read_prph_block(trans, start, len_bytes, data);
-               iwl_trans_release_nic_access(trans, &flags);
-       }
-
-       return success;
-}
-
 static void iwl_dump_prph(struct iwl_trans *trans,
                          struct iwl_fw_error_dump_data **data,
                          const struct iwl_prph_range *iwl_prph_dump_addr,
@@ -507,11 +489,11 @@ static void iwl_dump_prph(struct iwl_trans *trans,
                prph = (void *)(*data)->data;
                prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
 
-               _iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start,
-                                    /* our range is inclusive, hence + 4 */
-                                    iwl_prph_dump_addr[i].end -
-                                    iwl_prph_dump_addr[i].start + 4,
-                                    (void *)prph->data);
+               iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start,
+                                   /* our range is inclusive, hence + 4 */
+                                   iwl_prph_dump_addr[i].end -
+                                   iwl_prph_dump_addr[i].start + 4,
+                                   (void *)prph->data);
 
                *data = iwl_fw_error_next_data(*data);
        }
@@ -556,42 +538,130 @@ static struct scatterlist *alloc_sgtable(int size)
        return table;
 }
 
-void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
+static int iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt)
+{
+       u32 prph_len = 0;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
+            i++) {
+               /* The range includes both boundaries */
+               int num_bytes_in_chunk =
+                       iwl_prph_dump_addr_comm[i].end -
+                       iwl_prph_dump_addr_comm[i].start + 4;
+
+               prph_len += sizeof(struct iwl_fw_error_dump_data) +
+                       sizeof(struct iwl_fw_error_dump_prph) +
+                       num_bytes_in_chunk;
+       }
+
+       if (fwrt->trans->cfg->mq_rx_supported) {
+               for (i = 0; i <
+                       ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
+                       /* The range includes both boundaries */
+                       int num_bytes_in_chunk =
+                               iwl_prph_dump_addr_9000[i].end -
+                               iwl_prph_dump_addr_9000[i].start + 4;
+
+                       prph_len += sizeof(struct iwl_fw_error_dump_data) +
+                               sizeof(struct iwl_fw_error_dump_prph) +
+                               num_bytes_in_chunk;
+               }
+       }
+       return prph_len;
+}
+
+static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt,
+                           struct iwl_fw_error_dump_data **dump_data,
+                           u32 len, u32 ofs, u32 type)
+{
+       struct iwl_fw_error_dump_mem *dump_mem;
+
+       if (!len)
+               return;
+
+       (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+       (*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem));
+       dump_mem = (void *)(*dump_data)->data;
+       dump_mem->type = cpu_to_le32(type);
+       dump_mem->offset = cpu_to_le32(ofs);
+       iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len);
+       *dump_data = iwl_fw_error_next_data(*dump_data);
+
+       IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", dump_mem->type);
+}
+
+#define ADD_LEN(len, item_len, const_len) \
+       do { size_t item = item_len; len += (!!item) * const_len + item; } \
+       while (0)
+
+static int iwl_fw_fifo_len(struct iwl_fw_runtime *fwrt,
+                          struct iwl_fwrt_shared_mem_cfg *mem_cfg)
+{
+       size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) +
+                        sizeof(struct iwl_fw_error_dump_fifo);
+       u32 fifo_len = 0;
+       int i;
+
+       if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)))
+               goto dump_txf;
+
+       /* Count RXF2 size */
+       ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len);
+
+       /* Count RXF1 sizes */
+       for (i = 0; i < mem_cfg->num_lmacs; i++)
+               ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len);
+
+dump_txf:
+       if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)))
+               goto dump_internal_txf;
+
+       /* Count TXF sizes */
+       for (i = 0; i < mem_cfg->num_lmacs; i++) {
+               int j;
+
+               for (j = 0; j < mem_cfg->num_txfifo_entries; j++)
+                       ADD_LEN(fifo_len, mem_cfg->lmac[i].txfifo_size[j],
+                               hdr_len);
+       }
+
+dump_internal_txf:
+       if (!((fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) &&
+             fw_has_capa(&fwrt->fw->ucode_capa,
+                         IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)))
+               goto out;
+
+       for (i = 0; i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); i++)
+               ADD_LEN(fifo_len, mem_cfg->internal_txfifo_size[i], hdr_len);
+
+out:
+       return fifo_len;
+}
+
+static struct iwl_fw_error_dump_file *
+_iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
+                  struct iwl_fw_dump_ptrs *fw_error_dump)
 {
        struct iwl_fw_error_dump_file *dump_file;
        struct iwl_fw_error_dump_data *dump_data;
        struct iwl_fw_error_dump_info *dump_info;
-       struct iwl_fw_error_dump_mem *dump_mem;
        struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg;
        struct iwl_fw_error_dump_trigger_desc *dump_trig;
-       struct iwl_fw_dump_ptrs *fw_error_dump;
-       struct scatterlist *sg_dump_data;
        u32 sram_len, sram_ofs;
-       const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg_mem_tlv;
+       const struct iwl_fw_dbg_mem_seg_tlv *fw_mem = fwrt->fw->dbg.mem_tlv;
        struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg;
-       u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0;
-       u32 smem_len = fwrt->fw->n_dbg_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
-       u32 sram2_len = fwrt->fw->n_dbg_mem_tlv ?
+       u32 file_len, fifo_len = 0, prph_len = 0, radio_len = 0;
+       u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
+       u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ?
                                0 : fwrt->trans->cfg->dccm2_len;
        bool monitor_dump_only = false;
        int i;
 
-       IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
-
-       /* there's no point in fw dump if the bus is dead */
-       if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
-               IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
-               goto out;
-       }
-
        if (fwrt->dump.trig &&
            fwrt->dump.trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
                monitor_dump_only = true;
 
-       fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
-       if (!fw_error_dump)
-               goto out;
-
        /* SRAM - include stack CCM if driver knows the values for it */
        if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
                const struct fw_img *img;
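
The ADD_LEN() macro introduced in this hunk captures the pattern the removed code below repeats by hand: add an item's size plus a header, but count the header only when the item is actually present. One expansion written out, with the values used in iwl_fw_fifo_len():

/* ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len) expands to: */
do {
	size_t item = mem_cfg->rxfifo2_size;
	/* !!item is 0 or 1, so the header is counted only if item != 0 */
	fifo_len += (!!item) * hdr_len + item;
} while (0);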
@@ -606,138 +676,43 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 
        /* reading RXF/TXF sizes */
        if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
-               fifo_data_len = 0;
-
-               if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
-
-                       /* Count RXF2 size */
-                       if (mem_cfg->rxfifo2_size) {
-                               /* Add header info */
-                               fifo_data_len +=
-                                       mem_cfg->rxfifo2_size +
-                                       sizeof(*dump_data) +
-                                       sizeof(struct iwl_fw_error_dump_fifo);
-                       }
-
-                       /* Count RXF1 sizes */
-                       for (i = 0; i < mem_cfg->num_lmacs; i++) {
-                               if (!mem_cfg->lmac[i].rxfifo1_size)
-                                       continue;
-
-                               /* Add header info */
-                               fifo_data_len +=
-                                       mem_cfg->lmac[i].rxfifo1_size +
-                                       sizeof(*dump_data) +
-                                       sizeof(struct iwl_fw_error_dump_fifo);
-                       }
-               }
-
-               if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
-                       size_t fifo_const_len = sizeof(*dump_data) +
-                               sizeof(struct iwl_fw_error_dump_fifo);
-
-                       /* Count TXF sizes */
-                       for (i = 0; i < mem_cfg->num_lmacs; i++) {
-                               int j;
-
-                               for (j = 0; j < mem_cfg->num_txfifo_entries;
-                                    j++) {
-                                       if (!mem_cfg->lmac[i].txfifo_size[j])
-                                               continue;
-
-                                       /* Add header info */
-                                       fifo_data_len +=
-                                               fifo_const_len +
-                                               mem_cfg->lmac[i].txfifo_size[j];
-                               }
-                       }
-               }
-
-               if ((fwrt->fw->dbg_dump_mask &
-                   BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) &&
-                   fw_has_capa(&fwrt->fw->ucode_capa,
-                               IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
-                       for (i = 0;
-                            i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
-                            i++) {
-                               if (!mem_cfg->internal_txfifo_size[i])
-                                       continue;
-
-                               /* Add header info */
-                               fifo_data_len +=
-                                       mem_cfg->internal_txfifo_size[i] +
-                                       sizeof(*dump_data) +
-                                       sizeof(struct iwl_fw_error_dump_fifo);
-                       }
-               }
+               fifo_len = iwl_fw_fifo_len(fwrt, mem_cfg);
 
                /* Make room for PRPH registers */
                if (!fwrt->trans->cfg->gen2 &&
-                   fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
-                       for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
-                            i++) {
-                               /* The range includes both boundaries */
-                               int num_bytes_in_chunk =
-                                       iwl_prph_dump_addr_comm[i].end -
-                                       iwl_prph_dump_addr_comm[i].start + 4;
-
-                               prph_len += sizeof(*dump_data) +
-                                       sizeof(struct iwl_fw_error_dump_prph) +
-                                       num_bytes_in_chunk;
-                       }
-               }
-
-               if (!fwrt->trans->cfg->gen2 &&
-                   fwrt->trans->cfg->mq_rx_supported &&
-                   fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
-                       for (i = 0; i <
-                               ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
-                               /* The range includes both boundaries */
-                               int num_bytes_in_chunk =
-                                       iwl_prph_dump_addr_9000[i].end -
-                                       iwl_prph_dump_addr_9000[i].start + 4;
-
-                               prph_len += sizeof(*dump_data) +
-                                       sizeof(struct iwl_fw_error_dump_prph) +
-                                       num_bytes_in_chunk;
-                       }
-               }
+                   fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH))
+                       prph_len += iwl_fw_get_prph_len(fwrt);
 
                if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
-                   fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
+                   fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
                        radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
        }
 
-       file_len = sizeof(*dump_file) +
-                  fifo_data_len +
-                  prph_len +
-                  radio_len;
+       file_len = sizeof(*dump_file) + fifo_len + prph_len + radio_len;
 
-       if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO))
+       if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO))
                file_len += sizeof(*dump_data) + sizeof(*dump_info);
-       if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG))
+       if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG))
                file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);
 
-       if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
-               /* Make room for the SMEM, if it exists */
-               if (smem_len)
-                       file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
-                               smem_len;
-
-               /* Make room for the secondary SRAM, if it exists */
-               if (sram2_len)
-                       file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
-                               sram2_len;
-
-               /* Make room for MEM segments */
-               for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
-                       file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
-                                   le32_to_cpu(fw_dbg_mem[i].len);
-               }
+       if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
+               size_t hdr_len = sizeof(*dump_data) +
+                                sizeof(struct iwl_fw_error_dump_mem);
+
+               /* Dump SRAM only if no mem_tlvs */
+               if (!fwrt->fw->dbg.n_mem_tlv)
+                       ADD_LEN(file_len, sram_len, hdr_len);
+
+               /* Make room for all mem types that exist */
+               ADD_LEN(file_len, smem_len, hdr_len);
+               ADD_LEN(file_len, sram2_len, hdr_len);
+
+               for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++)
+                       ADD_LEN(file_len, le32_to_cpu(fw_mem[i].len), hdr_len);
        }
 
        /* Make room for fw's virtual image pages, if it exists */
-       if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
+       if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
            !fwrt->trans->cfg->gen2 &&
            fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
            fwrt->fw_paging_db[0].fw_paging_block)
@@ -746,33 +721,32 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
                         sizeof(struct iwl_fw_error_dump_paging) +
                         PAGING_BLOCK_SIZE);
 
+       if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
+               file_len += sizeof(*dump_data) +
+                       fwrt->trans->cfg->d3_debug_data_length * 2;
+       }
+
        /* If we only want a monitor dump, reset the file length */
        if (monitor_dump_only) {
                file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 +
                           sizeof(*dump_info) + sizeof(*dump_smem_cfg);
        }
 
-       if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+       if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
            fwrt->dump.desc)
                file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
                            fwrt->dump.desc->len;
 
-       if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM) &&
-           !fwrt->fw->n_dbg_mem_tlv)
-               file_len += sizeof(*dump_data) + sram_len + sizeof(*dump_mem);
-
        dump_file = vzalloc(file_len);
-       if (!dump_file) {
-               kfree(fw_error_dump);
-               goto out;
-       }
+       if (!dump_file)
+               return NULL;
 
        fw_error_dump->fwrt_ptr = dump_file;
 
        dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
        dump_data = (void *)dump_file->data;
 
-       if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
+       if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
                dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
                dump_data->len = cpu_to_le32(sizeof(*dump_info));
                dump_info = (void *)dump_data->data;
@@ -793,7 +767,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
                dump_data = iwl_fw_error_next_data(dump_data);
        }
 
-       if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) {
+       if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) {
                /* Dump shared memory configuration */
                dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
                dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
@@ -824,13 +798,13 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
        }
 
        /* We only dump the FIFOs if the FW is in error state */
-       if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
+       if (fifo_len) {
                iwl_fw_dump_fifos(fwrt, &dump_data);
                if (radio_len)
                        iwl_read_radio_regs(fwrt, &dump_data);
        }
 
-       if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+       if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
            fwrt->dump.desc) {
                dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
                dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
@@ -844,89 +818,54 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 
        /* In case we only want a monitor dump, skip to dumping transport data */
        if (monitor_dump_only)
-               goto dump_trans_data;
-
-       if (!fwrt->fw->n_dbg_mem_tlv &&
-           fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
-               dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
-               dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
-               dump_mem = (void *)dump_data->data;
-               dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
-               dump_mem->offset = cpu_to_le32(sram_ofs);
-               iwl_trans_read_mem_bytes(fwrt->trans, sram_ofs, dump_mem->data,
-                                        sram_len);
-               dump_data = iwl_fw_error_next_data(dump_data);
-       }
+               goto out;
+
+       if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
+               const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem =
+                       fwrt->fw->dbg.mem_tlv;
+
+               if (!fwrt->fw->dbg.n_mem_tlv)
+                       iwl_fw_dump_mem(fwrt, &dump_data, sram_len, sram_ofs,
+                                       IWL_FW_ERROR_DUMP_MEM_SRAM);
 
-       for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
-               u32 len = le32_to_cpu(fw_dbg_mem[i].len);
-               u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
-               bool success;
-
-               if (!(fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)))
-                       break;
-
-               dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
-               dump_data->len = cpu_to_le32(len + sizeof(*dump_mem));
-               dump_mem = (void *)dump_data->data;
-               dump_mem->type = fw_dbg_mem[i].data_type;
-               dump_mem->offset = cpu_to_le32(ofs);
-
-               IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n",
-                              dump_mem->type);
-
-               switch (dump_mem->type & cpu_to_le32(FW_DBG_MEM_TYPE_MASK)) {
-               case cpu_to_le32(FW_DBG_MEM_TYPE_REGULAR):
-                       iwl_trans_read_mem_bytes(fwrt->trans, ofs,
-                                                dump_mem->data,
-                                                len);
-                       success = true;
-                       break;
-               case cpu_to_le32(FW_DBG_MEM_TYPE_PRPH):
-                       success = iwl_read_prph_block(fwrt->trans, ofs, len,
-                                                     (void *)dump_mem->data);
-                       break;
-               default:
-                       /*
-                        * shouldn't get here, we ignored this kind
-                        * of TLV earlier during the TLV parsing?!
-                        */
-                       WARN_ON(1);
-                       success = false;
+               for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) {
+                       u32 len = le32_to_cpu(fw_dbg_mem[i].len);
+                       u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
+
+                       iwl_fw_dump_mem(fwrt, &dump_data, len, ofs,
+                                       le32_to_cpu(fw_dbg_mem[i].data_type));
                }
 
-               if (success)
-                       dump_data = iwl_fw_error_next_data(dump_data);
-       }
+               iwl_fw_dump_mem(fwrt, &dump_data, smem_len,
+                               fwrt->trans->cfg->smem_offset,
+                               IWL_FW_ERROR_DUMP_MEM_SMEM);
 
-       if (smem_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
-               IWL_DEBUG_INFO(fwrt, "WRT SMEM dump\n");
-               dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
-               dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
-               dump_mem = (void *)dump_data->data;
-               dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
-               dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->smem_offset);
-               iwl_trans_read_mem_bytes(fwrt->trans,
-                                        fwrt->trans->cfg->smem_offset,
-                                        dump_mem->data, smem_len);
-               dump_data = iwl_fw_error_next_data(dump_data);
+               iwl_fw_dump_mem(fwrt, &dump_data, sram2_len,
+                               fwrt->trans->cfg->dccm2_offset,
+                               IWL_FW_ERROR_DUMP_MEM_SRAM);
        }
 
-       if (sram2_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
-               IWL_DEBUG_INFO(fwrt, "WRT SRAM dump\n");
-               dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
-               dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
-               dump_mem = (void *)dump_data->data;
-               dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
-               dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->dccm2_offset);
-               iwl_trans_read_mem_bytes(fwrt->trans,
-                                        fwrt->trans->cfg->dccm2_offset,
-                                        dump_mem->data, sram2_len);
+       if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
+               u32 addr = fwrt->trans->cfg->d3_debug_data_base_addr;
+               size_t data_size = fwrt->trans->cfg->d3_debug_data_length;
+
+               dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
+               dump_data->len = cpu_to_le32(data_size * 2);
+
+               memcpy(dump_data->data, fwrt->dump.d3_debug_data, data_size);
+
+               kfree(fwrt->dump.d3_debug_data);
+               fwrt->dump.d3_debug_data = NULL;
+
+               iwl_trans_read_mem_bytes(fwrt->trans, addr,
+                                        dump_data->data + data_size,
+                                        data_size);
+
                dump_data = iwl_fw_error_next_data(dump_data);
        }
 
        /* Dump fw's virtual image */
-       if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
+       if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
            !fwrt->trans->cfg->gen2 &&
            fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
            fwrt->fw_paging_db[0].fw_paging_block) {
@@ -962,13 +901,44 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
                                      ARRAY_SIZE(iwl_prph_dump_addr_9000));
        }
 
-dump_trans_data:
+out:
+       dump_file->file_len = cpu_to_le32(file_len);
+       return dump_file;
+}
+
+void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
+{
+       struct iwl_fw_dump_ptrs *fw_error_dump;
+       struct iwl_fw_error_dump_file *dump_file;
+       struct scatterlist *sg_dump_data;
+       u32 file_len;
+
+       IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
+
+       /* there's no point in fw dump if the bus is dead */
+       if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
+               IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
+               goto out;
+       }
+
+       fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
+       if (!fw_error_dump)
+               goto out;
+
+       dump_file = _iwl_fw_error_dump(fwrt, fw_error_dump);
+       if (!dump_file) {
+               kfree(fw_error_dump);
+               goto out;
+       }
+
        fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans,
                                                       fwrt->dump.trig);
+       file_len = le32_to_cpu(dump_file->file_len);
        fw_error_dump->fwrt_len = file_len;
-       if (fw_error_dump->trans_ptr)
+       if (fw_error_dump->trans_ptr) {
                file_len += fw_error_dump->trans_ptr->len;
-       dump_file->file_len = cpu_to_le32(file_len);
+               dump_file->file_len = cpu_to_le32(file_len);
+       }
 
        sg_dump_data = alloc_sgtable(file_len);
        if (sg_dump_data) {
@@ -1003,20 +973,39 @@ const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
 };
 IWL_EXPORT_SYMBOL(iwl_dump_desc_assert);
 
-int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
-                           const struct iwl_fw_dump_desc *desc,
-                           const struct iwl_fw_dbg_trigger_tlv *trigger)
+void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt)
 {
-       unsigned int delay = 0;
+       struct iwl_fw_dump_desc *iwl_dump_desc_no_alive =
+               kmalloc(sizeof(*iwl_dump_desc_no_alive), GFP_KERNEL);
+
+       if (!iwl_dump_desc_no_alive)
+               return;
 
-       if (trigger)
-               delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
+       iwl_dump_desc_no_alive->trig_desc.type =
+               cpu_to_le32(FW_DBG_TRIGGER_NO_ALIVE);
+       iwl_dump_desc_no_alive->len = 0;
 
+       if (WARN_ON(fwrt->dump.desc))
+               iwl_fw_free_dump_desc(fwrt);
+
+       IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
+                FW_DBG_TRIGGER_NO_ALIVE);
+
+       fwrt->dump.desc = iwl_dump_desc_no_alive;
+       iwl_fw_error_dump(fwrt);
+       clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_alive_error_dump);
+
+int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
+                           const struct iwl_fw_dump_desc *desc, void *trigger,
+                           unsigned int delay)
+{
        /*
         * If the loading of the FW completed successfully, the next step is to
         * get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is non
         * zero, the FW was already loaded successfully. If the state is "NO_FW"
-        * in such a case - WARN and exit, since FW may be dead. Otherwise, we
+        * in such a case - exit, since FW may be dead. Otherwise, we
         * can try to collect the data, since FW might just not be fully
         * loaded (no "ALIVE" yet), and the debug data is accessible.
         *
@@ -1024,12 +1013,12 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
         *      config. In such a case, due to HW access problems, we might
         *      collect garbage.
         */
-       if (WARN((fwrt->trans->state == IWL_TRANS_NO_FW) &&
-                fwrt->smem_cfg.num_lmacs,
-                "Can't collect dbg data when FW isn't alive\n"))
+       if (fwrt->trans->state == IWL_TRANS_NO_FW &&
+           fwrt->smem_cfg.num_lmacs)
                return -EIO;
 
-       if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
+       if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status) ||
+           test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status))
                return -EBUSY;
 
        if (WARN_ON(fwrt->dump.desc))
@@ -1050,25 +1039,38 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc);
 int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
                       enum iwl_fw_dbg_trigger trig,
                       const char *str, size_t len,
-                      const struct iwl_fw_dbg_trigger_tlv *trigger)
+                      struct iwl_fw_dbg_trigger_tlv *trigger)
 {
        struct iwl_fw_dump_desc *desc;
+       unsigned int delay = 0;
 
-       if (trigger && trigger->flags & IWL_FW_DBG_FORCE_RESTART) {
-               IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", trig);
-               iwl_force_nmi(fwrt->trans);
-               return 0;
+       if (trigger) {
+               u16 occurrences = le16_to_cpu(trigger->occurrences) - 1;
+
+               if (!le16_to_cpu(trigger->occurrences))
+                       return 0;
+
+               if (trigger->flags & IWL_FW_DBG_FORCE_RESTART) {
+                       IWL_WARN(fwrt, "Force restart: trigger %d fired.\n",
+                                trig);
+                       iwl_force_nmi(fwrt->trans);
+                       return 0;
+               }
+
+               trigger->occurrences = cpu_to_le16(occurrences);
+               delay = le16_to_cpu(trigger->trig_dis_ms);
        }
 
        desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
        if (!desc)
                return -ENOMEM;
 
        desc->len = len;
        desc->trig_desc.type = cpu_to_le32(trig);
        memcpy(desc->trig_desc.data, str, len);
 
-       return iwl_fw_dbg_collect_desc(fwrt, desc, trigger);
+       return iwl_fw_dbg_collect_desc(fwrt, desc, trigger, delay);
 }
 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
 
@@ -1076,13 +1078,9 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
                            struct iwl_fw_dbg_trigger_tlv *trigger,
                            const char *fmt, ...)
 {
-       u16 occurrences = le16_to_cpu(trigger->occurrences);
        int ret, len = 0;
        char buf[64];
 
-       if (!occurrences)
-               return 0;
-
        if (fmt) {
                va_list ap;
 
@@ -1105,7 +1103,6 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
        if (ret)
                return ret;
 
-       trigger->occurrences = cpu_to_le16(occurrences - 1);
        return 0;
 }
 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig);
@@ -1116,29 +1113,26 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
        int ret;
        int i;
 
-       if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg_conf_tlv),
+       if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg.conf_tlv),
                      "Invalid configuration %d\n", conf_id))
                return -EINVAL;
 
        /* EARLY START - firmware's configuration is hard coded */
-       if ((!fwrt->fw->dbg_conf_tlv[conf_id] ||
-            !fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
+       if ((!fwrt->fw->dbg.conf_tlv[conf_id] ||
+            !fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds) &&
            conf_id == FW_DBG_START_FROM_ALIVE)
                return 0;
 
-       if (!fwrt->fw->dbg_conf_tlv[conf_id])
+       if (!fwrt->fw->dbg.conf_tlv[conf_id])
                return -EINVAL;
 
        if (fwrt->dump.conf != FW_DBG_INVALID)
                IWL_WARN(fwrt, "FW already configured (%d) - re-configuring\n",
                         fwrt->dump.conf);
 
-       /* start default config marker cmd for syncing logs */
-       iwl_fw_trigger_timestamp(fwrt, 1);
-
        /* Send all HCMDs for configuring the FW debug */
-       ptr = (void *)&fwrt->fw->dbg_conf_tlv[conf_id]->hcmd;
-       for (i = 0; i < fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) {
+       ptr = (void *)&fwrt->fw->dbg.conf_tlv[conf_id]->hcmd;
+       for (i = 0; i < fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds; i++) {
                struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr;
                struct iwl_host_cmd hcmd = {
                        .id = cmd->id,
@@ -1164,6 +1158,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
 {
        struct iwl_fw_runtime *fwrt =
                container_of(work, struct iwl_fw_runtime, dump.wk.work);
+       struct iwl_fw_dbg_params params = {0};
 
        if (fwrt->ops && fwrt->ops->dump_start &&
            fwrt->ops->dump_start(fwrt->ops_ctx))
@@ -1177,41 +1172,42 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
                goto out;
        }
 
-       if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
-               /* stop recording */
-               iwl_fw_dbg_stop_recording(fwrt);
-
-               iwl_fw_error_dump(fwrt);
-
-               /* start recording again if the firmware is not crashed */
-               if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
-                   fwrt->fw->dbg_dest_tlv) {
-                       iwl_clear_bits_prph(fwrt->trans,
-                                           MON_BUFF_SAMPLE_CTL, 0x100);
-                       iwl_clear_bits_prph(fwrt->trans,
-                                           MON_BUFF_SAMPLE_CTL, 0x1);
-                       iwl_set_bits_prph(fwrt->trans,
-                                         MON_BUFF_SAMPLE_CTL, 0x1);
-               }
-       } else {
-               u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE);
-               u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL);
+       iwl_fw_dbg_stop_recording(fwrt, &params);
+
+       iwl_fw_error_dump(fwrt);
 
-               iwl_fw_dbg_stop_recording(fwrt);
+       /* start recording again if the firmware is not crashed */
+       if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
+           fwrt->fw->dbg.dest_tlv) {
                /* wait for the DBGC to stop before we collect the data */
                udelay(500);
-
-               iwl_fw_error_dump(fwrt);
-
-               /* start recording again if the firmware is not crashed */
-               if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
-                   fwrt->fw->dbg_dest_tlv) {
-                       iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, in_sample);
-                       iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl);
-               }
+               iwl_fw_dbg_restart_recording(fwrt, &params);
        }
 out:
        if (fwrt->ops && fwrt->ops->dump_end)
                fwrt->ops->dump_end(fwrt->ops_ctx);
 }
 
+void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt)
+{
+       const struct iwl_cfg *cfg = fwrt->trans->cfg;
+
+       if (!iwl_fw_dbg_is_d3_debug_enabled(fwrt))
+               return;
+
+       if (!fwrt->dump.d3_debug_data) {
+               fwrt->dump.d3_debug_data = kmalloc(cfg->d3_debug_data_length,
+                                                  GFP_KERNEL);
+               if (!fwrt->dump.d3_debug_data) {
+                       IWL_ERR(fwrt,
+                               "failed to allocate memory for D3 debug data\n");
+                       return;
+               }
+       }
+
+       /* if the buffer holds previous debug data it is overwritten */
+       iwl_trans_read_mem_bytes(fwrt->trans, cfg->d3_debug_data_base_addr,
+                                fwrt->dump.d3_debug_data,
+                                cfg->d3_debug_data_length);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data);
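
Taken together with the dump code above, the D3 debug record written under IWL_FW_ERROR_DUMP_D3_DEBUG_DATA is twice cfg->d3_debug_data_length:

/*
 * D3 debug record layout (len = 2 * cfg->d3_debug_data_length):
 *
 *   data[0 .. len/2)    buffer previously filled by
 *                       iwl_fw_dbg_read_d3_debug_data()
 *   data[len/2 .. len)  fresh read of d3_debug_data_base_addr
 *                       taken at dump time
 */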
index 507d9a49fa972fd6839a92e3c3fcb97b5540460c..d9578dcec24c98cde0c0982154ddc6c6667b8aae 100644 (file)
@@ -19,9 +19,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program;
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -74,6 +71,7 @@
 #include "iwl-io.h"
 #include "file.h"
 #include "error-dump.h"
+#include "api/commands.h"
 
 /**
  * struct iwl_fw_dump_desc - describes the dump
@@ -86,6 +84,16 @@ struct iwl_fw_dump_desc {
        struct iwl_fw_error_dump_trigger_desc trig_desc;
 };
 
+/**
+ * struct iwl_fw_dbg_params - register values to restore
+ * @in_sample: DBGC_IN_SAMPLE value
+ * @out_ctrl: DBGC_OUT_CTRL value
+ */
+struct iwl_fw_dbg_params {
+       u32 in_sample;
+       u32 out_ctrl;
+};
+
 extern const struct iwl_fw_dump_desc iwl_dump_desc_assert;
 
 static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
@@ -99,25 +107,25 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
 void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);
 int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
                            const struct iwl_fw_dump_desc *desc,
-                           const struct iwl_fw_dbg_trigger_tlv *trigger);
+                           void *trigger, unsigned int delay);
 int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
                       enum iwl_fw_dbg_trigger trig,
                       const char *str, size_t len,
-                      const struct iwl_fw_dbg_trigger_tlv *trigger);
+                      struct iwl_fw_dbg_trigger_tlv *trigger);
 int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
                            struct iwl_fw_dbg_trigger_tlv *trigger,
                            const char *fmt, ...) __printf(3, 4);
 int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 id);
 
 #define iwl_fw_dbg_trigger_enabled(fw, id) ({                  \
-       void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)];      \
+       void *__dbg_trigger = (fw)->dbg.trigger_tlv[(id)];      \
        unlikely(__dbg_trigger);                                \
 })
 
 static inline struct iwl_fw_dbg_trigger_tlv*
 _iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, enum iwl_fw_dbg_trigger id)
 {
-       return fw->dbg_trigger_tlv[id];
+       return fw->dbg.trigger_tlv[id];
 }
 
 #define iwl_fw_dbg_get_trigger(fw, id) ({                      \
@@ -146,12 +154,9 @@ iwl_fw_dbg_trigger_stop_conf_match(struct iwl_fw_runtime *fwrt,
 }
 
 static inline bool
-iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt,
-                         struct iwl_fw_dbg_trigger_tlv *trig)
+iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt, u32 id, u32 dis_ms)
 {
-       unsigned long wind_jiff =
-               msecs_to_jiffies(le16_to_cpu(trig->trig_dis_ms));
-       u32 id = le32_to_cpu(trig->id);
+       unsigned long wind_jiff = msecs_to_jiffies(dis_ms);
 
        /* If this is the first event checked, jump to update start ts */
        if (fwrt->dump.non_collect_ts_start[id] &&
@@ -171,7 +176,8 @@ iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt,
        if (wdev && !iwl_fw_dbg_trigger_vif_match(trig, wdev))
                return false;
 
-       if (iwl_fw_dbg_no_trig_window(fwrt, trig)) {
+       if (iwl_fw_dbg_no_trig_window(fwrt, le32_to_cpu(trig->id),
+                                     le16_to_cpu(trig->trig_dis_ms))) {
                IWL_WARN(fwrt, "Trigger %d occurred while in the no-collect window.\n",
                         trig->id);
                return false;
@@ -180,6 +186,30 @@ iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt,
        return iwl_fw_dbg_trigger_stop_conf_match(fwrt, trig);
 }
 
+static inline struct iwl_fw_dbg_trigger_tlv*
+_iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt,
+                      struct wireless_dev *wdev,
+                      const enum iwl_fw_dbg_trigger id)
+{
+       struct iwl_fw_dbg_trigger_tlv *trig;
+
+       if (!iwl_fw_dbg_trigger_enabled(fwrt->fw, id))
+               return NULL;
+
+       trig = _iwl_fw_dbg_get_trigger(fwrt->fw, id);
+
+       if (!iwl_fw_dbg_trigger_check_stop(fwrt, wdev, trig))
+               return NULL;
+
+       return trig;
+}
+
+#define iwl_fw_dbg_trigger_on(fwrt, wdev, id) ({               \
+       BUILD_BUG_ON(!__builtin_constant_p(id));                \
+       BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX);               \
+       _iwl_fw_dbg_trigger_on((fwrt), (wdev), (id));           \
+})
+
 static inline void
 _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
                                struct wireless_dev *wdev,
@@ -199,17 +229,80 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
                                        iwl_fw_dbg_get_trigger((fwrt)->fw,\
                                                               (trig)))
 
-static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt)
+static int iwl_fw_dbg_start_stop_hcmd(struct iwl_fw_runtime *fwrt, bool start)
+{
+       struct iwl_continuous_record_cmd cont_rec = {};
+       struct iwl_host_cmd hcmd = {
+               .id = LDBG_CONFIG_CMD,
+               .flags = CMD_ASYNC,
+               .data[0] = &cont_rec,
+               .len[0] = sizeof(cont_rec),
+       };
+
+       cont_rec.record_mode.enable_recording = start ?
+               cpu_to_le16(START_DEBUG_RECORDING) :
+               cpu_to_le16(STOP_DEBUG_RECORDING);
+
+       return iwl_trans_send_cmd(fwrt->trans, &hcmd);
+}
+
+static inline void
+_iwl_fw_dbg_stop_recording(struct iwl_trans *trans,
+                          struct iwl_fw_dbg_params *params)
+{
+       if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+               iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
+               return;
+       }
+
+       if (params) {
+               params->in_sample = iwl_read_prph(trans, DBGC_IN_SAMPLE);
+               params->out_ctrl = iwl_read_prph(trans, DBGC_OUT_CTRL);
+       }
+
+       iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
+       udelay(100);
+       iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
+}
+
+static inline void
+iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt,
+                         struct iwl_fw_dbg_params *params)
+{
+       if (fwrt->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+               _iwl_fw_dbg_stop_recording(fwrt->trans, params);
+       else
+               iwl_fw_dbg_start_stop_hcmd(fwrt, false);
+}
+
+static inline void
+_iwl_fw_dbg_restart_recording(struct iwl_trans *trans,
+                             struct iwl_fw_dbg_params *params)
 {
-       if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
-               iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+       if (WARN_ON(!params))
+               return;
+
+       if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+               iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
+               iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1);
+               iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1);
        } else {
-               iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0);
+               iwl_write_prph(trans, DBGC_IN_SAMPLE, params->in_sample);
                udelay(100);
-               iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0);
+               iwl_write_prph(trans, DBGC_OUT_CTRL, params->out_ctrl);
        }
 }
 
+static inline void
+iwl_fw_dbg_restart_recording(struct iwl_fw_runtime *fwrt,
+                            struct iwl_fw_dbg_params *params)
+{
+       if (fwrt->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+               _iwl_fw_dbg_restart_recording(fwrt->trans, params);
+       else
+               iwl_fw_dbg_start_stop_hcmd(fwrt, true);
+}
+
 static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
 {
        fwrt->dump.conf = FW_DBG_INVALID;
@@ -217,6 +310,16 @@ static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
 
 void iwl_fw_error_dump_wk(struct work_struct *work);
 
+static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt)
+{
+       return fw_has_capa(&fwrt->fw->ucode_capa,
+                          IWL_UCODE_TLV_CAPA_D3_DEBUG) &&
+               fwrt->trans->cfg->d3_debug_data_length &&
+               fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
+}
+
+void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt);
+
 static inline void iwl_fw_flush_dump(struct iwl_fw_runtime *fwrt)
 {
        flush_delayed_work(&fwrt->dump.wk);
@@ -263,4 +366,5 @@ static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
 
 #endif /* CONFIG_IWLWIFI_DEBUGFS */
 
+void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt);
 #endif  /* __iwl_fw_dbg_h__ */
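
The stop/restart pair above is designed to bracket a dump. The caller pattern, mirroring iwl_fw_error_dump_wk() in dbg.c:

	struct iwl_fw_dbg_params params = {0};

	iwl_fw_dbg_stop_recording(fwrt, &params);
	iwl_fw_error_dump(fwrt);

	/* restart only if the firmware did not crash */
	if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
	    fwrt->fw->dbg.dest_tlv) {
		udelay(500);	/* let the DBGC stop before restarting */
		iwl_fw_dbg_restart_recording(fwrt, &params);
	}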
index 8ba5a60ec9ed357520d4f17c7eff7194837968f0..3e120dd47305728342ae1442b517894cd5cd90d8 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -18,9 +19,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program.
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -33,6 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #include "debugfs.h"
 #include "dbg.h"
 
-#define FWRT_DEBUGFS_READ_FILE_OPS(name)                               \
-static ssize_t iwl_dbgfs_##name##_read(struct iwl_fw_runtime *fwrt,    \
-                                      char *buf, size_t count,         \
-                                      loff_t *ppos);                   \
+#define FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype)               \
+struct dbgfs_##name##_data {                                           \
+       argtype *arg;                                                   \
+       bool read_done;                                                 \
+       ssize_t rlen;                                                   \
+       char rbuf[buflen];                                              \
+};                                                                     \
+static int _iwl_dbgfs_##name##_open(struct inode *inode,               \
+                                   struct file *file)                  \
+{                                                                      \
+       struct dbgfs_##name##_data *data;                               \
+                                                                       \
+       data = kzalloc(sizeof(*data), GFP_KERNEL);                      \
+       if (!data)                                                      \
+               return -ENOMEM;                                         \
+                                                                       \
+       data->read_done = false;                                        \
+       data->arg = inode->i_private;                                   \
+       file->private_data = data;                                      \
+                                                                       \
+       return 0;                                                       \
+}
+
+#define FWRT_DEBUGFS_READ_WRAPPER(name)                                        \
+static ssize_t _iwl_dbgfs_##name##_read(struct file *file,             \
+                                       char __user *user_buf,          \
+                                       size_t count, loff_t *ppos)     \
+{                                                                      \
+       struct dbgfs_##name##_data *data = file->private_data;          \
+                                                                       \
+       if (!data->read_done) {                                         \
+               data->read_done = true;                                 \
+               data->rlen = iwl_dbgfs_##name##_read(data->arg,         \
+                                                    sizeof(data->rbuf),\
+                                                    data->rbuf);       \
+       }                                                               \
+                                                                       \
+       if (data->rlen < 0)                                             \
+               return data->rlen;                                      \
+       return simple_read_from_buffer(user_buf, count, ppos,           \
+                                      data->rbuf, data->rlen);         \
+}
+
+static int _iwl_dbgfs_release(struct inode *inode, struct file *file)
+{
+       kfree(file->private_data);
+
+       return 0;
+}
+
+#define _FWRT_DEBUGFS_READ_FILE_OPS(name, buflen, argtype)             \
+FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype)                       \
+FWRT_DEBUGFS_READ_WRAPPER(name)                                                \
 static const struct file_operations iwl_dbgfs_##name##_ops = {         \
-       .read = iwl_dbgfs_##name##_read,                                \
-       .open = simple_open,                                            \
+       .read = _iwl_dbgfs_##name##_read,                               \
+       .open = _iwl_dbgfs_##name##_open,                               \
        .llseek = generic_file_llseek,                                  \
+       .release = _iwl_dbgfs_release,                                  \
 }
 
-#define FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen)                       \
-static ssize_t iwl_dbgfs_##name##_write(struct iwl_fw_runtime *fwrt,   \
-                                       char *buf, size_t count,        \
-                                       loff_t *ppos);                  \
+#define FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)              \
 static ssize_t _iwl_dbgfs_##name##_write(struct file *file,            \
                                         const char __user *user_buf,   \
                                         size_t count, loff_t *ppos)    \
 {                                                                      \
-       struct iwl_fw_runtime *fwrt = file->private_data;               \
+       argtype *arg =                                                  \
+               ((struct dbgfs_##name##_data *)file->private_data)->arg;\
        char buf[buflen] = {};                                          \
       size_t buf_size = min(count, sizeof(buf) - 1);                  \
                                                                        \
        if (copy_from_user(buf, user_buf, buf_size))                    \
                return -EFAULT;                                         \
                                                                        \
-       return iwl_dbgfs_##name##_write(fwrt, buf, buf_size, ppos);     \
+       return iwl_dbgfs_##name##_write(arg, buf, buf_size);            \
 }
 
-#define FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen)                 \
-FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen)                               \
+#define _FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype)       \
+FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype)                       \
+FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)                      \
+FWRT_DEBUGFS_READ_WRAPPER(name)                                                \
 static const struct file_operations iwl_dbgfs_##name##_ops = {         \
        .write = _iwl_dbgfs_##name##_write,                             \
-       .read = iwl_dbgfs_##name##_read,                                \
-       .open = simple_open,                                            \
+       .read = _iwl_dbgfs_##name##_read,                               \
+       .open = _iwl_dbgfs_##name##_open,                               \
        .llseek = generic_file_llseek,                                  \
+       .release = _iwl_dbgfs_release,                                  \
 }
 
-#define FWRT_DEBUGFS_WRITE_FILE_OPS(name, buflen)                      \
-FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen)                               \
+#define _FWRT_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype)            \
+FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype)                       \
+FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)                      \
 static const struct file_operations iwl_dbgfs_##name##_ops = {         \
        .write = _iwl_dbgfs_##name##_write,                             \
-       .open = simple_open,                                            \
+       .open = _iwl_dbgfs_##name##_open,                               \
        .llseek = generic_file_llseek,                                  \
+       .release = _iwl_dbgfs_release,                                  \
 }
 
+#define FWRT_DEBUGFS_READ_FILE_OPS(name, bufsz)                                \
+       _FWRT_DEBUGFS_READ_FILE_OPS(name, bufsz, struct iwl_fw_runtime)
+
+#define FWRT_DEBUGFS_WRITE_FILE_OPS(name, bufsz)                       \
+       _FWRT_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_fw_runtime)
+
+#define FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz)                  \
+       _FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_fw_runtime)
+
 #define FWRT_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do {    \
-               if (!debugfs_create_file(alias, mode, parent, fwrt,     \
-                                        &iwl_dbgfs_##name##_ops))      \
-                       goto err;                                       \
+       if (!debugfs_create_file(alias, mode, parent, fwrt,             \
+                                &iwl_dbgfs_##name##_ops))              \
+               goto err;                                               \
        } while (0)
 #define FWRT_DEBUGFS_ADD_FILE(name, parent, mode) \
        FWRT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
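
These wrappers replace simple_open() with a per-open state object, so a read handler runs once and subsequent reads are served from the cached buffer. For FWRT_DEBUGFS_READ_WRITE_FILE_OPS(timestamp_marker, 16) below, the generated state is:

/* Generated by FWRT_DEBUGFS_OPEN_WRAPPER(timestamp_marker, 16,
 * struct iwl_fw_runtime); allocated in open(), freed in release(). */
struct dbgfs_timestamp_marker_data {
	struct iwl_fw_runtime *arg;	/* from inode->i_private */
	bool read_done;			/* has the read handler run? */
	ssize_t rlen;			/* cached result length */
	char rbuf[16];			/* cached read result */
};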
@@ -173,8 +234,7 @@ void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay)
 }
 
 static ssize_t iwl_dbgfs_timestamp_marker_write(struct iwl_fw_runtime *fwrt,
-                                               char *buf, size_t count,
-                                               loff_t *ppos)
+                                               char *buf, size_t count)
 {
        int ret;
        u32 delay;
@@ -188,13 +248,85 @@ static ssize_t iwl_dbgfs_timestamp_marker_write(struct iwl_fw_runtime *fwrt,
        return count;
 }
 
-FWRT_DEBUGFS_WRITE_FILE_OPS(timestamp_marker, 10);
+static ssize_t iwl_dbgfs_timestamp_marker_read(struct iwl_fw_runtime *fwrt,
+                                              size_t size, char *buf)
+{
+       u32 delay_secs = jiffies_to_msecs(fwrt->timestamp.delay) / 1000;
+
+       return scnprintf(buf, size, "%d\n", delay_secs);
+}
+
+FWRT_DEBUGFS_READ_WRITE_FILE_OPS(timestamp_marker, 16);
+
+struct hcmd_write_data {
+       __be32 cmd_id;
+       __be32 flags;
+       __be16 length;
+       u8 data[0];
+} __packed;
+
+static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf,
+                                        size_t count)
+{
+       size_t header_size = (sizeof(u32) * 2 + sizeof(u16)) * 2;
+       size_t data_size = (count - 1) / 2;
+       int ret;
+       struct hcmd_write_data *data;
+       struct iwl_host_cmd hcmd = {
+               .len = { 0, },
+               .data = { NULL, },
+       };
+
+       if (fwrt->ops && fwrt->ops->fw_running &&
+           !fwrt->ops->fw_running(fwrt->ops_ctx))
+               return -EIO;
+
+       if (count < header_size + 1 || count > 1024 * 4)
+               return -EINVAL;
+
+       data = kmalloc(data_size, GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       ret = hex2bin((u8 *)data, buf, data_size);
+       if (ret)
+               goto out;
+
+       hcmd.id = be32_to_cpu(data->cmd_id);
+       hcmd.flags = be32_to_cpu(data->flags);
+       hcmd.len[0] = be16_to_cpu(data->length);
+       hcmd.data[0] = data->data;
+
+       if (count != header_size + hcmd.len[0] * 2 + 1) {
+               IWL_ERR(fwrt,
+                       "host command data size does not match header length\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (fwrt->ops && fwrt->ops->send_hcmd)
+               ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, &hcmd);
+       else
+               ret = -EPERM;
+
+       if (ret < 0)
+               goto out;
+
+       if (hcmd.flags & CMD_WANT_SKB)
+               iwl_free_resp(&hcmd);
+out:
+       kfree(data);
+       return ret ?: count;
+}
+
+FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512);
 
 int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
                            struct dentry *dbgfs_dir)
 {
        INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
        FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200);
+       FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
        return 0;
 err:
        IWL_ERR(fwrt, "Can't create the fwrt debugfs directory\n");
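
The new send_hcmd file takes one hex-encoded blob whose layout mirrors struct hcmd_write_data above: a big-endian u32 command id, a big-endian u32 of CMD_* flags and a big-endian u16 payload length (20 hex characters in total, which is what the doubled header_size computes), followed by the hex-encoded payload and one trailing byte, typically a newline, which accounts for the "+ 1" in both size checks. A minimal user-space sketch of a writer, with the debugfs path and the command id chosen purely for illustration:

    /* Sketch only: builds the hex blob iwl_dbgfs_send_hcmd_write()
     * decodes with hex2bin(); path and command id are placeholders. */
    #include <stdio.h>
    #include <stdint.h>

    static void emit_hcmd(FILE *f, uint32_t cmd_id, uint32_t flags,
                          const uint8_t *payload, uint16_t len)
    {
            uint16_t i;

            /* %08x/%04x print big-endian byte order, which the kernel
             * side undoes with be32_to_cpu()/be16_to_cpu() */
            fprintf(f, "%08x%08x%04x", (unsigned)cmd_id, (unsigned)flags,
                    (unsigned)len);
            for (i = 0; i < len; i++)
                    fprintf(f, "%02x", payload[i]);
            fputc('\n', f); /* the extra byte the size checks expect */
    }

    int main(void)
    {
            const uint8_t payload[] = { 0x01, 0x02 };
            FILE *f = fopen("/sys/kernel/debug/iwlwifi/send_hcmd", "w");

            if (!f)
                    return 1;
            /* the blob stays well below BUFSIZ, so stdio hands it to
             * the kernel as a single write() on fclose() */
            emit_hcmd(f, 0x00000008, 0, payload, sizeof(payload));
            return fclose(f) ? 1 : 0;
    }
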
index cbbfa8e9e66d60f72383320054efb025bfc13a05..88255035e8ef92e2ed7fe9da5da0f38c7061260c 100644 (file)
@@ -18,9 +18,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program.
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index ed7beca8817e2e435dfbdc09ab3f977343614e9e..6fede174c6649cd6395927ad8b2d43373144d882 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright(c) 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -35,6 +31,7 @@
  * Copyright(c) 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -116,6 +113,7 @@ enum iwl_fw_error_dump_type {
        IWL_FW_ERROR_DUMP_INTERNAL_TXF = 14,
        IWL_FW_ERROR_DUMP_EXTERNAL = 15, /* Do not move */
        IWL_FW_ERROR_DUMP_MEM_CFG = 16,
+       IWL_FW_ERROR_DUMP_D3_DEBUG_DATA = 17,
 
        IWL_FW_ERROR_DUMP_MAX,
 };
@@ -330,6 +328,7 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
  * @FW_DBG_TDLS: trigger log collection upon TDLS related events.
  * @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when
  *  the firmware sends a tx reply.
+ * @FW_DBG_TRIGGER_NO_ALIVE: trigger log collection if alive flow fails
  */
 enum iwl_fw_dbg_trigger {
        FW_DBG_TRIGGER_INVALID = 0,
@@ -347,6 +346,7 @@ enum iwl_fw_dbg_trigger {
        FW_DBG_TRIGGER_TX_LATENCY,
        FW_DBG_TRIGGER_TDLS,
        FW_DBG_TRIGGER_TX_STATUS,
+       FW_DBG_TRIGGER_NO_ALIVE,
 
        /* must be last */
        FW_DBG_TRIGGER_MAX,
index bbf2b265a06a649f1e19157861bc375ca7c4c705..6005a41c53d1a86d922ffff88b50e1568d5f30c0 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -258,6 +253,9 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
  *     deprecated.
  * @IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2: This ucode supports version 8
  *     of scan request: SCAN_REQUEST_CMD_UMAC_API_S_VER_8
+ * @IWL_UCODE_TLV_API_FRAG_EBS: This ucode supports fragmented EBS
+ * @IWL_UCODE_TLV_API_REDUCE_TX_POWER: This ucode supports v5 of
+ *     the REDUCE_TX_POWER_CMD.
  *
  * @NUM_IWL_UCODE_TLV_API: number of bits used
  */
@@ -276,9 +274,12 @@ enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_OCE                   = (__force iwl_ucode_tlv_api_t)33,
        IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE   = (__force iwl_ucode_tlv_api_t)34,
        IWL_UCODE_TLV_API_NEW_RX_STATS          = (__force iwl_ucode_tlv_api_t)35,
+       IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL   = (__force iwl_ucode_tlv_api_t)36,
        IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY     = (__force iwl_ucode_tlv_api_t)38,
        IWL_UCODE_TLV_API_DEPRECATE_TTAK        = (__force iwl_ucode_tlv_api_t)41,
        IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2     = (__force iwl_ucode_tlv_api_t)42,
+       IWL_UCODE_TLV_API_FRAG_EBS              = (__force iwl_ucode_tlv_api_t)44,
+       IWL_UCODE_TLV_API_REDUCE_TX_POWER       = (__force iwl_ucode_tlv_api_t)45,
 
        NUM_IWL_UCODE_TLV_API
 #ifdef __CHECKER__
@@ -325,6 +326,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
  * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
  * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
  * @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related
+ * @IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2: firmware implements Coex Schema 2
  * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
  * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
  * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
@@ -335,7 +337,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
  *     antenna the beacon should be transmitted
  * @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon
  *     from AP and will send it upon d0i3 exit.
- * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2
+ * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3: support LAR API V3
  * @IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill
  * @IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature
  *     thresholds reporting
@@ -349,6 +351,9 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
  *     command size (command version 4) that supports toggling ACK TX
  *     power reduction.
  * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
+ * @IWL_UCODE_TLV_CAPA_D3_DEBUG: supports debug recording during D3
+ * @IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT: MCC response supports 11ax
+ *     capability.
  *
  * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
  */
@@ -381,6 +386,7 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_D0I3_END_FIRST               = (__force iwl_ucode_tlv_capa_t)41,
        IWL_UCODE_TLV_CAPA_TLC_OFFLOAD                  = (__force iwl_ucode_tlv_capa_t)43,
        IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA                = (__force iwl_ucode_tlv_capa_t)44,
+       IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2                = (__force iwl_ucode_tlv_capa_t)45,
        IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE         = (__force iwl_ucode_tlv_capa_t)64,
        IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS            = (__force iwl_ucode_tlv_capa_t)65,
        IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT             = (__force iwl_ucode_tlv_capa_t)67,
@@ -388,7 +394,7 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD         = (__force iwl_ucode_tlv_capa_t)70,
        IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION         = (__force iwl_ucode_tlv_capa_t)71,
        IWL_UCODE_TLV_CAPA_BEACON_STORING               = (__force iwl_ucode_tlv_capa_t)72,
-       IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2               = (__force iwl_ucode_tlv_capa_t)73,
+       IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3               = (__force iwl_ucode_tlv_capa_t)73,
        IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW                = (__force iwl_ucode_tlv_capa_t)74,
        IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT      = (__force iwl_ucode_tlv_capa_t)75,
        IWL_UCODE_TLV_CAPA_CTDP_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)76,
@@ -396,7 +402,9 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG        = (__force iwl_ucode_tlv_capa_t)80,
        IWL_UCODE_TLV_CAPA_LQM_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)81,
        IWL_UCODE_TLV_CAPA_TX_POWER_ACK                 = (__force iwl_ucode_tlv_capa_t)84,
-       IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT              = (__force iwl_ucode_tlv_capa_t)86,
+       IWL_UCODE_TLV_CAPA_D3_DEBUG                     = (__force iwl_ucode_tlv_capa_t)87,
+       IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT              = (__force iwl_ucode_tlv_capa_t)88,
+       IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT      = (__force iwl_ucode_tlv_capa_t)89,
        IWL_UCODE_TLV_CAPA_MLME_OFFLOAD                 = (__force iwl_ucode_tlv_capa_t)96,
 
        NUM_IWL_UCODE_TLV_CAPA
@@ -527,23 +535,10 @@ enum iwl_fw_dbg_monitor_mode {
        MIPI_MODE = 3,
 };
 
-/**
- * enum iwl_fw_mem_seg_type - memory segment type
- * @FW_DBG_MEM_TYPE_MASK: mask for the type indication
- * @FW_DBG_MEM_TYPE_REGULAR: regular memory
- * @FW_DBG_MEM_TYPE_PRPH: periphery memory (requires special reading)
- */
-enum iwl_fw_mem_seg_type {
-       FW_DBG_MEM_TYPE_MASK    = 0xff000000,
-       FW_DBG_MEM_TYPE_REGULAR = 0x00000000,
-       FW_DBG_MEM_TYPE_PRPH    = 0x01000000,
-};
-
 /**
  * struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments
  *
- * @data_type: the memory segment type to record, see &enum iwl_fw_mem_seg_type
- *     for what we care about
+ * @data_type: the memory segment type to record
  * @ofs: the memory segment offset
  * @len: the memory segment length, in bytes
  *
index 0861b97c4233c8a7eeb1294f5ca0c9042c0b1f8c..54dbbd998abfd1b54fa7821fbadb7a22259f6d34 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -202,6 +197,29 @@ enum iwl_fw_type {
        IWL_FW_MVM,
 };
 
+/**
+ * struct iwl_fw_dbg - debug data
+ *
+ * @dest_tlv: points to debug destination TLV (typically SRAM or DRAM)
+ * @n_dest_reg: num of reg_ops in dest_tlv
+ * @conf_tlv: array of pointers to configuration HCMDs
+ * @trigger_tlv: array of pointers to triggers TLVs
+ * @trigger_tlv_len: lengths of the @trigger_tlv entries
+ * @mem_tlv: Runtime addresses to dump
+ * @n_mem_tlv: number of runtime addresses
+ * @dump_mask: bitmask of dump regions
+ */
+struct iwl_fw_dbg {
+       struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
+       u8 n_dest_reg;
+       struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
+       struct iwl_fw_dbg_trigger_tlv *trigger_tlv[FW_DBG_TRIGGER_MAX];
+       size_t trigger_tlv_len[FW_DBG_TRIGGER_MAX];
+       struct iwl_fw_dbg_mem_seg_tlv *mem_tlv;
+       size_t n_mem_tlv;
+       u32 dump_mask;
+};
+
 /**
  * struct iwl_fw - variables associated with the firmware
  *
@@ -222,12 +240,6 @@ enum iwl_fw_type {
  * @cipher_scheme: optional external cipher scheme.
  * @human_readable: human readable version
  *     we get the ALIVE from the uCode
- * @dbg_dest_tlv: points to the destination TLV for debug
- * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
- * @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries
- * @dbg_trigger_tlv: array of pointers to triggers TLVs
- * @dbg_trigger_tlv_len: lengths of the @dbg_trigger_tlv entries
- * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
  */
 struct iwl_fw {
        u32 ucode_ver;
@@ -255,15 +267,7 @@ struct iwl_fw {
        struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS];
        u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
 
-       struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
-       struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
-       size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
-       struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
-       struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
-       size_t n_dbg_mem_tlv;
-       size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
-       u8 dbg_dest_reg_num;
-       u32 dbg_dump_mask;
+       struct iwl_fw_dbg dbg;
 };
 
 static inline const char *get_fw_dbg_mode_string(int mode)
@@ -285,7 +289,7 @@ static inline const char *get_fw_dbg_mode_string(int mode)
 static inline bool
 iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
 {
-       const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id];
+       const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg.conf_tlv[id];
 
        if (!conf_tlv)
                return false;
index 1096c945a68bc023556b756bb02b82ef92150e38..379735e086dc0ec2e192242d80495d2dd3e4a41b 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 368884be4e7c9641f80e402bb94b9e4a92d114fe..61b067eeeac933c57099c0f047b46a557ec6382e 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index ed23367f7088d5f5b72fb827b3539934b54fe9a8..6b95d0e758897cbecf8cac036411cce590d408e8 100644 (file)
@@ -71,6 +71,7 @@ struct iwl_fw_runtime_ops {
        int (*dump_start)(void *ctx);
        void (*dump_end)(void *ctx);
        bool (*fw_running)(void *ctx);
+       int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);
 };
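
An op mode that registers these runtime ops is expected to back the new send_hcmd hook with its own command-submission path. A minimal sketch, where "my_op_mode" is an illustrative stand-in for a real op-mode context and iwl_trans_send_cmd() is the existing transport entry point:

    struct my_op_mode {
            struct iwl_trans *trans;
    };

    /* Sketch: forward the debugfs-originated host command to the
     * transport layer on behalf of the firmware runtime. */
    static int my_op_mode_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
    {
            struct my_op_mode *op = ctx;

            return iwl_trans_send_cmd(op->trans, host_cmd);
    }

    static const struct iwl_fw_runtime_ops my_op_mode_fwrt_ops = {
            .send_hcmd = my_op_mode_send_hcmd,
    };
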
 
 #define MAX_NUM_LMAC 2
@@ -88,6 +89,7 @@ struct iwl_fwrt_shared_mem_cfg {
 
 enum iwl_fw_runtime_status {
        IWL_FWRT_STATUS_DUMPING = 0,
+       IWL_FWRT_STATUS_WAIT_ALIVE,
 };
 
 /**
@@ -136,6 +138,7 @@ struct iwl_fw_runtime {
 
                /* ts of the beginning of a non-collect fw dbg data period */
                unsigned long non_collect_ts_start[FW_DBG_TRIGGER_MAX - 1];
+               u32 *d3_debug_data;
        } dump;
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        struct {
index ee9347a54cdcc1ca333853705e150f2617c49932..359537620c9344e1bd526e1934175d056b8877f2 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 12fddcf15bab394521122a2fecaab3d2dcd83579..5eb906a0d0d25489e2c520b84bb253ede80969fd 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -388,6 +383,8 @@ struct iwl_csr_params {
  * @gen2: 22000 and on transport operation
  * @cdb: CDB support
  * @nvm_type: see &enum iwl_nvm_type
+ * @d3_debug_data_base_addr: base address where D3 debug data is stored
+ * @d3_debug_data_length: length of the D3 debug data
  *
  * We enable the driver to be backward compatible wrt. hardware features.
  * API differences in uCode shouldn't be handled here but through TLVs
@@ -452,6 +449,8 @@ struct iwl_cfg {
        u8 ucode_api_min;
        u32 min_umac_error_event_table;
        u32 extra_phy_cfg_flags;
+       u32 d3_debug_data_base_addr;
+       u32 d3_debug_data_length;
 };
 
 static const struct iwl_csr_params iwl_csr_v1 = {
@@ -574,11 +573,18 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
 extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
 extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
 extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
+extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_jf;
 extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0_f0;
 extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
 extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0;
 extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
 extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb;
-#endif /* CONFIG_IWLMVM */
+#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */
 
 #endif /* __IWL_CONFIG_H__ */
index 4b6fdf3b15fbe4e0d4f096e128b788e518966919..5ed07e37e3eef36f58a3e883a849f5604018de7a 100644 (file)
  *     the init done for driver command that configures several system modes
  * @IWL_CTXT_INFO_EARLY_DEBUG: enable early debug
  * @IWL_CTXT_INFO_ENABLE_CDMP: enable core dump
- * @IWL_CTXT_INFO_RB_SIZE_4K: Use 4K RB size (the default is 2K)
  * @IWL_CTXT_INFO_RB_CB_SIZE_POS: position of the RBD Cyclic Buffer Size
  *     exponent, the actual size is 2**value, valid sizes are 8-2048.
  *     The value is four bits long. Maximum valid exponent is 12
  * @IWL_CTXT_INFO_TFD_FORMAT_LONG: use long TFD Format (the
  *     default is short format - not supported by the driver)
+ * @IWL_CTXT_INFO_RB_SIZE_POS: RB size position
+ *     (values are IWL_CTXT_INFO_RB_SIZE_*K)
+ * @IWL_CTXT_INFO_RB_SIZE_1K: Value for 1K RB size
+ * @IWL_CTXT_INFO_RB_SIZE_2K: Value for 2K RB size
+ * @IWL_CTXT_INFO_RB_SIZE_4K: Value for 4K RB size
+ * @IWL_CTXT_INFO_RB_SIZE_8K: Value for 8K RB size
+ * @IWL_CTXT_INFO_RB_SIZE_12K: Value for 12K RB size
+ * @IWL_CTXT_INFO_RB_SIZE_16K: Value for 16K RB size
+ * @IWL_CTXT_INFO_RB_SIZE_20K: Value for 20K RB size
+ * @IWL_CTXT_INFO_RB_SIZE_24K: Value for 24K RB size
+ * @IWL_CTXT_INFO_RB_SIZE_28K: Value for 28K RB size
+ * @IWL_CTXT_INFO_RB_SIZE_32K: Value for 32K RB size
  */
 enum iwl_context_info_flags {
        IWL_CTXT_INFO_AUTO_FUNC_INIT    = BIT(0),
        IWL_CTXT_INFO_EARLY_DEBUG       = BIT(1),
        IWL_CTXT_INFO_ENABLE_CDMP       = BIT(2),
-       IWL_CTXT_INFO_RB_SIZE_4K        = BIT(3),
        IWL_CTXT_INFO_RB_CB_SIZE_POS    = 4,
        IWL_CTXT_INFO_TFD_FORMAT_LONG   = BIT(8),
+       IWL_CTXT_INFO_RB_SIZE_POS       = 9,
+       IWL_CTXT_INFO_RB_SIZE_1K        = 0x1,
+       IWL_CTXT_INFO_RB_SIZE_2K        = 0x2,
+       IWL_CTXT_INFO_RB_SIZE_4K        = 0x4,
+       IWL_CTXT_INFO_RB_SIZE_8K        = 0x8,
+       IWL_CTXT_INFO_RB_SIZE_12K       = 0x9,
+       IWL_CTXT_INFO_RB_SIZE_16K       = 0xa,
+       IWL_CTXT_INFO_RB_SIZE_20K       = 0xb,
+       IWL_CTXT_INFO_RB_SIZE_24K       = 0xc,
+       IWL_CTXT_INFO_RB_SIZE_28K       = 0xd,
+       IWL_CTXT_INFO_RB_SIZE_32K       = 0xe,
 };
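
With this change the receive-buffer size is no longer the single 4K flag bit but a value written into a field at bit 9, so a caller combines one of the IWL_CTXT_INFO_RB_SIZE_*K values with the new position. A sketch of the intended encoding (the local variable is illustrative):

    u32 control_flags = 0;

    /* request 8K receive buffers: value 0x8 shifted into bits 9.. */
    control_flags |= IWL_CTXT_INFO_RB_SIZE_8K << IWL_CTXT_INFO_RB_SIZE_POS;
    control_flags |= IWL_CTXT_INFO_TFD_FORMAT_LONG;
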
 
 /*
index 9019de99f077f1f069bcb37d8cbc5e51fe110c65..caa5806acd81b299d83401feae3bee67aac30655 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
 #define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER     (0x000000C0)
 #define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI                (0x00000100)
 #define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI      (0x00000200)
+#define CSR_HW_IF_CONFIG_REG_D3_DEBUG          (0x00000200)
 #define CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE      (0x00000C00)
 #define CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH      (0x00003000)
 #define CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP      (0x0000C000)
index b1c3b0d0fcc65aaa2e3d20f68475ccdfdf05bde8..e1a41fd503a82922eb59b0fb62a4196ec06ab6b0 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index c023fcf5d4526b5e6482d3404ddf9d6422cc4db2..a2af68a0d34bba4d190f189e38d445f420f163bd 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index a80e4202cd03d84eecf67d0f1b677bbc2f2098a2..2cc6c019d0e1dc1ec0e789dfd55b71a9a00ad8a4 100644 (file)
@@ -2,6 +2,7 @@
  *
  * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
@@ -76,12 +73,11 @@ TRACE_EVENT(iwlwifi_dev_rx_data,
        TP_ARGS(dev, trans, rxbuf, len),
        TP_STRUCT__entry(
                DEV_ENTRY
-
                __dynamic_array(u8, data,
-                               len - iwl_rx_trace_len(trans, rxbuf, len))
+                               len - iwl_rx_trace_len(trans, rxbuf, len, NULL))
        ),
        TP_fast_assign(
-               size_t offs = iwl_rx_trace_len(trans, rxbuf, len);
+               size_t offs = iwl_rx_trace_len(trans, rxbuf, len, NULL);
                DEV_ASSIGN;
                if (offs < len)
                        memcpy(__get_dynamic_array(data),
index 4164dc1745ed2343230b36ff9e01a404ffa46968..7bb4e0e9bb6989963fe66d9de5413c4b4742028f 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index 27e3e4e96aa27b52442d3f555511bab4d3e51c3d..8e87186682e72c4930e0761a08566b6e47a7d9d8 100644 (file)
@@ -3,6 +3,7 @@
  * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
@@ -75,13 +72,18 @@ TRACE_EVENT(iwlwifi_dev_rx,
        TP_STRUCT__entry(
                DEV_ENTRY
                __field(u16, cmd)
-               __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len))
+               __field(u8, hdr_offset)
+               __dynamic_array(u8, rxbuf,
+                               iwl_rx_trace_len(trans, pkt, len, NULL))
        ),
        TP_fast_assign(
+               size_t hdr_offset = 0;
+
                DEV_ASSIGN;
                __entry->cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
                memcpy(__get_dynamic_array(rxbuf), pkt,
-                      iwl_rx_trace_len(trans, pkt, len));
+                      iwl_rx_trace_len(trans, pkt, len, &hdr_offset));
+               __entry->hdr_offset = hdr_offset;
        ),
        TP_printk("[%s] RX cmd %#.2x",
                  __get_str(dev), __entry->cmd)
@@ -126,61 +128,6 @@ TRACE_EVENT(iwlwifi_dev_tx,
                  __entry->framelen, __entry->skbaddr)
 );
 
-struct iwl_error_event_table;
-TRACE_EVENT(iwlwifi_dev_ucode_error,
-       TP_PROTO(const struct device *dev, const struct iwl_error_event_table *table,
-                u32 hw_ver, u32 brd_ver),
-       TP_ARGS(dev, table, hw_ver, brd_ver),
-       TP_STRUCT__entry(
-               DEV_ENTRY
-               __field(u32, desc)
-               __field(u32, tsf_low)
-               __field(u32, data1)
-               __field(u32, data2)
-               __field(u32, line)
-               __field(u32, blink2)
-               __field(u32, ilink1)
-               __field(u32, ilink2)
-               __field(u32, bcon_time)
-               __field(u32, gp1)
-               __field(u32, gp2)
-               __field(u32, rev_type)
-               __field(u32, major)
-               __field(u32, minor)
-               __field(u32, hw_ver)
-               __field(u32, brd_ver)
-       ),
-       TP_fast_assign(
-               DEV_ASSIGN;
-               __entry->desc = table->error_id;
-               __entry->tsf_low = table->tsf_low;
-               __entry->data1 = table->data1;
-               __entry->data2 = table->data2;
-               __entry->line = table->line;
-               __entry->blink2 = table->blink2;
-               __entry->ilink1 = table->ilink1;
-               __entry->ilink2 = table->ilink2;
-               __entry->bcon_time = table->bcon_time;
-               __entry->gp1 = table->gp1;
-               __entry->gp2 = table->gp2;
-               __entry->rev_type = table->gp3;
-               __entry->major = table->ucode_ver;
-               __entry->minor = table->hw_ver;
-               __entry->hw_ver = hw_ver;
-               __entry->brd_ver = brd_ver;
-       ),
-       TP_printk("[%s] #%02d %010u data 0x%08X 0x%08X line %u, "
-                 "blink2 0x%05X ilink 0x%05X 0x%05X "
-                 "bcon_tm %010u gp 0x%08X 0x%08X rev_type 0x%08X major 0x%08X "
-                 "minor 0x%08X hw 0x%08X brd 0x%08X",
-                 __get_str(dev), __entry->desc, __entry->tsf_low,
-                 __entry->data1, __entry->data2, __entry->line,
-                 __entry->blink2, __entry->ilink1, __entry->ilink2,
-                 __entry->bcon_time, __entry->gp1, __entry->gp2,
-                 __entry->rev_type, __entry->major, __entry->minor,
-                 __entry->hw_ver, __entry->brd_ver)
-);
-
 TRACE_EVENT(iwlwifi_dev_ucode_event,
        TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev),
        TP_ARGS(dev, time, data, ev),
index 5dfc9295a7e0136cc6de55f70fc1bdf7288fe8d9..32984c1f39a1baf79f20f10adab4128ef24ca2fe 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index e9b8673dd245432c1bdedcf652ae502b89b42156..53842226ef1b18d4f31c5584efc412bee217323c 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index 6aa719865a58a9fa148244f67ae81a9bfa701ffd..9805432f124f3410e7b043961c47141f9a59f7b1 100644 (file)
@@ -1,6 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
+ * Copyright (C) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
 #ifndef __CHECKER__
 #include "iwl-trans.h"
 
-#include "dvm/commands.h"
 #define CREATE_TRACE_POINTS
 #include "iwl-devtrace.h"
 
 EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
 EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
 EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event);
 #endif
index f5c1127253cb13fcccde047ba2d210b9f41fb556..fc649b2bc0176573f30c67744ae6e1f63878f66c 100644 (file)
@@ -1,7 +1,8 @@
 /******************************************************************************
  *
  * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
- * Copyright(C) 2016 Intel Deutschland GmbH
+ * Copyright(C) 2016        Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
@@ -60,16 +57,23 @@ static inline bool iwl_trace_data(struct sk_buff *skb)
 }
 
 static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
-                                     void *rxbuf, size_t len)
+                                     void *rxbuf, size_t len,
+                                     size_t *out_hdr_offset)
 {
        struct iwl_cmd_header *cmd = (void *)((u8 *)rxbuf + sizeof(__le32));
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr *hdr = NULL;
+       size_t hdr_offset;
 
        if (cmd->cmd != trans->rx_mpdu_cmd)
                return len;
 
-       hdr = (void *)((u8 *)cmd + sizeof(struct iwl_cmd_header) +
-                       trans->rx_mpdu_cmd_hdr_size);
+       hdr_offset = sizeof(struct iwl_cmd_header) +
+                    trans->rx_mpdu_cmd_hdr_size;
+
+       if (out_hdr_offset)
+               *out_hdr_offset = hdr_offset;
+
+       hdr = (void *)((u8 *)cmd + hdr_offset);
        if (!ieee80211_is_data(hdr->frame_control))
                return len;
        /* maybe try to identify EAPOL frames? */
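
Both call styles appear in the updated tracepoints above: callers that only need the traced length pass NULL, while iwlwifi_dev_rx also receives the offset of the 802.11 header inside the buffer. In sketch form, reusing the tracepoint arguments:

    size_t hdr_offset = 0;

    /* traced length only; header offset not wanted */
    size_t n = iwl_rx_trace_len(trans, pkt, len, NULL);

    /* traced length plus where the MPDU header starts in the buffer */
    size_t m = iwl_rx_trace_len(trans, pkt, len, &hdr_offset);
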
index c0631255aee7ca3dedf6a5168856f34d3a20401d..ba41d23b421194f0d9203a8a60d46fb8b985f696 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -173,12 +168,12 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
 {
        int i;
 
-       kfree(drv->fw.dbg_dest_tlv);
-       for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++)
-               kfree(drv->fw.dbg_conf_tlv[i]);
-       for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
-               kfree(drv->fw.dbg_trigger_tlv[i]);
-       kfree(drv->fw.dbg_mem_tlv);
+       kfree(drv->fw.dbg.dest_tlv);
+       for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++)
+               kfree(drv->fw.dbg.conf_tlv[i]);
+       for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++)
+               kfree(drv->fw.dbg.trigger_tlv[i]);
+       kfree(drv->fw.dbg.mem_tlv);
        kfree(drv->fw.iml);
 
        for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
@@ -308,7 +303,7 @@ struct iwl_firmware_pieces {
        struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
        size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
        struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
-       size_t n_dbg_mem_tlv;
+       size_t n_mem_tlv;
 };
 
 /*
@@ -941,7 +936,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        IWL_INFO(drv, "Found debug destination: %s\n",
                                 get_fw_dbg_mode_string(mon_mode));
 
-                       drv->fw.dbg_dest_reg_num = (dest_v1) ?
+                       drv->fw.dbg.n_dest_reg = (dest_v1) ?
                                tlv_len -
                                offsetof(struct iwl_fw_dbg_dest_tlv_v1,
                                         reg_ops) :
@@ -949,8 +944,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                                offsetof(struct iwl_fw_dbg_dest_tlv,
                                         reg_ops);
 
-                       drv->fw.dbg_dest_reg_num /=
-                               sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]);
+                       drv->fw.dbg.n_dest_reg /=
+                               sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]);
 
                        break;
                        }
@@ -964,7 +959,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                                break;
                        }
 
-                       if (conf->id >= ARRAY_SIZE(drv->fw.dbg_conf_tlv)) {
+                       if (conf->id >= ARRAY_SIZE(drv->fw.dbg.conf_tlv)) {
                                IWL_ERR(drv,
                                        "Skip unknown configuration: %d\n",
                                        conf->id);
@@ -993,7 +988,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                                (void *)tlv_data;
                        u32 trigger_id = le32_to_cpu(trigger->id);
 
-                       if (trigger_id >= ARRAY_SIZE(drv->fw.dbg_trigger_tlv)) {
+                       if (trigger_id >= ARRAY_SIZE(drv->fw.dbg.trigger_tlv)) {
                                IWL_ERR(drv,
                                        "Skip unknown trigger: %u\n",
                                        trigger->id);
@@ -1020,7 +1015,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                                break;
                        }
 
-                       drv->fw.dbg_dump_mask =
+                       drv->fw.dbg.dump_mask =
                                le32_to_cpup((__le32 *)tlv_data);
                        break;
                        }
@@ -1065,38 +1060,23 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                case IWL_UCODE_TLV_FW_MEM_SEG: {
                        struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
                                (void *)tlv_data;
-                       u32 type;
                        size_t size;
                        struct iwl_fw_dbg_mem_seg_tlv *n;
 
                        if (tlv_len != (sizeof(*dbg_mem)))
                                goto invalid_tlv_len;
 
-                       type = le32_to_cpu(dbg_mem->data_type);
-
                        IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n",
                                       dbg_mem->data_type);
 
-                       switch (type & FW_DBG_MEM_TYPE_MASK) {
-                       case FW_DBG_MEM_TYPE_REGULAR:
-                       case FW_DBG_MEM_TYPE_PRPH:
-                               /* we know how to handle these */
-                               break;
-                       default:
-                               IWL_ERR(drv,
-                                       "Found debug memory segment with invalid type: 0x%x\n",
-                                       type);
-                               return -EINVAL;
-                       }
-
                        size = sizeof(*pieces->dbg_mem_tlv) *
-                              (pieces->n_dbg_mem_tlv + 1);
+                              (pieces->n_mem_tlv + 1);
                        n = krealloc(pieces->dbg_mem_tlv, size, GFP_KERNEL);
                        if (!n)
                                return -ENOMEM;
                        pieces->dbg_mem_tlv = n;
-                       pieces->dbg_mem_tlv[pieces->n_dbg_mem_tlv] = *dbg_mem;
-                       pieces->n_dbg_mem_tlv++;
+                       pieces->dbg_mem_tlv[pieces->n_mem_tlv] = *dbg_mem;
+                       pieces->n_mem_tlv++;
                        break;
                        }
                case IWL_UCODE_TLV_IML: {
@@ -1275,8 +1255,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
        fw->ucode_capa.standard_phy_calibration_size =
                        IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
        fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
-       /* dump all fw memory areas by default */
-       fw->dbg_dump_mask = 0xffffffff;
+       /* dump all fw memory areas by default except d3 debug data */
+       fw->dbg.dump_mask = 0xfffdffff;
 
        pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
        if (!pieces)
@@ -1343,21 +1323,21 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                        goto out_free_fw;
 
        if (pieces->dbg_dest_tlv_init) {
-               size_t dbg_dest_size = sizeof(*drv->fw.dbg_dest_tlv) +
-                       sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) *
-                       drv->fw.dbg_dest_reg_num;
+               size_t dbg_dest_size = sizeof(*drv->fw.dbg.dest_tlv) +
+                       sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) *
+                       drv->fw.dbg.n_dest_reg;
 
-               drv->fw.dbg_dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL);
+               drv->fw.dbg.dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL);
 
-               if (!drv->fw.dbg_dest_tlv)
+               if (!drv->fw.dbg.dest_tlv)
                        goto out_free_fw;
 
                if (*pieces->dbg_dest_ver == 0) {
-                       memcpy(drv->fw.dbg_dest_tlv, pieces->dbg_dest_tlv_v1,
+                       memcpy(drv->fw.dbg.dest_tlv, pieces->dbg_dest_tlv_v1,
                               dbg_dest_size);
                } else {
                        struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv =
-                               drv->fw.dbg_dest_tlv;
+                               drv->fw.dbg.dest_tlv;
 
                        dest_tlv->version = pieces->dbg_dest_tlv->version;
                        dest_tlv->monitor_mode =
@@ -1372,8 +1352,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                                pieces->dbg_dest_tlv->base_shift;
                        memcpy(dest_tlv->reg_ops,
                               pieces->dbg_dest_tlv->reg_ops,
-                              sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) *
-                              drv->fw.dbg_dest_reg_num);
+                              sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) *
+                              drv->fw.dbg.n_dest_reg);
 
                        /* In version 1 of the destination tlv, which is
                         * relevant for internal buffer exclusively,
@@ -1389,15 +1369,13 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                }
        }
 
-       for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) {
+       for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++) {
                if (pieces->dbg_conf_tlv[i]) {
-                       drv->fw.dbg_conf_tlv_len[i] =
-                               pieces->dbg_conf_tlv_len[i];
-                       drv->fw.dbg_conf_tlv[i] =
+                       drv->fw.dbg.conf_tlv[i] =
                                kmemdup(pieces->dbg_conf_tlv[i],
-                                       drv->fw.dbg_conf_tlv_len[i],
+                                       pieces->dbg_conf_tlv_len[i],
                                        GFP_KERNEL);
-                       if (!drv->fw.dbg_conf_tlv[i])
+                       if (!drv->fw.dbg.conf_tlv[i])
                                goto out_free_fw;
                }
        }
@@ -1424,7 +1402,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
        trigger_tlv_sz[FW_DBG_TRIGGER_TDLS] =
                sizeof(struct iwl_fw_dbg_trigger_tdls);
 
-       for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) {
+       for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++) {
                if (pieces->dbg_trigger_tlv[i]) {
                        /*
                         * If the trigger isn't long enough, WARN and exit.
@@ -1437,22 +1415,22 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                                    (trigger_tlv_sz[i] +
                                     sizeof(struct iwl_fw_dbg_trigger_tlv))))
                                goto out_free_fw;
-                       drv->fw.dbg_trigger_tlv_len[i] =
+                       drv->fw.dbg.trigger_tlv_len[i] =
                                pieces->dbg_trigger_tlv_len[i];
-                       drv->fw.dbg_trigger_tlv[i] =
+                       drv->fw.dbg.trigger_tlv[i] =
                                kmemdup(pieces->dbg_trigger_tlv[i],
-                                       drv->fw.dbg_trigger_tlv_len[i],
+                                       drv->fw.dbg.trigger_tlv_len[i],
                                        GFP_KERNEL);
-                       if (!drv->fw.dbg_trigger_tlv[i])
+                       if (!drv->fw.dbg.trigger_tlv[i])
                                goto out_free_fw;
                }
        }
 
        /* Now that we can no longer fail, copy information */
 
-       drv->fw.dbg_mem_tlv = pieces->dbg_mem_tlv;
+       drv->fw.dbg.mem_tlv = pieces->dbg_mem_tlv;
        pieces->dbg_mem_tlv = NULL;
-       drv->fw.n_dbg_mem_tlv = pieces->n_dbg_mem_tlv;
+       drv->fw.dbg.n_mem_tlv = pieces->n_mem_tlv;
 
        /*
         * The (size - 16) / 12 formula is based on the information recorded
@@ -1493,6 +1471,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                break;
        default:
                WARN(1, "Invalid fw type %d\n", fw->type);
+               /* fall through */
        case IWL_FW_MVM:
                op = &iwlwifi_opmode_table[MVM_OP_MODE];
                break;
index 1f8a2eeb7dff222cccadadb947196237ec645e7e..2be30af7bdc30861bea107a2e941dc29ab2c57ec 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index a4c96215933ba589d2bbbc272728c901c80a4214..4e3422a1c7bbbe7a59e3666f9dd238c5da39f6c1 100644 (file)
@@ -18,9 +18,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program;
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -745,7 +742,9 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
        else
                rx_chains = hweight8(rx_chains);
 
-       if (!(data->sku_cap_11n_enable) || !cfg->ht_params) {
+       if (!(data->sku_cap_11n_enable) ||
+           (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) ||
+           !cfg->ht_params) {
                ht_info->ht_supported = false;
                return;
        }
index 8be50ed12300f5b16df34be38c9ad7d7ca5b41eb..d910bda087f7c97a16e1ada81eebc48040345123 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index ac965c34a2f89b44cee3575129babcd6239db1f8..a6db6a814257d922f8897786eb1692de454e8689 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 1ed78be06c232fa843e3e291dd0e43c377640047..47fced159800ca270aa87c347dd71ba9274902f4 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index df0e9ffff7067e86c158e587028f6e250ba6643a..c6a534303936445e6d2d7f0ce543461db32b81fa 100644 (file)
@@ -18,9 +18,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program.
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -68,6 +65,8 @@
 #include <linux/types.h>
 #include <linux/bitfield.h>
 
+#include "iwl-trans.h"
+
 /****************************/
 /* Flow Handler Definitions */
 /****************************/
index efb1998dcabd44df0ec44e8e3fbd8f80ae01e956..4f10914f60482c44e1d79f9f532befc0538680d9 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index 5c8c0e130194321d71becfd59189f86f3f93cfba..38085850a2d308d4c8385d768ad7280b1fb94246 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
index 97072cf75bca5a7d95a88dab4bf69c5d963b083a..6fc8dac4aab772a47b8aebe1e9bd9e878b2758f8 100644 (file)
@@ -17,9 +17,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program;
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 27db4a3ba1f80a6586dcd9dfba7cf51ce0c7d6f7..96e101d79662b4cd5018128516eabff09cefb369 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -1340,6 +1335,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
        bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
                                fw_has_capa(&fw->ucode_capa,
                                            IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+       bool empty_otp;
        u32 mac_flags;
        u32 sbands_flags = 0;
 
@@ -1355,7 +1351,9 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
        }
 
        rsp = (void *)hcmd.resp_pkt->data;
-       if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP)
+       empty_otp = !!(le32_to_cpu(rsp->general.flags) &
+                      NVM_GENERAL_FLAGS_EMPTY_OTP);
+       if (empty_otp)
                IWL_INFO(trans, "OTP is empty\n");
 
        nvm = kzalloc(sizeof(*nvm) +
@@ -1379,6 +1377,11 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
 
        /* Initialize general data */
        nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version);
+       nvm->n_hw_addrs = rsp->general.n_hw_addrs;
+       if (nvm->n_hw_addrs == 0)
+               IWL_WARN(trans,
+                        "Firmware declares no reserved mac addresses. OTP is empty: %d\n",
+                        empty_otp);
 
        /* Initialize MAC sku data */
        mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags);
index 234d1009a9de40a81a660cfeb8bf77d5753ab0fd..b7e1ddf8f177b30ca888094b026917e8fdec5129 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index b49eda8150bb99f5ede8168251ebad4d29832201..cbd1a8eed620ea42aedc060687a476f4db2bf8d4 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -35,6 +31,7 @@
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b7cd813ba70f1b9caf782e98166331b91073b005..ae83cfdb750e663ea51322699f58742d277af456 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index d34de3f71db60d26235e859b70884cf6688fb0a6..7020dca05221428a5d344c6ceb524b2eba20862f 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 421a869633a32d861919dc80f11f23500f398ace..0f51c7bea8d0b96d625453aa9e4aab2c4aaa5b3f 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 99b43da32adfc17d3ec608ef4ff055ec47d641b4..9f11f391281638f0e01477157304b31878509f3f 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 7e9c924e1220ed3cb7bbf4ed6a8ada14e52ef564..727f73e0b3f10a09606d1f89a13463063600eb54 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 279dd7b7a3fb921c615dcab4fe769f926f7e896d..26b3c73051ca44d046c1933e774679cb3e72eadd 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -269,6 +264,7 @@ struct iwl_rx_cmd_buffer {
        bool _page_stolen;
        u32 _rx_page_order;
        unsigned int truesize;
+       u8 status;
 };
 
 static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
@@ -538,9 +534,6 @@ struct iwl_trans_rxq_dma_data {
  * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
  *     TX'ed commands and similar. The buffer will be vfree'd by the caller.
  *     Note that the transport must fill in the proper file headers.
- * @dump_regs: dump using IWL_ERR configuration space and memory mapped
- *     registers of the device to diagnose failure, e.g., when HW becomes
- *     inaccessible.
  */
 struct iwl_trans_ops {
 
@@ -569,7 +562,7 @@ struct iwl_trans_ops {
                            bool configure_scd);
        /* 22000 functions */
        int (*txq_alloc)(struct iwl_trans *trans,
-                        struct iwl_tx_queue_cfg_cmd *cmd,
+                        __le16 flags, u8 sta_id, u8 tid,
                         int cmd_id, int size,
                         unsigned int queue_wdg_timeout);
        void (*txq_free)(struct iwl_trans *trans, int queue);
@@ -611,8 +604,6 @@ struct iwl_trans_ops {
        struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
                                                 const struct iwl_fw_dbg_trigger_tlv
                                                 *trigger);
-
-       void (*dump_regs)(struct iwl_trans *trans);
 };
 
 /**
@@ -688,6 +679,19 @@ enum iwl_plat_pm_mode {
  * enter/exit (in msecs).
  */
 #define IWL_TRANS_IDLE_TIMEOUT 2000
+#define IWL_MAX_DEBUG_ALLOCATIONS      1
+
+/**
+ * struct iwl_dram_data
+ * @physical: DMA (device-visible) address of the block/page
+ * @block: pointer to the allocated block/page
+ * @size: size of the block/page
+ */
+struct iwl_dram_data {
+       dma_addr_t physical;
+       void *block;
+       int size;
+};
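
Note: struct iwl_dram_data pairs the CPU pointer of an allocation with its device-visible address. A minimal sketch of how such a descriptor could be filled from a coherent DMA allocation; the helper name is invented and this is not part of the patch:

/* Illustrative only, not part of this commit. */
static int iwl_dram_data_alloc_example(struct device *dev,
                                       struct iwl_dram_data *dram,
                                       int size)
{
        /* dma_alloc_coherent() returns the CPU-side pointer and fills
         * in the device-visible DMA address in one call. */
        dram->block = dma_alloc_coherent(dev, size, &dram->physical,
                                         GFP_KERNEL);
        if (!dram->block)
                return -ENOMEM;
        dram->size = size;
        return 0;
}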
 
 /**
  * struct iwl_trans - transport common data
@@ -721,7 +725,9 @@ enum iwl_plat_pm_mode {
  * @dbg_dest_tlv: points to the destination TLV for debug
  * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
  * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
- * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
+ * @dbg_n_dest_reg: num of reg_ops in %dbg_dest_tlv
+ * @num_blocks: number of blocks in fw_mon
+ * @fw_mon: address of the buffers for firmware monitor
  * @system_pm_mode: the system-wide power management mode in use.
  *     This mode is set dynamically, depending on the WoWLAN values
  *     configured from the userspace at runtime.
@@ -772,7 +778,9 @@ struct iwl_trans {
        const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
        struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
        u32 dbg_dump_mask;
-       u8 dbg_dest_reg_num;
+       u8 dbg_n_dest_reg;
+       int num_blocks;
+       struct iwl_dram_data fw_mon[IWL_MAX_DEBUG_ALLOCATIONS];
 
        enum iwl_plat_pm_mode system_pm_mode;
        enum iwl_plat_pm_mode runtime_pm_mode;
@@ -897,12 +905,6 @@ iwl_trans_dump_data(struct iwl_trans *trans,
        return trans->ops->dump_data(trans, trigger);
 }
 
-static inline void iwl_trans_dump_regs(struct iwl_trans *trans)
-{
-       if (trans->ops->dump_regs)
-               trans->ops->dump_regs(trans);
-}
-
 static inline struct iwl_device_cmd *
 iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
 {
@@ -985,7 +987,7 @@ iwl_trans_txq_free(struct iwl_trans *trans, int queue)
 
 static inline int
 iwl_trans_txq_alloc(struct iwl_trans *trans,
-                   struct iwl_tx_queue_cfg_cmd *cmd,
+                   __le16 flags, u8 sta_id, u8 tid,
                    int cmd_id, int size,
                    unsigned int wdg_timeout)
 {
@@ -999,7 +1001,8 @@ iwl_trans_txq_alloc(struct iwl_trans *trans,
                return -EIO;
        }
 
-       return trans->ops->txq_alloc(trans, cmd, cmd_id, size, wdg_timeout);
+       return trans->ops->txq_alloc(trans, flags, sta_id, tid,
+                                    cmd_id, size, wdg_timeout);
 }
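
Note: after this change the transport op builds the queue-config command internally from explicit parameters instead of taking a pre-built struct iwl_tx_queue_cfg_cmd. A hedged sketch of a call site; 'flags', 'sta_id', 'tid', 'size' and 'wdg_timeout' are placeholders for values the caller computes, not taken from this patch:

/* Illustrative call site only. */
int queue = iwl_trans_txq_alloc(mvm->trans, flags, sta_id, tid,
                                SCD_QUEUE_CFG, size, wdg_timeout);
if (queue < 0)
        IWL_ERR(mvm, "failed to allocate TX queue: %d\n", queue);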
 
 static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
index 75d35f6b041ef50ed81a020a5d007b9f8746da71..4094a41580323b03f2149990b1f216bb8e8da889 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 016e03a5034f6010ad73aef10df05cc85cc09440..730e37744dc0243e5040772743c6ef8119a097d7 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -331,7 +326,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
        struct ieee80211_chanctx_conf *chanctx_conf;
        /* default smps_mode is AUTOMATIC - only used for client modes */
        enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
-       u32 bt_activity_grading;
+       u32 bt_activity_grading, min_ag_for_static_smps;
        int ave_rssi;
 
        lockdep_assert_held(&mvm->mutex);
@@ -363,8 +358,13 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
                return;
        }
 
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2))
+               min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC;
+       else
+               min_ag_for_static_smps = BT_HIGH_TRAFFIC;
+
        bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
-       if (bt_activity_grading >= BT_HIGH_TRAFFIC)
+       if (bt_activity_grading >= min_ag_for_static_smps)
                smps_mode = IEEE80211_SMPS_STATIC;
        else if (bt_activity_grading >= BT_LOW_TRAFFIC)
                smps_mode = IEEE80211_SMPS_DYNAMIC;
@@ -691,6 +691,15 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
        return bt_activity >= BT_LOW_TRAFFIC;
 }
 
+u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants)
+{
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2) &&
+           (mvm->cfg->non_shared_ant & enabled_ants))
+               return mvm->cfg->non_shared_ant;
+
+       return first_antenna(enabled_ants);
+}
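
Note: the new helper prefers the non-shared antenna under the new coex schema and otherwise falls back to first_antenna(), i.e. the lowest set bit of the enabled-antenna mask. A quick standalone model of that fallback (the mask value is made up):

#include <stdio.h>

int main(void)
{
        unsigned int enabled_ants = 0x6;        /* e.g. two upper antennas */

        /* lowest set bit == the driver's first_antenna() fallback */
        printf("0x%x\n", enabled_ants & -enabled_ants);   /* prints 0x2 */
        return 0;
}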
+
 u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                           struct ieee80211_tx_info *info, u8 ac)
 {
index d61ff66ce07b02660d7ad6ab118899d2e61ccdc1..d96ada3c06fc5428bed005698718c18db860b7c5 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 79bdae994822844bcc07bb8b63b617f59982d042..210be26aadaacb93e70c719cef676d9c2db743b2 100644 (file)
@@ -434,23 +434,13 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        u8 chains_static, chains_dynamic;
        struct cfg80211_chan_def chandef;
        int ret, i;
-       struct iwl_binding_cmd binding_cmd = {};
+       struct iwl_binding_cmd_v1 binding_cmd = {};
        struct iwl_time_quota_cmd quota_cmd = {};
        struct iwl_time_quota_data *quota;
        u32 status;
-       int size;
-
-       if (fw_has_capa(&mvm->fw->ucode_capa,
-                       IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
-               size = sizeof(binding_cmd);
-               if (mvmvif->phy_ctxt->channel->band == NL80211_BAND_2GHZ ||
-                   !iwl_mvm_is_cdb_supported(mvm))
-                       binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
-               else
-                       binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
-       } else {
-               size = IWL_BINDING_CMD_SIZE_V1;
-       }
+
+       if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm)))
+               return -EINVAL;
 
        /* add back the PHY */
        if (WARN_ON(!mvmvif->phy_ctxt))
@@ -497,7 +487,8 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        status = 0;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
-                                         size, &binding_cmd, &status);
+                                         IWL_BINDING_CMD_SIZE_V1, &binding_cmd,
+                                         &status);
        if (ret) {
                IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
                return ret;
@@ -1042,7 +1033,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
         * the recording automatically before entering D3.  This can
         * be removed once the FW starts doing that.
         */
-       iwl_fw_dbg_stop_recording(&mvm->fwrt);
+       _iwl_fw_dbg_stop_recording(mvm->fwrt.trans, NULL);
 
        /* must be last -- this switches firmware state */
        ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
@@ -1362,7 +1353,7 @@ static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm,
                                   struct ieee80211_key_conf *key,
                                   struct iwl_wowlan_status *status)
 {
-       union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
+       union iwl_all_tsc_rsc *rsc = &status->gtk[0].rsc.all_tsc_rsc;
 
        switch (key->cipher) {
        case WLAN_CIPHER_SUITE_CCMP:
@@ -1419,7 +1410,8 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
         */
        if (sta) {
                struct ieee80211_key_seq seq = {};
-               union iwl_all_tsc_rsc *sc = &data->status->gtk.rsc.all_tsc_rsc;
+               union iwl_all_tsc_rsc *sc =
+                       &data->status->gtk[0].rsc.all_tsc_rsc;
 
                if (data->find_phase)
                        return;
@@ -1501,22 +1493,24 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
                        u8 key[32];
                } conf = {
                        .conf.cipher = gtkdata.cipher,
-                       .conf.keyidx = status->gtk.key_index,
+                       .conf.keyidx =
+                               iwlmvm_wowlan_gtk_idx(&status->gtk[0]),
                };
+               __be64 replay_ctr;
 
                switch (gtkdata.cipher) {
                case WLAN_CIPHER_SUITE_CCMP:
                        conf.conf.keylen = WLAN_KEY_LEN_CCMP;
-                       memcpy(conf.conf.key, status->gtk.decrypt_key,
+                       memcpy(conf.conf.key, status->gtk[0].key,
                               WLAN_KEY_LEN_CCMP);
                        break;
                case WLAN_CIPHER_SUITE_TKIP:
                        conf.conf.keylen = WLAN_KEY_LEN_TKIP;
-                       memcpy(conf.conf.key, status->gtk.decrypt_key, 16);
+                       memcpy(conf.conf.key, status->gtk[0].key, 16);
                        /* leave TX MIC key zeroed, we don't use it anyway */
                        memcpy(conf.conf.key +
                               NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
-                              status->gtk.tkip_mic_key, 8);
+                              status->gtk[0].tkip_mic_key, 8);
                        break;
                }
 
@@ -1524,11 +1518,10 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
                if (IS_ERR(key))
                        return false;
                iwl_mvm_set_key_rx_seq(mvm, key, status);
-       }
 
-       if (status->num_of_gtk_rekeys) {
-               __be64 replay_ctr =
+               replay_ctr =
                        cpu_to_be64(le64_to_cpu(status->replay_ctr));
+
                ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
                                           (void *)&replay_ctr, GFP_KERNEL);
        }
@@ -1541,6 +1534,107 @@ out:
        return true;
 }
 
+struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
+{
+       struct iwl_wowlan_status *v7, *status;
+       struct iwl_host_cmd cmd = {
+               .id = WOWLAN_GET_STATUSES,
+               .flags = CMD_WANT_SKB,
+       };
+       int ret, len, status_size;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+       if (ret) {
+               IWL_ERR(mvm, "failed to query wakeup status (%d)\n", ret);
+               return ERR_PTR(ret);
+       }
+
+       if (!fw_has_api(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) {
+               struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data;
+               int data_size;
+
+               status_size = sizeof(*v6);
+               len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+
+               if (len < status_size) {
+                       IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+                       status = ERR_PTR(-EIO);
+                       goto out_free_resp;
+               }
+
+               data_size = ALIGN(le32_to_cpu(v6->wake_packet_bufsize), 4);
+
+               if (len != (status_size + data_size)) {
+                       IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+                       status = ERR_PTR(-EIO);
+                       goto out_free_resp;
+               }
+
+               status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL);
+               if (!status)
+                       goto out_free_resp;
+
+               BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) >
+                            sizeof(status->gtk[0].key));
+               BUILD_BUG_ON(sizeof(v6->gtk.tkip_mic_key) >
+                            sizeof(status->gtk[0].tkip_mic_key));
+
+               /* copy GTK info to the right place */
+               memcpy(status->gtk[0].key, v6->gtk.decrypt_key,
+                      sizeof(v6->gtk.decrypt_key));
+               memcpy(status->gtk[0].tkip_mic_key, v6->gtk.tkip_mic_key,
+                      sizeof(v6->gtk.tkip_mic_key));
+               memcpy(&status->gtk[0].rsc, &v6->gtk.rsc,
+                      sizeof(status->gtk[0].rsc));
+
+               /* hardcode the key length to 16 since v6 only supports 16 */
+               status->gtk[0].key_len = 16;
+
+               /*
+                * The key index only uses 2 bits (values 0 to 3) and
+                * we always set bit 7 which means this is the
+                * currently used key.
+                */
+               status->gtk[0].key_flags = v6->gtk.key_index | BIT(7);
+
+               status->replay_ctr = v6->replay_ctr;
+
+               /* everything starting from pattern_number is identical */
+               memcpy(&status->pattern_number, &v6->pattern_number,
+                      offsetof(struct iwl_wowlan_status, wake_packet) -
+                      offsetof(struct iwl_wowlan_status, pattern_number) +
+                      data_size);
+
+               goto out_free_resp;
+       }
+
+       v7 = (void *)cmd.resp_pkt->data;
+       status_size = sizeof(*v7);
+       len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+
+       if (len < status_size) {
+               IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+               status = ERR_PTR(-EIO);
+               goto out_free_resp;
+       }
+
+       if (len != (status_size +
+                   ALIGN(le32_to_cpu(v7->wake_packet_bufsize), 4))) {
+               IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+               status = ERR_PTR(-EIO);
+               goto out_free_resp;
+       }
+
+       status = kmemdup(v7, len, GFP_KERNEL);
+
+out_free_resp:
+       iwl_free_resp(&cmd);
+       return status;
+}
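
Note: the v6 path above up-converts an older firmware response by copying the members shared by both layouts in one memcpy() bounded with offsetof(). A self-contained userspace model of the same trick; the struct names are invented:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Invented two-version response, modelling the v6 -> current copy. */
struct status_v1 { int id; int a; int b; };
struct status_v2 { int id; int extra; int a; int b; };

int main(void)
{
        struct status_v1 old = { .id = 1, .a = 42, .b = 7 };
        struct status_v2 out = { .id = old.id };

        /* From 'a' onward the two layouts are identical, so one copy
         * bounded by offsetof() moves the whole shared tail. */
        memcpy(&out.a, &old.a,
               sizeof(out) - offsetof(struct status_v2, a));
        printf("a=%d b=%d\n", out.a, out.b);    /* a=42 b=7 */
        return 0;
}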
+
 static struct iwl_wowlan_status *
 iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
@@ -1550,12 +1644,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                u32 valid;
                u32 error_id;
        } err_info;
-       struct iwl_host_cmd cmd = {
-               .id = WOWLAN_GET_STATUSES,
-               .flags = CMD_WANT_SKB,
-       };
-       struct iwl_wowlan_status *status, *fw_status;
-       int ret, len, status_size;
+       int ret;
 
        iwl_trans_read_mem_bytes(mvm->trans, base,
                                 &err_info, sizeof(err_info));
@@ -1578,34 +1667,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        if (ret)
                IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
 
-       ret = iwl_mvm_send_cmd(mvm, &cmd);
-       if (ret) {
-               IWL_ERR(mvm, "failed to query status (%d)\n", ret);
-               return ERR_PTR(ret);
-       }
-
-       status_size = sizeof(*fw_status);
-
-       len = iwl_rx_packet_payload_len(cmd.resp_pkt);
-       if (len < status_size) {
-               IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
-               fw_status = ERR_PTR(-EIO);
-               goto out_free_resp;
-       }
-
-       status = (void *)cmd.resp_pkt->data;
-       if (len != (status_size +
-                   ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) {
-               IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
-               fw_status = ERR_PTR(-EIO);
-               goto out_free_resp;
-       }
-
-       fw_status = kmemdup(status, len, GFP_KERNEL);
-
-out_free_resp:
-       iwl_free_resp(&cmd);
-       return fw_status;
+       return iwl_mvm_send_wowlan_get_status(mvm);
 }
 
 /* releases the MVM mutex */
@@ -1883,6 +1945,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
                goto err;
        }
 
+       iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
        /* query SRAM first in case we want event logging */
        iwl_mvm_read_d3_sram(mvm);
 
@@ -2117,6 +2180,8 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
 
        mvm->d3_test_active = false;
 
+       iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
+
        rtnl_lock();
        __iwl_mvm_resume(mvm, true);
        rtnl_unlock();
index 798605c4f1227d7f4e34367456dcb58885cda2ae..1aa6c7e930888e46593e95e4439b52c4ee370e14 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 05b77419953ce6250f77f573b173b92660d5bc33..3b6b3d8fb96111391cb0c27b90044164992a4607 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -671,16 +666,11 @@ iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf,
        };
        int ret, bt_force_ant_mode;
 
-       for (bt_force_ant_mode = 0;
-            bt_force_ant_mode < ARRAY_SIZE(modes_str);
-            bt_force_ant_mode++) {
-               if (!strcmp(buf, modes_str[bt_force_ant_mode]))
-                       break;
-       }
-
-       if (bt_force_ant_mode >= ARRAY_SIZE(modes_str))
-               return -EINVAL;
+       ret = match_string(modes_str, ARRAY_SIZE(modes_str), buf);
+       if (ret < 0)
+               return ret;
 
+       bt_force_ant_mode = ret;
        ret = 0;
        mutex_lock(&mvm->mutex);
        if (mvm->bt_force_ant_mode == bt_force_ant_mode)
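
Note: the open-coded scan is replaced by the kernel's match_string(), which returns the array index or -EINVAL. A minimal userspace model of that contract, sufficient for reading this hunk:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Userspace model of match_string(): index of 'string' in 'array'
 * of length n, or -EINVAL if not found. */
static int match_string(const char * const *array, size_t n,
                        const char *string)
{
        size_t i;

        for (i = 0; i < n; i++) {
                if (!array[i])
                        break;
                if (!strcmp(array[i], string))
                        return (int)i;
        }
        return -EINVAL;
}

int main(void)
{
        static const char * const modes_str[] = { "disabled", "bt", "wifi" };

        printf("%d\n", match_string(modes_str, 3, "bt"));   /* 1 */
        printf("%d\n", match_string(modes_str, 3, "zzz"));  /* negative */
        return 0;
}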
@@ -1732,6 +1722,35 @@ iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf,
        return ret ?: count;
 }
 
+static ssize_t
+iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf,
+                       size_t count, loff_t *ppos)
+{
+       struct iwl_he_monitor_cmd he_mon_cmd = {};
+       u32 aid;
+       int ret;
+
+       if (!iwl_mvm_firmware_running(mvm))
+               return -EIO;
+
+       ret = sscanf(buf, "%x %2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &aid,
+                    &he_mon_cmd.bssid[0], &he_mon_cmd.bssid[1],
+                    &he_mon_cmd.bssid[2], &he_mon_cmd.bssid[3],
+                    &he_mon_cmd.bssid[4], &he_mon_cmd.bssid[5]);
+       if (ret != 7)
+               return -EINVAL;
+
+       he_mon_cmd.aid = cpu_to_le16(aid);
+
+       mutex_lock(&mvm->mutex);
+       ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD,
+                                                  DATA_PATH_GROUP, 0), 0,
+                                  sizeof(he_mon_cmd), &he_mon_cmd);
+       mutex_unlock(&mvm->mutex);
+
+       return ret ?: count;
+}
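
Note: the new handler expects one hex AID followed by a colon-separated BSSID, seven fields in total, so writing something like "1 aa:bb:cc:dd:ee:ff" into the he_sniffer_params debugfs file should be accepted (the exact debugfs path depends on the platform layout). A standalone check of the same format string:

#include <stdio.h>

int main(void)
{
        unsigned int aid;
        unsigned char b[6];
        const char *buf = "1 00:11:22:33:44:55";

        /* Same format as the handler above: hex AID plus MAC address. */
        int n = sscanf(buf, "%x %2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx",
                       &aid, &b[0], &b[1], &b[2], &b[3], &b[4], &b[5]);

        printf("parsed %d fields, aid=%u, first octet=%02x\n", n, aid, b[0]);
        return 0;
}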
+
 static ssize_t
 iwl_dbgfs_uapsd_noagg_bssids_read(struct file *file, char __user *user_buf,
                                  size_t count, loff_t *ppos)
@@ -1801,6 +1820,8 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
 MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile);
 #endif
 
+MVM_DEBUGFS_WRITE_FILE_OPS(he_sniffer_params, 32);
+
 static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
                                  size_t count, loff_t *ppos)
 {
@@ -1989,6 +2010,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
 #ifdef CONFIG_ACPI
        MVM_DEBUGFS_ADD_FILE(sar_geo_profile, dbgfs_dir, 0400);
 #endif
+       MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0200);
 
        if (!debugfs_create_bool("enable_scan_iteration_notif",
                                 0600,
index ede6ef8d390e2ad5fa5aa045fc8da69e1ab2e6aa..a83d252c060249aa8e094e6d875d774faec34123 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index e8e74dd558f77944128b5bde4fd9c73ac569c5e0..143c7fcaea41198d3108139c3b8e7763240ef714 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 6bb1a99a197a22981f29962ab487cccb76dd0eb8..c5df73231ba37ed8f1cd79bbd2c0c061855b2b7b 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -304,6 +299,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
        enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
        static const u16 alive_cmd[] = { MVM_ALIVE };
 
+       set_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status);
        if (ucode_type == IWL_UCODE_REGULAR &&
            iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
            !(fw_has_capa(&mvm->fw->ucode_capa,
@@ -374,6 +370,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
                atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
 
        set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+       clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status);
 
        return 0;
 }
@@ -704,8 +701,12 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
        enabled = !!(wifi_pkg->package.elements[1].integer.value);
        n_profiles = wifi_pkg->package.elements[2].integer.value;
 
-       /* in case of BIOS bug */
-       if (n_profiles <= 0) {
+       /*
+        * Check the validity of n_profiles.  The EWRD profiles start
+        * from index 1, so the maximum value allowed here is
+        * ACPI_SAR_PROFILE_NUM - 1.
+        */
+       if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
                ret = -EINVAL;
                goto out_free;
        }
@@ -773,19 +774,28 @@ out_free:
 
 int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
 {
-       struct iwl_dev_tx_power_cmd cmd = {
-               .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
-       };
+       union {
+               struct iwl_dev_tx_power_cmd v5;
+               struct iwl_dev_tx_power_cmd_v4 v4;
+       } cmd;
        int i, j, idx;
        int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
-       int len = sizeof(cmd);
+       int len;
 
        BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS < 2);
        BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS * ACPI_SAR_NUM_SUB_BANDS !=
                     ACPI_SAR_TABLE_SIZE);
 
-       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
-               len = sizeof(cmd.v3);
+       cmd.v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS);
+
+       if (fw_has_api(&mvm->fw->ucode_capa,
+                      IWL_UCODE_TLV_API_REDUCE_TX_POWER))
+               len = sizeof(cmd.v5);
+       else if (fw_has_capa(&mvm->fw->ucode_capa,
+                            IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
+               len = sizeof(cmd.v4);
+       else
+               len = sizeof(cmd.v4.v3);
 
        for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) {
                struct iwl_mvm_sar_profile *prof;
@@ -812,7 +822,7 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
                IWL_DEBUG_RADIO(mvm, "  Chain[%d]:\n", i);
                for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS; j++) {
                        idx = (i * ACPI_SAR_NUM_SUB_BANDS) + j;
-                       cmd.v3.per_chain_restriction[i][j] =
+                       cmd.v5.v3.per_chain_restriction[i][j] =
                                cpu_to_le16(prof->table[idx]);
                        IWL_DEBUG_RADIO(mvm, "    Band[%d] = %d * .125dBm\n",
                                        j, prof->table[idx]);
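
Note: the union-plus-length idiom introduced here (and mirrored in iwl_mvm_set_tx_power further down) keeps one buffer but transmits only the prefix the firmware understands. A toy model with invented struct names:

#include <stdio.h>

/* Invented layouts: each version extends the previous one in place. */
struct cmd_v3 { unsigned int set_mode; };
struct cmd_v4 { struct cmd_v3 v3; unsigned int ack; };
struct cmd_v5 { struct cmd_v3 v3; unsigned int ack; unsigned int reduce; };

int main(void)
{
        union {
                struct cmd_v5 v5;
                struct cmd_v4 v4;
        } cmd = { .v5.v3.set_mode = 1 };
        int fw_version = 4;     /* pretend capability probing said v4 */
        size_t len;

        if (fw_version >= 5)
                len = sizeof(cmd.v5);
        else if (fw_version == 4)
                len = sizeof(cmd.v4);
        else
                len = sizeof(cmd.v4.v3);

        printf("send %zu of %zu bytes\n", len, sizeof(cmd));
        return 0;
}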
@@ -1018,7 +1028,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 
        mvm->fwrt.dump.conf = FW_DBG_INVALID;
        /* if we have a destination, assume EARLY START */
-       if (mvm->fw->dbg_dest_tlv)
+       if (mvm->fw->dbg.dest_tlv)
                mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
        iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
 
index b27269504a622f3fff33b2e3736c13720af53e13..9bb1de1cad64af631af6241c28612dc86efb9642 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index b3fd20502abb3c604352fbc9d5aa84256f40a4f6..6486cfb33f403d478680aca1ae489a843e81e24e 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -35,6 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -85,6 +82,10 @@ const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = {
        IWL_GEN2_EDCA_TX_FIFO_VI,
        IWL_GEN2_EDCA_TX_FIFO_BE,
        IWL_GEN2_EDCA_TX_FIFO_BK,
+       IWL_GEN2_TRIG_TX_FIFO_VO,
+       IWL_GEN2_TRIG_TX_FIFO_VI,
+       IWL_GEN2_TRIG_TX_FIFO_BE,
+       IWL_GEN2_TRIG_TX_FIFO_BK,
 };
 
 struct iwl_mvm_mac_iface_iterator_data {
@@ -1486,12 +1487,11 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
             IWL_MVM_MISSED_BEACONS_THRESHOLD)
                ieee80211_beacon_loss(vif);
 
-       if (!iwl_fw_dbg_trigger_enabled(mvm->fw,
-                                       FW_DBG_TRIGGER_MISSED_BEACONS))
+       trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+                                       FW_DBG_TRIGGER_MISSED_BEACONS);
+       if (!trigger)
                return;
 
-       trigger = iwl_fw_dbg_get_trigger(mvm->fw,
-                                        FW_DBG_TRIGGER_MISSED_BEACONS);
        bcon_trig = (void *)trigger->data;
        stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
        stop_trig_missed_bcon_since_rx =
@@ -1499,11 +1499,6 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
 
        /* TODO: implement start trigger */
 
-       if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-                                          ieee80211_vif_to_wdev(vif),
-                                          trigger))
-               return;
-
        if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
            rx_missed_bcon >= stop_trig_missed_bcon)
                iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL);
@@ -1568,6 +1563,65 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
        ieee80211_rx_napi(mvm->hw, NULL, skb, NULL);
 }
 
+static void iwl_mvm_probe_resp_data_iter(void *_data, u8 *mac,
+                                        struct ieee80211_vif *vif)
+{
+       struct iwl_probe_resp_data_notif *notif = _data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_probe_resp_data *old_data, *new_data;
+
+       if (mvmvif->id != (u16)le32_to_cpu(notif->mac_id))
+               return;
+
+       new_data = kzalloc(sizeof(*new_data), GFP_KERNEL);
+       if (!new_data)
+               return;
+
+       memcpy(&new_data->notif, notif, sizeof(new_data->notif));
+
+       /* noa_attr contains 1 reserved byte, need to subtract it */
+       new_data->noa_len = sizeof(struct ieee80211_vendor_ie) +
+                           sizeof(new_data->notif.noa_attr) - 1;
+
+       /*
+        * If it's a one time NoA, only one descriptor is needed,
+        * adjust the length according to len_low.
+        */
+       if (new_data->notif.noa_attr.len_low ==
+           sizeof(struct ieee80211_p2p_noa_desc) + 2)
+               new_data->noa_len -= sizeof(struct ieee80211_p2p_noa_desc);
+
+       old_data = rcu_dereference_protected(mvmvif->probe_resp_data,
+                                       lockdep_is_held(&mvmvif->mvm->mutex));
+       rcu_assign_pointer(mvmvif->probe_resp_data, new_data);
+
+       if (old_data)
+               kfree_rcu(old_data, rcu_head);
+
+       if (notif->csa_counter != IWL_PROBE_RESP_DATA_NO_CSA &&
+           notif->csa_counter >= 1)
+               ieee80211_csa_set_counter(vif, notif->csa_counter);
+}
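
Note: the iterator publishes the new notification data with the usual RCU replace-and-retire idiom. A sketch of that idiom in the abstract, assuming the updater holds the mutex named in lockdep_is_held(); this is not literal driver code:

/* Sketch only; 'lock' must be held by the caller. */
struct item {
        struct rcu_head rcu_head;
        int value;
};

static void replace_item(struct item __rcu **slot, struct mutex *lock,
                         int value)
{
        struct item *old, *new;

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return;
        new->value = value;

        old = rcu_dereference_protected(*slot, lockdep_is_held(lock));
        rcu_assign_pointer(*slot, new);         /* publish to readers */
        if (old)
                kfree_rcu(old, rcu_head);       /* free after grace period */
}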
+
+void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
+                                  struct iwl_rx_cmd_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_probe_resp_data_notif *notif = (void *)pkt->data;
+       int len = iwl_rx_packet_payload_len(pkt);
+
+       if (WARN_ON_ONCE(len < sizeof(*notif)))
+               return;
+
+       IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n",
+                      notif->noa_active, notif->csa_counter);
+
+       ieee80211_iterate_active_interfaces(mvm->hw,
+                                           IEEE80211_IFACE_ITER_ACTIVE,
+                                           iwl_mvm_probe_resp_data_iter,
+                                           notif);
+}
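
Note: the notification handler rejects payloads shorter than the struct before touching any field, the standard defensive pattern for firmware-provided lengths. A standalone model with an invented struct:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct notif { unsigned int mac_id; unsigned char csa_counter; };

/* Never trust the wire length: validate before casting or copying. */
static void handle(const void *data, size_t len)
{
        struct notif n;

        if (len < sizeof(n)) {
                fprintf(stderr, "short notif: %zu bytes\n", len);
                return;
        }
        memcpy(&n, data, sizeof(n));
        printf("mac_id=%u csa=%u\n", n.mac_id, n.csa_counter);
}

int main(void)
{
        unsigned char payload[sizeof(struct notif)] = { 0 };

        handle(payload, sizeof(payload));       /* accepted */
        handle(payload, 2);                     /* rejected as too short */
        return 0;
}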
+
 void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
                                      struct iwl_rx_cmd_buffer *rxb)
 {
index d46f3fbea46efce92fd7b0c416b89a468c8ef520..505b0385d80003e546578fa2a1d8a4684ea1a9dd 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -559,8 +554,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 
        hw->wiphy->max_remain_on_channel_duration = 10000;
        hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
-       /* we can compensate an offset of up to 3 channels = 15 MHz */
-       hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;
 
        /* Extract MAC address */
        memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
@@ -864,16 +857,13 @@ iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        struct iwl_fw_dbg_trigger_tlv *trig;
        struct iwl_fw_dbg_trigger_ba *ba_trig;
 
-       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+       trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+                                    FW_DBG_TRIGGER_BA);
+       if (!trig)
                return;
 
-       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
        ba_trig = (void *)trig->data;
 
-       if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-                                          ieee80211_vif_to_wdev(vif), trig))
-               return;
-
        switch (action) {
        case IEEE80211_AMPDU_TX_OPERATIONAL: {
                struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -1035,6 +1025,7 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
 
        mvmvif->phy_ctxt = NULL;
        memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
+       memset(&mvmvif->probe_resp_data, 0, sizeof(mvmvif->probe_resp_data));
 }
 
 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
@@ -1124,7 +1115,9 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
                 * would do.
                 */
                clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+#ifdef CONFIG_PM
                iwl_mvm_d0i3_enable_tx(mvm, NULL);
+#endif
        }
 
        return ret;
@@ -1162,7 +1155,9 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
        mutex_lock(&mvm->mutex);
 
        clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+#ifdef CONFIG_PM
        iwl_mvm_d0i3_enable_tx(mvm, NULL);
+#endif
        ret = iwl_mvm_update_quotas(mvm, true, NULL);
        if (ret)
                IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
@@ -1233,12 +1228,15 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
        iwl_mvm_del_aux_sta(mvm);
 
        /*
-        * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
-        * won't be called in this case).
+        * Clear the IN_HW_RESTART and HW_RESTART_REQUESTED flags when
+        * stopping the hw, as restart_complete() won't be called in this
+        * case and mac80211 won't execute the restart.
         * But make sure to cleanup interfaces that have gone down before/during
         * HW restart was requested.
         */
-       if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+       if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
+           test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+                              &mvm->status))
                ieee80211_iterate_interfaces(mvm->hw, 0,
                                             iwl_mvm_cleanup_iterator, mvm);
 
@@ -1308,19 +1306,28 @@ static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
 static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                s16 tx_power)
 {
-       struct iwl_dev_tx_power_cmd cmd = {
-               .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
-               .v3.mac_context_id =
+       int len;
+       union {
+               struct iwl_dev_tx_power_cmd v5;
+               struct iwl_dev_tx_power_cmd_v4 v4;
+       } cmd = {
+               .v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
+               .v5.v3.mac_context_id =
                        cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
-               .v3.pwr_restriction = cpu_to_le16(8 * tx_power),
+               .v5.v3.pwr_restriction = cpu_to_le16(8 * tx_power),
        };
-       int len = sizeof(cmd);
 
        if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
-               cmd.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
+               cmd.v5.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
 
-       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
-               len = sizeof(cmd.v3);
+       if (fw_has_api(&mvm->fw->ucode_capa,
+                      IWL_UCODE_TLV_API_REDUCE_TX_POWER))
+               len = sizeof(cmd.v5);
+       else if (fw_has_capa(&mvm->fw->ucode_capa,
+                            IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
+               len = sizeof(cmd.v4);
+       else
+               len = sizeof(cmd.v4.v3);
 
        return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
 }
@@ -1333,6 +1340,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
        int ret;
 
        mvmvif->mvm = mvm;
+       RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL);
 
        /*
         * make sure D0i3 exit is completed, otherwise a target access
@@ -1497,6 +1505,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_probe_resp_data *probe_data;
 
        iwl_mvm_prepare_mac_removal(mvm, vif);
 
@@ -1506,6 +1515,12 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
+       probe_data = rcu_dereference_protected(mvmvif->probe_resp_data,
+                                              lockdep_is_held(&mvm->mutex));
+       RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL);
+       if (probe_data)
+               kfree_rcu(probe_data, rcu_head);
+
        if (mvm->bf_allowed_vif == mvmvif) {
                mvm->bf_allowed_vif = NULL;
                vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
@@ -2455,6 +2470,9 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
 
        iwl_mvm_mac_ctxt_remove(mvm, vif);
 
+       kfree(mvmvif->ap_wep_key);
+       mvmvif->ap_wep_key = NULL;
+
        mutex_unlock(&mvm->mutex);
 }
 
@@ -2784,14 +2802,12 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
        struct iwl_fw_dbg_trigger_tlv *trig;
        struct iwl_fw_dbg_trigger_tdls *tdls_trig;
 
-       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TDLS))
+       trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+                                    FW_DBG_TRIGGER_TDLS);
+       if (!trig)
                return;
 
-       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS);
        tdls_trig = (void *)trig->data;
-       if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-                                          ieee80211_vif_to_wdev(vif), trig))
-               return;
 
        if (!(tdls_trig->action_bitmap & BIT(action)))
                return;
@@ -2927,7 +2943,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
                        iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
                }
 
-               iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band);
+               iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+                                    false);
                ret = iwl_mvm_update_sta(mvm, vif, sta);
        } else if (old_state == IEEE80211_STA_ASSOC &&
                   new_state == IEEE80211_STA_AUTHORIZED) {
@@ -2943,9 +2960,16 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
                /* enable beacon filtering */
                WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
 
-               iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band);
+               iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+                                    true);
 
-               ret = 0;
+               /* if wep is used, need to set the key for the station now */
+               if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key)
+                       ret = iwl_mvm_set_sta_key(mvm, vif, sta,
+                                                 mvmvif->ap_wep_key,
+                                                 STA_KEY_IDX_INVALID);
+               else
+                       ret = 0;
        } else if (old_state == IEEE80211_STA_AUTHORIZED &&
                   new_state == IEEE80211_STA_ASSOC) {
                /* disable beacon filtering */
@@ -3128,8 +3152,15 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
 
        switch (key->cipher) {
        case WLAN_CIPHER_SUITE_TKIP:
-               key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
-               key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+               if (!mvm->trans->cfg->gen2) {
+                       key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+                       key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+               } else if (vif->type == NL80211_IFTYPE_STATION) {
+                       key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE;
+               } else {
+                       IWL_DEBUG_MAC80211(mvm, "Use SW encryption for TKIP\n");
+                       return -EOPNOTSUPP;
+               }
                break;
        case WLAN_CIPHER_SUITE_CCMP:
        case WLAN_CIPHER_SUITE_GCMP:
@@ -3144,13 +3175,17 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
                break;
        case WLAN_CIPHER_SUITE_WEP40:
        case WLAN_CIPHER_SUITE_WEP104:
-               /* For non-client mode, only use WEP keys for TX as we probably
-                * don't have a station yet anyway and would then have to keep
-                * track of the keys, linking them to each of the clients/peers
-                * as they appear. For now, don't do that, for performance WEP
-                * offload doesn't really matter much, but we need it for some
-                * other offload features in client mode.
-                */
+               if (vif->type == NL80211_IFTYPE_AP) {
+                       struct iwl_mvm_vif *mvmvif =
+                               iwl_mvm_vif_from_mac80211(vif);
+
+                       mvmvif->ap_wep_key = kmemdup(key,
+                                                    sizeof(*key) + key->keylen,
+                                                    GFP_KERNEL);
+                       if (!mvmvif->ap_wep_key)
+                               return -ENOMEM;
+               }
+
                if (vif->type != NL80211_IFTYPE_STATION)
                        return 0;
                break;
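
Note: kmemdup() in the WEP branch above copies the key descriptor together with its trailing variable-length key material in one shot. A userspace model with malloc/memcpy; the struct name is invented:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Invented flexible-array key object, modelling the kmemdup() above. */
struct key_conf {
        unsigned char keylen;
        unsigned char key[];    /* keylen bytes follow the header */
};

static struct key_conf *key_dup(const struct key_conf *src)
{
        size_t sz = sizeof(*src) + src->keylen;
        struct key_conf *dup = malloc(sz);

        if (dup)
                memcpy(dup, src, sz);   /* header plus trailing key bytes */
        return dup;
}

int main(void)
{
        struct key_conf *k = malloc(sizeof(*k) + 5);
        struct key_conf *copy;

        if (!k)
                return 1;
        k->keylen = 5;
        memcpy(k->key, "ABCDE", 5);

        copy = key_dup(k);
        if (copy)
                printf("%.*s\n", copy->keylen, copy->key);
        free(copy);
        free(k);
        return 0;
}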
@@ -4454,14 +4489,12 @@ static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
        struct iwl_fw_dbg_trigger_tlv *trig;
        struct iwl_fw_dbg_trigger_mlme *trig_mlme;
 
-       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
+       trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+                                    FW_DBG_TRIGGER_MLME);
+       if (!trig)
                return;
 
-       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
        trig_mlme = (void *)trig->data;
-       if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-                                          ieee80211_vif_to_wdev(vif), trig))
-               return;
 
        if (event->u.mlme.data == ASSOC_EVENT) {
                if (event->u.mlme.status == MLME_DENIED)
@@ -4496,14 +4529,12 @@ static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
        struct iwl_fw_dbg_trigger_tlv *trig;
        struct iwl_fw_dbg_trigger_ba *ba_trig;
 
-       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+       trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+                                    FW_DBG_TRIGGER_BA);
+       if (!trig)
                return;
 
-       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
        ba_trig = (void *)trig->data;
-       if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-                                          ieee80211_vif_to_wdev(vif), trig))
-               return;
 
        if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
                return;
index b3987a0a70181ad2dc6775d044ccf90a3bbd8cf8..8f71eeed50d9586c1ee9fb73ba772b49225b1d92 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -335,6 +330,18 @@ struct iwl_mvm_vif_bf_data {
        int last_bt_coex_event;
 };
 
+/**
+ * struct iwl_probe_resp_data - data for NoA/CSA updates
+ * @rcu_head: used for freeing the data on update
+ * @notif: notification data
+ * @noa_len: length of NoA attribute, calculated from the notification
+ */
+struct iwl_probe_resp_data {
+       struct rcu_head rcu_head;
+       struct iwl_probe_resp_data_notif notif;
+       int noa_len;
+};
+
 /**
  * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
  * @id: between 0 and 3
@@ -365,6 +372,8 @@ struct iwl_mvm_vif_bf_data {
  *     average signal of beacons retrieved from the firmware
  * @csa_failed: CSA failed to schedule time event, report an error later
  * @features: hw features active for this vif
+ * @probe_resp_data: data from FW notification to store NOA and CSA related
+ *     data to be inserted into probe response.
  */
 struct iwl_mvm_vif {
        struct iwl_mvm *mvm;
@@ -460,6 +469,9 @@ struct iwl_mvm_vif {
 
        /* TCP Checksum Offload */
        netdev_features_t features;
+
+       struct iwl_probe_resp_data __rcu *probe_resp_data;
+       struct ieee80211_key_conf *ap_wep_key;
 };
 
 static inline struct iwl_mvm_vif *
@@ -1229,6 +1241,11 @@ static inline bool iwl_mvm_is_oce_supported(struct iwl_mvm *mvm)
        return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_OCE);
 }
 
+static inline bool iwl_mvm_is_frag_ebs_supported(struct iwl_mvm *mvm)
+{
+       return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAG_EBS);
+}
+
 static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
 {
        /* For now we only use this mode to differentiate between
@@ -1602,6 +1619,8 @@ void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
                                    struct ieee80211_vif *vif);
 unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
                                         struct ieee80211_vif *exclude_vif);
+void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
+                                  struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
                                      struct iwl_rx_cmd_buffer *rxb);
 /* Bindings */
@@ -1685,7 +1704,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 #endif /* CONFIG_IWLWIFI_DEBUGFS */
 
 /* rate scaling */
-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync);
 void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
 int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
 void rs_update_last_rssi(struct iwl_mvm *mvm,
@@ -1733,6 +1752,7 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
 void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
                                     struct ieee80211_vif *vif, int idx);
 extern const struct file_operations iwl_dbgfs_d3_test_ops;
+struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm);
 #ifdef CONFIG_PM
 int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
                                     struct ieee80211_vif *vif,
@@ -1776,10 +1796,13 @@ void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
 void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
 int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
 bool iwl_mvm_ref_taken(struct iwl_mvm *mvm);
+
+#ifdef CONFIG_PM
 void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
 int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode);
 int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode);
 int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
+#endif
 
 /* BT Coex */
 int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm);
@@ -1796,6 +1819,7 @@ bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant);
 bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm);
 bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
                                    enum nl80211_band band);
+u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants);
 u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                           struct ieee80211_tx_info *info, u8 ac);
 
index cf48517944ecf25f6876d0f07713f2fed7e8d8bc..3633f27d048ab7bd36ac99fc38cae1c1197bd0de 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -482,15 +477,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
        u32 status;
        int resp_len, n_channels;
        u16 mcc;
-       bool resp_v2 = fw_has_capa(&mvm->fw->ucode_capa,
-                                  IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
 
        if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
                return ERR_PTR(-EOPNOTSUPP);
 
        cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);
-       if (!resp_v2)
-               cmd.len[0] = sizeof(struct iwl_mcc_update_cmd_v1);
 
        IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n",
                      alpha2[0], alpha2[1], src_id);
@@ -502,7 +493,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
        pkt = cmd.resp_pkt;
 
        /* Extract MCC response */
-       if (resp_v2) {
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT)) {
                struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data;
 
                n_channels =  __le32_to_cpu(mcc_resp->n_channels);
@@ -514,9 +506,9 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
                        goto exit;
                }
        } else {
-               struct iwl_mcc_update_resp_v1 *mcc_resp_v1 = (void *)pkt->data;
+               struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data;
 
-               n_channels =  __le32_to_cpu(mcc_resp_v1->n_channels);
+               n_channels =  __le32_to_cpu(mcc_resp_v3->n_channels);
                resp_len = sizeof(struct iwl_mcc_update_resp) +
                           n_channels * sizeof(__le32);
                resp_cp = kzalloc(resp_len, GFP_KERNEL);
@@ -525,12 +517,14 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
                        goto exit;
                }
 
-               resp_cp->status = mcc_resp_v1->status;
-               resp_cp->mcc = mcc_resp_v1->mcc;
-               resp_cp->cap = mcc_resp_v1->cap;
-               resp_cp->source_id = mcc_resp_v1->source_id;
-               resp_cp->n_channels = mcc_resp_v1->n_channels;
-               memcpy(resp_cp->channels, mcc_resp_v1->channels,
+               resp_cp->status = mcc_resp_v3->status;
+               resp_cp->mcc = mcc_resp_v3->mcc;
+               resp_cp->cap = cpu_to_le16(mcc_resp_v3->cap);
+               resp_cp->source_id = mcc_resp_v3->source_id;
+               resp_cp->time = mcc_resp_v3->time;
+               resp_cp->geo_info = mcc_resp_v3->geo_info;
+               resp_cp->n_channels = mcc_resp_v3->n_channels;
+               memcpy(resp_cp->channels, mcc_resp_v3->channels,
                       n_channels * sizeof(__le32));
        }
 
index 6338d9cf7070814f1b96a6c90d973efbf622f55a..6d71e05626ad8094e55a2c5d7ee538bfc41a13ba 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 0e26619fb330b8282f5c923aae61adf971c4a119..0e2092526fae1d81623c08005bbf2f08ee8d7077 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -182,6 +177,9 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
        if (mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
                reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
 
+       if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt))
+               reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG;
+
        iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
                                CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
                                CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
@@ -189,7 +187,8 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
                                CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
                                CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
                                CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
-                               CSR_HW_IF_CONFIG_REG_BIT_MAC_SI,
+                               CSR_HW_IF_CONFIG_REG_BIT_MAC_SI   |
+                               CSR_HW_IF_CONFIG_REG_D3_DEBUG,
                                reg_val);
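
The two hunks above set CSR_HW_IF_CONFIG_REG_D3_DEBUG in the written value only when D3 debug is enabled, but always include it in the mask, so a later call with the bit clear turns it back off. A self-contained sketch of that read-modify-write idiom (register and bit names below are invented):

    #include <stdbool.h>
    #include <stdint.h>

    #define CFG_RADIO_SI  (1u << 0)
    #define CFG_MAC_SI    (1u << 1)
    #define CFG_D3_DEBUG  (1u << 2)

    /* update only the bits in 'mask': set where 'val' has them,
     * clear where it does not */
    static void set_bits_mask(uint32_t *reg, uint32_t mask, uint32_t val)
    {
            *reg = (*reg & ~mask) | (val & mask);
    }

    static void nic_config(uint32_t *reg, bool d3_debug)
    {
            uint32_t val = CFG_RADIO_SI | CFG_MAC_SI;

            if (d3_debug)
                    val |= CFG_D3_DEBUG;

            /* D3_DEBUG stays in the mask, so disabling it clears the bit */
            set_bits_mask(reg, CFG_RADIO_SI | CFG_MAC_SI | CFG_D3_DEBUG, val);
    }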
 
        IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
@@ -491,7 +490,9 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
 
 /* this forward declaration avoids having to export the function */
 static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
+#ifdef CONFIG_PM
 static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
+#endif
 
 static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
 {
@@ -564,10 +565,23 @@ static bool iwl_mvm_fwrt_fw_running(void *ctx)
        return iwl_mvm_firmware_running(ctx);
 }
 
+static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
+{
+       struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
+       int ret;
+
+       mutex_lock(&mvm->mutex);
+       ret = iwl_mvm_send_cmd(mvm, host_cmd);
+       mutex_unlock(&mvm->mutex);
+
+       return ret;
+}
+
 static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
        .dump_start = iwl_mvm_fwrt_dump_start,
        .dump_end = iwl_mvm_fwrt_dump_end,
        .fw_running = iwl_mvm_fwrt_fw_running,
+       .send_hcmd = iwl_mvm_fwrt_send_hcmd,
 };
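
The new .send_hcmd runtime op lets the shared firmware-runtime code submit host commands through the op mode, which supplies the locking. A small sketch of the callback-plus-context shape (all names here are illustrative, not the driver's API):

    #include <pthread.h>

    struct host_cmd { int id; };

    struct runtime_ops {
            int (*send_hcmd)(void *ctx, struct host_cmd *cmd);
    };

    struct op_mode {
            pthread_mutex_t mutex;
    };

    /* stand-in for the locked command-submission path */
    static int op_submit(struct op_mode *om, struct host_cmd *cmd)
    {
            (void)om;
            (void)cmd;
            return 0;
    }

    /* the op serializes submissions behind the op mode's own mutex,
     * so the runtime core never has to know about that lock */
    static int op_send_hcmd(void *ctx, struct host_cmd *cmd)
    {
            struct op_mode *om = ctx;
            int ret;

            pthread_mutex_lock(&om->mutex);
            ret = op_submit(om, cmd);
            pthread_mutex_unlock(&om->mutex);
            return ret;
    }

    static const struct runtime_ops ops = {
            .send_hcmd = op_send_hcmd,
    };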
 
 static struct iwl_op_mode *
@@ -583,6 +597,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        };
        int err, scan_size;
        u32 min_backoff;
+       enum iwl_amsdu_size rb_size_default;
 
        /*
         * We use IWL_MVM_STATION_COUNT to check the validity of the station
@@ -602,9 +617,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
        if (cfg->max_rx_agg_size)
                hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;
+       else
+               hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
 
        if (cfg->max_tx_agg_size)
                hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
+       else
+               hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
 
        op_mode = hw->priv;
 
@@ -661,7 +680,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
        INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
        INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
+#ifdef CONFIG_PM
        INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
+#endif
        INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
        INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
        INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
@@ -691,8 +712,16 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        trans_cfg.op_mode = op_mode;
        trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
        trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+
+       if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+               rb_size_default = IWL_AMSDU_2K;
+       else
+               rb_size_default = IWL_AMSDU_4K;
+
        switch (iwlwifi_mod_params.amsdu_size) {
        case IWL_AMSDU_DEF:
+               trans_cfg.rx_buf_size = rb_size_default;
+               break;
        case IWL_AMSDU_4K:
                trans_cfg.rx_buf_size = IWL_AMSDU_4K;
                break;
@@ -705,16 +734,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        default:
                pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
                       iwlwifi_mod_params.amsdu_size);
-               trans_cfg.rx_buf_size = IWL_AMSDU_4K;
-       }
-
-       /* the hardware splits the A-MSDU */
-       if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
-               trans_cfg.rx_buf_size = IWL_AMSDU_2K;
-               /* TODO: remove when balanced power mode is fw supported */
-               iwlmvm_mod_params.power_scheme = IWL_POWER_SCHEME_CAM;
-       } else if (mvm->cfg->mq_rx_supported) {
-               trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+               trans_cfg.rx_buf_size = rb_size_default;
        }
 
        trans->wide_cmd_header = true;
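
With this change the device-family default (2K buffers on family 22560 and later, which split A-MSDUs in hardware; 4K otherwise) is computed once and used both for IWL_AMSDU_DEF and as the fallback for an invalid module parameter, instead of being patched in after the switch. A compilable sketch of that select-default-then-dispatch shape (values are illustrative):

    #include <stdio.h>

    enum amsdu_size { AMSDU_DEF, AMSDU_2K, AMSDU_4K, AMSDU_8K, AMSDU_12K };

    static enum amsdu_size pick_rx_buf_size(int family, enum amsdu_size param)
    {
            /* newer families split A-MSDUs in hardware: 2K is enough */
            enum amsdu_size def = family >= 22560 ? AMSDU_2K : AMSDU_4K;

            switch (param) {
            case AMSDU_DEF:
                    return def;
            case AMSDU_4K:
            case AMSDU_8K:
            case AMSDU_12K:
                    return param;
            default:
                    fprintf(stderr, "unsupported amsdu_size: %d\n", param);
                    return def;
            }
    }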
@@ -745,12 +765,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        iwl_trans_configure(mvm->trans, &trans_cfg);
 
        trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
-       trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
-       trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
-       memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
+       trans->dbg_dest_tlv = mvm->fw->dbg.dest_tlv;
+       trans->dbg_n_dest_reg = mvm->fw->dbg.n_dest_reg;
+       memcpy(trans->dbg_conf_tlv, mvm->fw->dbg.conf_tlv,
               sizeof(trans->dbg_conf_tlv));
-       trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
-       trans->dbg_dump_mask = mvm->fw->dbg_dump_mask;
+       trans->dbg_trigger_tlv = mvm->fw->dbg.trigger_tlv;
+       trans->dbg_dump_mask = mvm->fw->dbg.dump_mask;
 
        trans->iml = mvm->fw->iml;
        trans->iml_len = mvm->fw->iml_len;
@@ -781,6 +801,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        mutex_lock(&mvm->mutex);
        iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
        err = iwl_run_init_mvm_ucode(mvm, true);
+       if (test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status))
+               iwl_fw_alive_error_dump(&mvm->fwrt);
        if (!iwlmvm_mod_params.init_dbg || !err)
                iwl_mvm_stop_device(mvm);
        iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
@@ -950,15 +972,13 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
        struct iwl_fw_dbg_trigger_cmd *cmds_trig;
        int i;
 
-       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF))
+       trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
+                                    FW_DBG_TRIGGER_FW_NOTIF);
+       if (!trig)
                return;
 
-       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF);
        cmds_trig = (void *)trig->data;
 
-       if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
-               return;
-
        for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
                /* don't collect on CMD 0 */
                if (!cmds_trig->cmds[i].cmd_id)
@@ -1220,7 +1240,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
         */
        if (!mvm->fw_restart && fw_error) {
                iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
-                                       NULL);
+                                       NULL, 0);
        } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
                struct iwl_mvm_reprobe *reprobe;
 
@@ -1246,7 +1266,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
                INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
                schedule_work(&reprobe->work);
        } else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
-                  mvm->hw_registered) {
+                  mvm->hw_registered &&
+                  !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
                /* don't let the transport/FW power down */
                iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
 
@@ -1261,7 +1282,8 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
 {
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 
-       iwl_mvm_dump_nic_error_log(mvm);
+       if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status))
+               iwl_mvm_dump_nic_error_log(mvm);
 
        iwl_mvm_nic_restart(mvm, true);
 }
@@ -1274,6 +1296,7 @@ static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
        iwl_mvm_nic_restart(mvm, true);
 }
 
+#ifdef CONFIG_PM
 struct iwl_d0i3_iter_data {
        struct iwl_mvm *mvm;
        struct ieee80211_vif *connected_vif;
@@ -1596,25 +1619,23 @@ out:
 static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
 {
        struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
-       struct iwl_host_cmd get_status_cmd = {
-               .id = WOWLAN_GET_STATUSES,
-               .flags = CMD_HIGH_PRIO | CMD_WANT_SKB,
-       };
        struct iwl_mvm_d0i3_exit_work_iter_data iter_data = {
                .mvm = mvm,
        };
 
        struct iwl_wowlan_status *status;
-       int ret;
        u32 wakeup_reasons = 0;
        __le16 *qos_seq = NULL;
 
        mutex_lock(&mvm->mutex);
-       ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
-       if (ret)
+
+       status = iwl_mvm_send_wowlan_get_status(mvm);
+       if (IS_ERR_OR_NULL(status)) {
+               /* set to NULL so we don't need to check before kfree'ing */
+               status = NULL;
                goto out;
+       }
 
-       status = (void *)get_status_cmd.resp_pkt->data;
        wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
        qos_seq = status->qos_seq_ctr;
 
@@ -1633,8 +1654,7 @@ out:
                       wakeup_reasons);
 
        /* qos_seq might point inside resp_pkt, so free it only now */
-       if (get_status_cmd.resp_pkt)
-               iwl_free_resp(&get_status_cmd);
+       kfree(status);
 
        /* the FW might have updated the regdomain */
        iwl_mvm_update_changed_regdom(mvm);
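
The refactor above moves the WOWLAN_GET_STATUSES exchange into a helper that returns a heap-allocated status, so the caller owns a single pointer and frees it with kfree() instead of keeping the raw response packet alive. A userspace sketch of that ownership transfer (function and struct names are made up):

    #include <stdlib.h>

    struct wowlan_status { unsigned int wakeup_reasons; };

    /* the helper owns the firmware exchange and returns an allocated
     * copy, or NULL on failure, giving the caller one cleanup path */
    static struct wowlan_status *send_wowlan_get_status(void)
    {
            struct wowlan_status *s = malloc(sizeof(*s));

            if (!s)
                    return NULL;
            s->wakeup_reasons = 0;  /* stand-in for parsing the response */
            return s;
    }

    static void exit_work(void)
    {
            struct wowlan_status *status = send_wowlan_get_status();

            if (!status)
                    goto out;
            /* ... consume status->wakeup_reasons ... */
    out:
            free(status);   /* free(NULL) is a no-op, like kfree(NULL) */
    }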
@@ -1685,6 +1705,13 @@ int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
        return _iwl_mvm_exit_d0i3(mvm);
 }
 
+#define IWL_MVM_D0I3_OPS                                       \
+       .enter_d0i3 = iwl_mvm_enter_d0i3,                       \
+       .exit_d0i3 = iwl_mvm_exit_d0i3,
+#else /* CONFIG_PM */
+#define IWL_MVM_D0I3_OPS
+#endif /* CONFIG_PM */
+
 #define IWL_MVM_COMMON_OPS                                     \
        /* these could be differentiated */                     \
        .async_cb = iwl_mvm_async_cb,                           \
@@ -1695,8 +1722,7 @@ int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
        .nic_error = iwl_mvm_nic_error,                         \
        .cmd_queue_full = iwl_mvm_cmd_queue_full,               \
        .nic_config = iwl_mvm_nic_config,                       \
-       .enter_d0i3 = iwl_mvm_enter_d0i3,                       \
-       .exit_d0i3 = iwl_mvm_exit_d0i3,                         \
+       IWL_MVM_D0I3_OPS                                        \
        /* as we only register one, these MUST be common! */    \
        .start = iwl_op_mode_mvm_start,                         \
        .stop = iwl_op_mode_mvm_stop
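
Wrapping the d0i3 entry points in IWL_MVM_D0I3_OPS keeps the common ops macro unconditional while the PM-only members simply expand away without CONFIG_PM. A minimal sketch of contributing designated initializers through a conditional macro:

    struct ops {
            int (*start)(void);
            int (*enter_d0i3)(void);
            int (*exit_d0i3)(void);
    };

    static int do_start(void) { return 0; }

    #ifdef CONFIG_PM
    static int do_enter(void) { return 0; }
    static int do_exit(void)  { return 0; }
    #define D0I3_OPS .enter_d0i3 = do_enter, .exit_d0i3 = do_exit,
    #else
    /* expands to nothing: the members stay NULL and no PM-only
     * functions are referenced, so they can be compiled out too */
    #define D0I3_OPS
    #endif

    #define COMMON_OPS D0I3_OPS .start = do_start,

    static const struct ops mvm_ops = { COMMON_OPS };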
index c11fe2621d510c906771c6cb7d48168dbd5b9e6a..5a0a28fd762d7557071694b449cc1013a56ec638 100644
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 690559bdf421b26f5a8abb41b784b19070e670b2..5e62b97af48b35c53abc99cc3fa6f74e35c0f829 100644
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 8169d1450b3b9b3954ff45e4b32d44fd3062788b..7a98e1a1dc4074f45c5e9bb55fa9d69839554e5e 100644
@@ -117,20 +117,42 @@ static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
 {
        struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
        struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+       struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
        bool vht_ena = vht_cap && vht_cap->vht_supported;
        u16 flags = 0;
 
        if (mvm->cfg->ht_params->stbc &&
-           (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
-           ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) ||
-            (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))))
-               flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+           (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) {
+               if (he_cap && he_cap->has_he) {
+                       if (he_cap->he_cap_elem.phy_cap_info[2] &
+                           IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
+                               flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+
+                       if (he_cap->he_cap_elem.phy_cap_info[7] &
+                           IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ)
+                               flags |= IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK;
+               } else if ((ht_cap &&
+                           (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) ||
+                          (vht_ena &&
+                           (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)))
+                       flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+       }
 
        if (mvm->cfg->ht_params->ldpc &&
            ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)) ||
             (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
                flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
 
+       if (he_cap && he_cap->has_he &&
+           (he_cap->he_cap_elem.phy_cap_info[3] &
+            IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK)) {
+               flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;
+
+               if (he_cap->he_cap_elem.phy_cap_info[3] &
+                   IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2)
+                       flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK;
+       }
+
        return flags;
 }
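
rs_fw_set_config_flags() now prefers the peer's HE PHY capability bits for STBC (with a dedicated flag for 160 MHz) and enables DCM only at the spatial-stream counts the peer advertises. A small sketch of building such a TLC flag mask from capability bits (all constants below are invented for illustration):

    #include <stdbool.h>
    #include <stdint.h>

    #define PEER_STBC_UNDER_80  (1u << 0)
    #define PEER_STBC_ABOVE_80  (1u << 1)
    #define PEER_DCM_NSS_1      (1u << 2)
    #define PEER_DCM_NSS_2      (1u << 3)

    #define TLC_STBC            (1u << 0)
    #define TLC_STBC_160        (1u << 1)
    #define TLC_DCM_NSS_1       (1u << 2)
    #define TLC_DCM_NSS_2       (1u << 3)

    static uint16_t set_config_flags(bool has_he, uint32_t peer, int tx_ant)
    {
            uint16_t flags = 0;

            /* STBC requires at least two TX antennas on our side */
            if (tx_ant > 1 && has_he) {
                    if (peer & PEER_STBC_UNDER_80)
                            flags |= TLC_STBC;
                    if (peer & PEER_STBC_ABOVE_80)
                            flags |= TLC_STBC_160;
            }

            /* two-stream DCM only on top of one-stream DCM support */
            if (has_he && (peer & PEER_DCM_NSS_1)) {
                    flags |= TLC_DCM_NSS_1;
                    if (peer & PEER_DCM_NSS_2)
                            flags |= TLC_DCM_NSS_2;
            }
            return flags;
    }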
 
@@ -311,7 +333,7 @@ out:
 }
 
 void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                    enum nl80211_band band)
+                    enum nl80211_band band, bool update)
 {
        struct ieee80211_hw *hw = mvm->hw;
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -320,7 +342,8 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        struct ieee80211_supported_band *sband;
        struct iwl_tlc_config_cmd cfg_cmd = {
                .sta_id = mvmsta->sta_id,
-               .max_ch_width = rs_fw_bw_from_sta_bw(sta),
+               .max_ch_width = update ?
+                       rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20,
                .flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)),
                .chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
                .max_mpdu_len = cpu_to_le16(sta->max_amsdu_len),
index 30cfd7d50bc939fae1d0111c2c4694f755c9c947..2c75f51a04e4d0bafe48a3536d58a3ac814692dd 100644
@@ -1276,7 +1276,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                       (unsigned long)(lq_sta->last_tx +
                                       (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
                IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
-               iwl_mvm_rs_rate_init(mvm, sta, info->band);
+               iwl_mvm_rs_rate_init(mvm, sta, info->band, true);
                return;
        }
        lq_sta->last_tx = jiffies;
@@ -2859,9 +2859,8 @@ void rs_update_last_rssi(struct iwl_mvm *mvm,
 static void rs_initialize_lq(struct iwl_mvm *mvm,
                             struct ieee80211_sta *sta,
                             struct iwl_lq_sta *lq_sta,
-                            enum nl80211_band band)
+                            enum nl80211_band band, bool update)
 {
-       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_scale_tbl_info *tbl;
        struct rs_rate *rate;
        u8 active_tbl = 0;
@@ -2890,8 +2889,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
        rs_set_expected_tpt_table(lq_sta, tbl);
        rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
        /* TODO restore station should remember the lq cmd */
-       iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq,
-                           mvmsta->sta_state < IEEE80211_STA_AUTHORIZED);
+       iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, !update);
 }
 
 static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
@@ -3144,7 +3142,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
  * Called after adding a new station to initialize rate scaling
  */
 static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                            enum nl80211_band band)
+                            enum nl80211_band band, bool update)
 {
        int i, j;
        struct ieee80211_hw *hw = mvm->hw;
@@ -3215,7 +3213,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 
        /* These values will be overridden later */
        lq_sta->lq.single_stream_ant_msk =
-               first_antenna(iwl_mvm_get_valid_tx_ant(mvm));
+               iwl_mvm_bt_coex_get_single_ant_msk(mvm, iwl_mvm_get_valid_tx_ant(mvm));
        lq_sta->lq.dual_stream_ant_msk = ANT_AB;
 
        /* as default allow aggregation for all tids */
@@ -3224,7 +3222,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        iwl_mvm_reset_frame_stats(mvm);
 #endif
-       rs_initialize_lq(mvm, sta, lq_sta, band);
+       rs_initialize_lq(mvm, sta, lq_sta, band, update);
 }
 
 static void rs_drv_rate_update(void *mvm_r,
@@ -3244,7 +3242,7 @@ static void rs_drv_rate_update(void *mvm_r,
        for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
                ieee80211_stop_tx_ba_session(sta, tid);
 
-       iwl_mvm_rs_rate_init(mvm, sta, sband->band);
+       iwl_mvm_rs_rate_init(mvm, sta, sband->band, true);
 }
 
 #ifdef CONFIG_MAC80211_DEBUGFS
@@ -3578,7 +3576,8 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
 
-       if (num_of_ant(initial_rate->ant) == 1)
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2) &&
+           num_of_ant(initial_rate->ant) == 1)
                lq_cmd->single_stream_ant_msk = initial_rate->ant;
 
        lq_cmd->agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
@@ -4098,12 +4097,12 @@ static const struct rate_control_ops rs_mvm_ops_drv = {
 };
 
 void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                         enum nl80211_band band)
+                         enum nl80211_band band, bool update)
 {
        if (iwl_mvm_has_tlc_offload(mvm))
-               rs_fw_rate_init(mvm, sta, band);
+               rs_fw_rate_init(mvm, sta, band, update);
        else
-               rs_drv_rate_init(mvm, sta, band);
+               rs_drv_rate_init(mvm, sta, band, update);
 }
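
All rate-init entry points now take a bool update distinguishing a first initialization from a later refresh: the firmware-TLC path starts a new station at 20 MHz until the first update, and the driver path sends the initial LQ command synchronously (!update). A toy sketch of the dispatch and of how the flag is consumed (behavior simplified; printfs stand in for the real commands):

    #include <stdbool.h>
    #include <stdio.h>

    static void fw_rate_init(bool update)
    {
            /* first init caps the width at 20 MHz; updates use full caps */
            int width_mhz = update ? 80 : 20;

            printf("fw TLC: width=%d MHz\n", width_mhz);
    }

    static void drv_rate_init(bool update)
    {
            /* the very first LQ command is sent synchronously */
            printf("driver RS: sync=%d\n", !update);
    }

    static void rate_init(bool has_tlc_offload, bool update)
    {
            if (has_tlc_offload)
                    fw_rate_init(update);
            else
                    drv_rate_init(update);
    }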
 
 int iwl_mvm_rate_control_register(void)
index d2cf484e2b73be77bd61006d0aeb77683baec58d..d0f47899f2849505eb60d5343f19c216f8e3fb3c 100644
@@ -420,7 +420,7 @@ struct iwl_lq_sta {
 
 /* Initialize station's rate scaling information after adding station */
 void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                         enum nl80211_band band);
+                         enum nl80211_band band, bool init);
 
 /* Notify RS about Tx status */
 void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
@@ -461,7 +461,7 @@ void rs_remove_sta_debugfs(void *mvm, void *mvm_sta);
 
 void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta);
 void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                    enum nl80211_band band);
+                    enum nl80211_band band, bool update);
 int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
                        bool enable);
 void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
index bfb163419c679c2a98abb4fc09bf85e4c62c4f75..ef624833cf1b7bdda6c9bcffed29cc6d0afb3cb6 100644
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -438,13 +433,14 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
                struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
                struct ieee80211_vif *tx_blocked_vif =
                        rcu_dereference(mvm->csa_tx_blocked_vif);
+               struct iwl_fw_dbg_trigger_tlv *trig;
+               struct ieee80211_vif *vif = mvmsta->vif;
 
                /* We have tx blocked stations (with CS bit). If we heard
                 * frames from a blocked station on a new channel we can
                 * TX to it again.
                 */
-               if (unlikely(tx_blocked_vif) &&
-                   mvmsta->vif == tx_blocked_vif) {
+               if (unlikely(tx_blocked_vif) && vif == tx_blocked_vif) {
                        struct iwl_mvm_vif *mvmvif =
                                iwl_mvm_vif_from_mac80211(tx_blocked_vif);
 
@@ -455,23 +451,18 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
 
                rs_update_last_rssi(mvm, mvmsta, rx_status);
 
-               if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
-                   ieee80211_is_beacon(hdr->frame_control)) {
-                       struct iwl_fw_dbg_trigger_tlv *trig;
+               trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
+                                            ieee80211_vif_to_wdev(vif),
+                                            FW_DBG_TRIGGER_RSSI);
+
+               if (trig && ieee80211_is_beacon(hdr->frame_control)) {
                        struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
-                       bool trig_check;
                        s32 rssi;
 
-                       trig = iwl_fw_dbg_get_trigger(mvm->fw,
-                                                     FW_DBG_TRIGGER_RSSI);
                        rssi_trig = (void *)trig->data;
                        rssi = le32_to_cpu(rssi_trig->rssi);
 
-                       trig_check =
-                               iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-                                                             ieee80211_vif_to_wdev(mvmsta->vif),
-                                                             trig);
-                       if (trig_check && rx_status->signal < rssi)
+                       if (rx_status->signal < rssi)
                                iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
                                                        NULL);
                }
@@ -698,15 +689,12 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
        struct iwl_fw_dbg_trigger_stats *trig_stats;
        u32 trig_offset, trig_thold;
 
-       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_STATS))
+       trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_STATS);
+       if (!trig)
                return;
 
-       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS);
        trig_stats = (void *)trig->data;
 
-       if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
-               return;
-
        trig_offset = le32_to_cpu(trig_stats->stop_offset);
        trig_thold = le32_to_cpu(trig_stats->stop_threshold);
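
The same simplification recurs throughout these files: the separate "is the trigger enabled" and "is it stopped" calls collapse into one iwl_fw_dbg_trigger_on() that hands back the trigger or NULL. A sketch of that combined-lookup idiom (types heavily simplified):

    #include <stdbool.h>
    #include <stddef.h>

    struct trigger {
            bool stopped;
            unsigned char data[16];
    };

    static struct trigger triggers[8];

    static struct trigger *get_trigger(unsigned int id)
    {
            return id < 8 ? &triggers[id] : NULL;
    }

    /* returns the trigger only if it exists and may currently fire,
     * so callers do one NULL check instead of two separate calls */
    static struct trigger *trigger_on(unsigned int id)
    {
            struct trigger *t = get_trigger(id);

            if (!t || t->stopped)
                    return NULL;
            return t;
    }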
 
index b53148f972a4a4e5c6e04d34ed7c052144235282..26ac9402568de4703b24f5917a56e7b113a60979 100644
@@ -283,6 +283,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                    !(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
                        return 0;
 
+               if (mvm->trans->cfg->gen2 &&
+                   !(status & RX_MPDU_RES_STATUS_MIC_OK))
+                       stats->flag |= RX_FLAG_MMIC_ERROR;
+
                *crypt_len = IEEE80211_TKIP_IV_LEN;
                /* fall through if TTAK OK */
        case IWL_RX_MPDU_STATUS_SEC_WEP:
@@ -294,8 +298,11 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                                IWL_RX_MPDU_STATUS_SEC_WEP)
                        *crypt_len = IEEE80211_WEP_IV_LEN;
 
-               if (pkt_flags & FH_RSCSR_RADA_EN)
+               if (pkt_flags & FH_RSCSR_RADA_EN) {
                        stats->flag |= RX_FLAG_ICV_STRIPPED;
+                       if (mvm->trans->cfg->gen2)
+                               stats->flag |= RX_FLAG_MMIC_STRIPPED;
+               }
 
                return 0;
        case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
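
On gen2 devices the hardware verifies (and, with RADA, strips) the TKIP Michael MIC, so the hunk above translates the descriptor status into mac80211's RX_FLAG_MMIC_ERROR / RX_FLAG_MMIC_STRIPPED. A sketch of that status-to-flags translation (bit values are invented):

    #include <stdbool.h>
    #include <stdint.h>

    #define STATUS_MIC_OK     (1u << 0)
    #define PKT_RADA_EN       (1u << 1)

    #define RXF_MMIC_ERROR    (1u << 0)
    #define RXF_ICV_STRIPPED  (1u << 1)
    #define RXF_MMIC_STRIPPED (1u << 2)

    static uint32_t tkip_rx_flags(bool gen2, uint32_t status, uint32_t pkt)
    {
            uint32_t flags = 0;

            /* gen2 hardware checks the Michael MIC; surface failures */
            if (gen2 && !(status & STATUS_MIC_OK))
                    flags |= RXF_MMIC_ERROR;

            if (pkt & PKT_RADA_EN) {
                    flags |= RXF_ICV_STRIPPED;
                    /* gen2 strips the MIC as well when RADA is on */
                    if (gen2)
                            flags |= RXF_MMIC_STRIPPED;
            }
            return flags;
    }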
@@ -856,6 +863,444 @@ static void iwl_mvm_flip_address(u8 *addr)
        ether_addr_copy(addr, mac_addr);
 }
 
+static void iwl_mvm_decode_he_sigb(struct iwl_mvm *mvm,
+                                  struct iwl_rx_mpdu_desc *desc,
+                                  u32 rate_n_flags,
+                                  struct ieee80211_radiotap_he_mu *he_mu)
+{
+       u32 sigb0, sigb1;
+       u16 sigb2;
+
+       if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+               sigb0 = le32_to_cpu(desc->v3.sigb_common0);
+               sigb1 = le32_to_cpu(desc->v3.sigb_common1);
+       } else {
+               sigb0 = le32_to_cpu(desc->v1.sigb_common0);
+               sigb1 = le32_to_cpu(desc->v1.sigb_common1);
+       }
+
+       sigb2 = le16_to_cpu(desc->sigb_common2);
+
+       if (FIELD_GET(IWL_RX_HE_SIGB_COMMON2_CH1_CRC_OK, sigb2)) {
+               he_mu->flags1 |=
+                       cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN |
+                                   IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN);
+
+               he_mu->flags1 |=
+                       le16_encode_bits(FIELD_GET(IWL_RX_HE_SIGB_COMMON2_CH1_CTR_RU,
+                                                  sigb2),
+                                        IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU);
+
+               he_mu->ru_ch1[0] = FIELD_GET(IWL_RX_HE_SIGB_COMMON0_CH1_RU0,
+                                            sigb0);
+               he_mu->ru_ch1[1] = FIELD_GET(IWL_RX_HE_SIGB_COMMON1_CH1_RU1,
+                                            sigb1);
+               he_mu->ru_ch1[2] = FIELD_GET(IWL_RX_HE_SIGB_COMMON0_CH1_RU2,
+                                            sigb0);
+               he_mu->ru_ch1[3] = FIELD_GET(IWL_RX_HE_SIGB_COMMON1_CH1_RU3,
+                                            sigb1);
+       }
+
+       if (FIELD_GET(IWL_RX_HE_SIGB_COMMON2_CH2_CRC_OK, sigb2) &&
+           (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) != RATE_MCS_CHAN_WIDTH_20) {
+               he_mu->flags1 |=
+                       cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN |
+                                   IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN);
+
+               he_mu->flags2 |=
+                       le16_encode_bits(FIELD_GET(IWL_RX_HE_SIGB_COMMON2_CH2_CTR_RU,
+                                                  sigb2),
+                                        IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU);
+
+               he_mu->ru_ch2[0] = FIELD_GET(IWL_RX_HE_SIGB_COMMON0_CH2_RU0,
+                                            sigb0);
+               he_mu->ru_ch2[1] = FIELD_GET(IWL_RX_HE_SIGB_COMMON1_CH2_RU1,
+                                            sigb1);
+               he_mu->ru_ch2[2] = FIELD_GET(IWL_RX_HE_SIGB_COMMON0_CH2_RU2,
+                                            sigb0);
+               he_mu->ru_ch2[3] = FIELD_GET(IWL_RX_HE_SIGB_COMMON1_CH2_RU3,
+                                            sigb1);
+       }
+}
+
+static void
+iwl_mvm_decode_he_phy_ru_alloc(u64 he_phy_data, u32 rate_n_flags,
+                              struct ieee80211_radiotap_he *he,
+                              struct ieee80211_radiotap_he_mu *he_mu,
+                              struct ieee80211_rx_status *rx_status)
+{
+       /*
+        * Unfortunately, we have to leave the mac80211 data
+        * incorrect for the case that we receive an HE-MU
+        * transmission and *don't* have the HE phy data (due
+        * to the bits being used for TSF). This shouldn't
+        * happen though as management frames where we need
+        * the TSF/timers are not transmitted in HE-MU.
+        */
+       u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
+       u8 offs = 0;
+
+       rx_status->bw = RATE_INFO_BW_HE_RU;
+
+       he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
+
+       switch (ru) {
+       case 0 ... 36:
+               rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+               offs = ru;
+               break;
+       case 37 ... 52:
+               rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+               offs = ru - 37;
+               break;
+       case 53 ... 60:
+               rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+               offs = ru - 53;
+               break;
+       case 61 ... 64:
+               rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+               offs = ru - 61;
+               break;
+       case 65 ... 66:
+               rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+               offs = ru - 65;
+               break;
+       case 67:
+               rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+               break;
+       case 68:
+               rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+               break;
+       }
+       he->data2 |= le16_encode_bits(offs,
+                                     IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
+       he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN |
+                                IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN);
+       if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
+               he->data2 |=
+                       cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
+
+       if (he_mu) {
+#define CHECK_BW(bw) \
+       BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \
+                    RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS)
+               CHECK_BW(20);
+               CHECK_BW(40);
+               CHECK_BW(80);
+               CHECK_BW(160);
+               he_mu->flags2 |=
+                       le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
+                                                  rate_n_flags),
+                                        IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
+       }
+}
+
+static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
+                                      struct iwl_rx_mpdu_desc *desc,
+                                      struct ieee80211_radiotap_he *he,
+                                      struct ieee80211_radiotap_he_mu *he_mu,
+                                      struct ieee80211_rx_status *rx_status,
+                                      u64 he_phy_data, u32 rate_n_flags,
+                                      int queue)
+{
+       u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+       bool sigb_data;
+       u16 d1known = IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
+                     IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
+                     IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN |
+                     IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN |
+                     IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN;
+       u16 d2known = IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
+                     IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
+                     IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN;
+
+       he->data1 |= cpu_to_le16(d1known);
+       he->data2 |= cpu_to_le16(d2known);
+       he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_BSS_COLOR_MASK,
+                                               he_phy_data),
+                                     IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR);
+       he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_UPLINK,
+                                               he_phy_data),
+                                     IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
+       he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_LDPC_EXT_SYM,
+                                               he_phy_data),
+                                     IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG);
+       he->data4 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SPATIAL_REUSE_MASK,
+                                               he_phy_data),
+                                     IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE);
+       he->data5 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PRE_FEC_PAD_MASK,
+                                               he_phy_data),
+                                     IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD);
+       he->data5 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PE_DISAMBIG,
+                                               he_phy_data),
+                                     IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG);
+       he->data6 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_TXOP_DUR_MASK,
+                                               he_phy_data),
+                                     IEEE80211_RADIOTAP_HE_DATA6_TXOP);
+       he->data6 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_DOPPLER,
+                                               he_phy_data),
+                                     IEEE80211_RADIOTAP_HE_DATA6_DOPPLER);
+
+       switch (he_type) {
+       case RATE_MCS_HE_TYPE_MU:
+               he_mu->flags1 |=
+                       le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_DCM,
+                                                  he_phy_data),
+                                        IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
+               he_mu->flags1 |=
+                       le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_MCS_MASK,
+                                                  he_phy_data),
+                                        IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
+               he_mu->flags2 |=
+                       le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIBG_SYM_OR_USER_NUM_MASK,
+                                                 he_phy_data),
+                                       IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
+               he_mu->flags2 |=
+                       le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_COMPRESSION,
+                                                  he_phy_data),
+                                        IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
+               he_mu->flags2 |=
+                       le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_PREAMBLE_PUNC_TYPE_MASK,
+                                                  he_phy_data),
+                                        IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
+
+               sigb_data = FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK,
+                                     he_phy_data) ==
+                               IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO;
+               if (sigb_data)
+                       iwl_mvm_decode_he_sigb(mvm, desc, rate_n_flags, he_mu);
+               /* fall through */
+       case RATE_MCS_HE_TYPE_TRIG:
+               he->data2 |=
+                       cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+               he->data5 |=
+                       le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
+                                                  he_phy_data),
+                                        IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
+               break;
+       case RATE_MCS_HE_TYPE_SU:
+       case RATE_MCS_HE_TYPE_EXT_SU:
+               he->data1 |=
+                       cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN);
+               he->data3 |=
+                       le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_BEAM_CHNG,
+                                                  he_phy_data),
+                                        IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE);
+               break;
+       }
+
+       switch (FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK, he_phy_data)) {
+       case IWL_RX_HE_PHY_INFO_TYPE_MU:
+       case IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO:
+       case IWL_RX_HE_PHY_INFO_TYPE_TB:
+               iwl_mvm_decode_he_phy_ru_alloc(he_phy_data, rate_n_flags,
+                                              he, he_mu, rx_status);
+               break;
+       default:
+               /* nothing */
+               break;
+       }
+}
+
+static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
+                         struct iwl_rx_mpdu_desc *desc,
+                         u32 rate_n_flags, u16 phy_info, int queue)
+{
+       struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+       /* this is invalid e.g. because puncture type doesn't allow 0b11 */
+#define HE_PHY_DATA_INVAL ((u64)-1)
+       u64 he_phy_data = HE_PHY_DATA_INVAL;
+       struct ieee80211_radiotap_he *he = NULL;
+       struct ieee80211_radiotap_he_mu *he_mu = NULL;
+       u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+       u8 stbc, ltf;
+       static const struct ieee80211_radiotap_he known = {
+               .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+                                    IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+                                    IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+                                    IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
+               .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+                                    IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
+       };
+       static const struct ieee80211_radiotap_he_mu mu_known = {
+               .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
+                                     IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
+                                     IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
+                                     IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
+               .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN |
+                                     IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
+       };
+       unsigned int radiotap_len = 0;
+
+       he = skb_put_data(skb, &known, sizeof(known));
+       radiotap_len += sizeof(known);
+       rx_status->flag |= RX_FLAG_RADIOTAP_HE;
+
+       if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) {
+               if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+                       he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+               else
+                       he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+               if (he_type == RATE_MCS_HE_TYPE_MU) {
+                       he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known));
+                       radiotap_len += sizeof(mu_known);
+                       rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+               }
+       }
+
+       /* temporarily hide the radiotap data */
+       __skb_pull(skb, radiotap_len);
+
+       if (he_phy_data != HE_PHY_DATA_INVAL &&
+           he_type == RATE_MCS_HE_TYPE_SU) {
+               /* report the AMPDU-EOF bit on single frames */
+               if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+                       rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+                       rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+                       if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF, he_phy_data))
+                               rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+               }
+       }
+
+       if (he_phy_data != HE_PHY_DATA_INVAL)
+               iwl_mvm_decode_he_phy_data(mvm, desc, he, he_mu, rx_status,
+                                          he_phy_data, rate_n_flags, queue);
+
+       /* update aggregation data for the monitor's sake on the default queue */
+       if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+               bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
+
+               /* toggle is switched whenever new aggregation starts */
+               if (toggle_bit != mvm->ampdu_toggle &&
+                   he_phy_data != HE_PHY_DATA_INVAL &&
+                   (he_type == RATE_MCS_HE_TYPE_MU ||
+                    he_type == RATE_MCS_HE_TYPE_SU)) {
+                       rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+                       if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
+                                     he_phy_data))
+                               rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+               }
+       }
+
+       if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
+           rate_n_flags & RATE_MCS_HE_106T_MSK) {
+               rx_status->bw = RATE_INFO_BW_HE_RU;
+               rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+       }
+
+       /* the actual data is filled in by mac80211 */
+       if (he_type == RATE_MCS_HE_TYPE_SU ||
+           he_type == RATE_MCS_HE_TYPE_EXT_SU)
+               he->data1 |=
+                       cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
+
+       stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS;
+       rx_status->nss =
+               ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+                                       RATE_VHT_MCS_NSS_POS) + 1;
+       rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+       rx_status->encoding = RX_ENC_HE;
+       rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+       if (rate_n_flags & RATE_MCS_BF_MSK)
+               rx_status->enc_flags |= RX_ENC_FLAG_BF;
+
+       rx_status->he_dcm =
+               !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
+
+#define CHECK_TYPE(F)                                                  \
+       BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F !=        \
+                    (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
+
+       CHECK_TYPE(SU);
+       CHECK_TYPE(EXT_SU);
+       CHECK_TYPE(MU);
+       CHECK_TYPE(TRIG);
+
+       he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
+
+       if (rate_n_flags & RATE_MCS_BF_MSK)
+               he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
+
+       switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
+               RATE_MCS_HE_GI_LTF_POS) {
+       case 0:
+               if (he_type == RATE_MCS_HE_TYPE_TRIG)
+                       rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+               else
+                       rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+               if (he_type == RATE_MCS_HE_TYPE_MU)
+                       ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+               else
+                       ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X;
+               break;
+       case 1:
+               if (he_type == RATE_MCS_HE_TYPE_TRIG)
+                       rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+               else
+                       rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+               ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
+               break;
+       case 2:
+               if (he_type == RATE_MCS_HE_TYPE_TRIG) {
+                       rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+                       ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+               } else {
+                       rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+                       ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
+               }
+               break;
+       case 3:
+               if ((he_type == RATE_MCS_HE_TYPE_SU ||
+                    he_type == RATE_MCS_HE_TYPE_EXT_SU) &&
+                   rate_n_flags & RATE_MCS_SGI_MSK)
+                       rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+               else
+                       rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+               ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+               break;
+       }
+
+       he->data5 |= le16_encode_bits(ltf, IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
+
+       if (he_type == RATE_MCS_HE_TYPE_SU ||
+           he_type == RATE_MCS_HE_TYPE_EXT_SU) {
+               u16 val;
+
+               /* LTF syms correspond to streams */
+               he->data2 |=
+                       cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+               switch (rx_status->nss) {
+               case 1:
+                       val = 0;
+                       break;
+               case 2:
+                       val = 1;
+                       break;
+               case 3:
+               case 4:
+                       val = 2;
+                       break;
+               case 5:
+               case 6:
+                       val = 3;
+                       break;
+               case 7:
+               case 8:
+                       val = 4;
+                       break;
+               default:
+                       WARN_ONCE(1, "invalid nss: %d\n",
+                                 rx_status->nss);
+                       val = 0;
+               }
+
+               he->data5 |=
+                       le16_encode_bits(val,
+                                        IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
+       }
+}
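
The new iwl_mvm_rx_he() above gathers all HE radiotap construction in one place; its RU-allocation switch maps the firmware's flat RU index into a size class plus an offset within that class. A standalone sketch of that mapping, using the same 0..36 / 37..52 / 53..60 / 61..64 / 65..66 / 67 / 68 ranges:

    #include <stdio.h>

    struct ru_alloc {
            int tones;
            int offs;
    };

    /* split the flat RU index into allocation size and offset */
    static struct ru_alloc decode_ru(unsigned int ru)
    {
            if (ru <= 36)
                    return (struct ru_alloc){ 26, ru };
            if (ru <= 52)
                    return (struct ru_alloc){ 52, ru - 37 };
            if (ru <= 60)
                    return (struct ru_alloc){ 106, ru - 53 };
            if (ru <= 64)
                    return (struct ru_alloc){ 242, ru - 61 };
            if (ru <= 66)
                    return (struct ru_alloc){ 484, ru - 65 };
            if (ru == 67)
                    return (struct ru_alloc){ 996, 0 };
            return (struct ru_alloc){ 2 * 996, 0 };  /* index 68 */
    }

    int main(void)
    {
            struct ru_alloc a = decode_ru(40);

            printf("%d-tone RU, offset %d\n", a.tones, a.offs);
            return 0;
    }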
+
 void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                        struct iwl_rx_cmd_buffer *rxb, int queue)
 {
@@ -869,12 +1314,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
        struct ieee80211_sta *sta = NULL;
        struct sk_buff *skb;
        u8 crypt_len = 0, channel, energy_a, energy_b;
-       struct ieee80211_radiotap_he *he = NULL;
-       struct ieee80211_radiotap_he_mu *he_mu = NULL;
-       u32 he_type = 0xffffffff;
-       /* this is invalid e.g. because puncture type doesn't allow 0b11 */
-#define HE_PHY_DATA_INVAL ((u64)-1)
-       u64 he_phy_data = HE_PHY_DATA_INVAL;
        size_t desc_size;
 
        if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
@@ -918,49 +1357,24 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 
        rx_status = IEEE80211_SKB_RXCB(skb);
 
-       if (rate_n_flags & RATE_MCS_HE_MSK) {
-               static const struct ieee80211_radiotap_he known = {
-                       .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
-                                            IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
-                                            IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
-                                            IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
-                       .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
-                                            IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
-               };
-               static const struct ieee80211_radiotap_he_mu mu_known = {
-                       .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
-                                             IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
-                                             IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
-                                             IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
-                       .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN),
-               };
-               unsigned int radiotap_len = 0;
-
-               he = skb_put_data(skb, &known, sizeof(known));
-               radiotap_len += sizeof(known);
-               rx_status->flag |= RX_FLAG_RADIOTAP_HE;
-
-               he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
-
-               if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) {
-                       if (mvm->trans->cfg->device_family >=
-                           IWL_DEVICE_FAMILY_22560)
-                               he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
-                       else
-                               he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
-
-                       if (he_type == RATE_MCS_HE_TYPE_MU) {
-                               he_mu = skb_put_data(skb, &mu_known,
-                                                    sizeof(mu_known));
-                               radiotap_len += sizeof(mu_known);
-                               rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
-                       }
-               }
-
-               /* temporarily hide the radiotap data */
-               __skb_pull(skb, radiotap_len);
+       /* This may be overridden by iwl_mvm_rx_he() to HE_RU */
+       switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+       case RATE_MCS_CHAN_WIDTH_20:
+               break;
+       case RATE_MCS_CHAN_WIDTH_40:
+               rx_status->bw = RATE_INFO_BW_40;
+               break;
+       case RATE_MCS_CHAN_WIDTH_80:
+               rx_status->bw = RATE_INFO_BW_80;
+               break;
+       case RATE_MCS_CHAN_WIDTH_160:
+               rx_status->bw = RATE_INFO_BW_160;
+               break;
        }
 
+       if (rate_n_flags & RATE_MCS_HE_MSK)
+               iwl_mvm_rx_he(mvm, skb, desc, rate_n_flags, phy_info, queue);
+
        rx_status = IEEE80211_SKB_RXCB(skb);
 
        if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc,
@@ -995,53 +1409,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                rx_status->mactime = tsf_on_air_rise;
                /* TSF as indicated by the firmware is at INA time */
                rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
-       } else if (he_type == RATE_MCS_HE_TYPE_SU) {
-               u64 he_phy_data;
-
-               if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
-                       he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
-               else
-                       he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
-
-               he->data1 |=
-                       cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
-               if (FIELD_GET(IWL_RX_HE_PHY_UPLINK,
-                             he_phy_data))
-                       he->data3 |=
-                               cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
-
-               if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
-                       rx_status->ampdu_reference = mvm->ampdu_ref;
-                       mvm->ampdu_ref++;
-
-                       rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
-                       rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
-                       if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
-                                     he_phy_data))
-                               rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
-               }
-       } else if (he_mu && he_phy_data != HE_PHY_DATA_INVAL) {
-               he_mu->flags1 |=
-                       le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK,
-                                                  he_phy_data),
-                                        IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
-               he_mu->flags1 |=
-                       le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_DCM,
-                                                  he_phy_data),
-                                        IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
-               he_mu->flags1 |=
-                       le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_MCS_MASK,
-                                                  he_phy_data),
-                                        IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
-               he_mu->flags2 |=
-                       le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_COMPRESSION,
-                                                  he_phy_data),
-                                        IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
-               he_mu->flags2 |=
-                       le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK,
-                                                  he_phy_data),
-                                        IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
        }
+
        rx_status->device_timestamp = gp2_on_air_rise;
        rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
                NL80211_BAND_2GHZ;
@@ -1066,15 +1435,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                if (toggle_bit != mvm->ampdu_toggle) {
                        mvm->ampdu_ref++;
                        mvm->ampdu_toggle = toggle_bit;
-
-                       if (he_phy_data != HE_PHY_DATA_INVAL &&
-                           he_type == RATE_MCS_HE_TYPE_MU) {
-                               rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
-                               if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
-                                             he_phy_data))
-                                       rx_status->flag |=
-                                               RX_FLAG_AMPDU_EOF_BIT;
-                       }
                }
        }
 
@@ -1103,6 +1463,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
                               IWL_RX_MPDU_REORDER_BAID_MASK) >>
                               IWL_RX_MPDU_REORDER_BAID_SHIFT);
+               struct iwl_fw_dbg_trigger_tlv *trig;
+               struct ieee80211_vif *vif = mvmsta->vif;
 
                if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
                    !is_multicast_ether_addr(hdr->addr1) &&
@@ -1115,8 +1477,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                 * frames from a blocked station on a new channel we can
                 * TX to it again.
                 */
-               if (unlikely(tx_blocked_vif) &&
-                   tx_blocked_vif == mvmsta->vif) {
+               if (unlikely(tx_blocked_vif) && tx_blocked_vif == vif) {
                        struct iwl_mvm_vif *mvmvif =
                                iwl_mvm_vif_from_mac80211(tx_blocked_vif);
 
@@ -1127,23 +1488,18 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 
                rs_update_last_rssi(mvm, mvmsta, rx_status);
 
-               if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
-                   ieee80211_is_beacon(hdr->frame_control)) {
-                       struct iwl_fw_dbg_trigger_tlv *trig;
+               trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
+                                            ieee80211_vif_to_wdev(vif),
+                                            FW_DBG_TRIGGER_RSSI);
+
+               if (trig && ieee80211_is_beacon(hdr->frame_control)) {
                        struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
-                       bool trig_check;
                        s32 rssi;
 
-                       trig = iwl_fw_dbg_get_trigger(mvm->fw,
-                                                     FW_DBG_TRIGGER_RSSI);
                        rssi_trig = (void *)trig->data;
                        rssi = le32_to_cpu(rssi_trig->rssi);
 
-                       trig_check =
-                               iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-                                                             ieee80211_vif_to_wdev(mvmsta->vif),
-                                                             trig);
-                       if (trig_check && rx_status->signal < rssi)
+                       if (rx_status->signal < rssi)
                                iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
                                                        NULL);
                }
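
The hunk above is one instance of a pattern repeated throughout this series: the three-step debug-trigger check (iwl_fw_dbg_trigger_enabled(), iwl_fw_dbg_get_trigger(), iwl_fw_dbg_trigger_check_stop()) collapses into a single iwl_fw_dbg_trigger_on() call which, judging by the diff, returns the trigger TLV when it is enabled and not stopped, or NULL otherwise. A minimal sketch of the new call shape:

	struct iwl_fw_dbg_trigger_tlv *trig;

	/* one call replaces enabled() + get_trigger() + check_stop() */
	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
				     FW_DBG_TRIGGER_RSSI);
	if (!trig)
		return;

	/* trig->data carries the trigger-specific TLV payload */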
@@ -1183,84 +1539,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                }
        }
 
-       switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
-       case RATE_MCS_CHAN_WIDTH_20:
-               break;
-       case RATE_MCS_CHAN_WIDTH_40:
-               rx_status->bw = RATE_INFO_BW_40;
-               break;
-       case RATE_MCS_CHAN_WIDTH_80:
-               rx_status->bw = RATE_INFO_BW_80;
-               break;
-       case RATE_MCS_CHAN_WIDTH_160:
-               rx_status->bw = RATE_INFO_BW_160;
-               break;
-       }
-
-       if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
-           rate_n_flags & RATE_MCS_HE_106T_MSK) {
-               rx_status->bw = RATE_INFO_BW_HE_RU;
-               rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
-       }
-
-       if (rate_n_flags & RATE_MCS_HE_MSK &&
-           phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD &&
-           he_type == RATE_MCS_HE_TYPE_MU) {
-               /*
-                * Unfortunately, we have to leave the mac80211 data
-                * incorrect for the case that we receive an HE-MU
-                * transmission and *don't* have the he_mu pointer,
-                * i.e. we don't have the phy data (due to the bits
-                * being used for TSF). This shouldn't happen though
-                * as management frames where we need the TSF/timers
-                * are not be transmitted in HE-MU, I think.
-                */
-               u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
-               u8 offs = 0;
-
-               rx_status->bw = RATE_INFO_BW_HE_RU;
-
-               switch (ru) {
-               case 0 ... 36:
-                       rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
-                       offs = ru;
-                       break;
-               case 37 ... 52:
-                       rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
-                       offs = ru - 37;
-                       break;
-               case 53 ... 60:
-                       rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
-                       offs = ru - 53;
-                       break;
-               case 61 ... 64:
-                       rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
-                       offs = ru - 61;
-                       break;
-               case 65 ... 66:
-                       rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
-                       offs = ru - 65;
-                       break;
-               case 67:
-                       rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
-                       break;
-               case 68:
-                       rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
-                       break;
-               }
-               he->data2 |=
-                       le16_encode_bits(offs,
-                                        IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
-               he->data2 |=
-                       cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN);
-               if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
-                       he->data2 |=
-                               cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
-       } else if (he) {
-               he->data1 |=
-                       cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
-       }
-
        if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
            rate_n_flags & RATE_MCS_SGI_MSK)
                rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
@@ -1285,120 +1563,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
                if (rate_n_flags & RATE_MCS_BF_MSK)
                        rx_status->enc_flags |= RX_ENC_FLAG_BF;
-       } else if (he) {
-               u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
-                               RATE_MCS_STBC_POS;
-               rx_status->nss =
-                       ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
-                                               RATE_VHT_MCS_NSS_POS) + 1;
-               rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
-               rx_status->encoding = RX_ENC_HE;
-               rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
-               if (rate_n_flags & RATE_MCS_BF_MSK)
-                       rx_status->enc_flags |= RX_ENC_FLAG_BF;
-
-               rx_status->he_dcm =
-                       !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
-
-#define CHECK_TYPE(F)                                                  \
-       BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F !=        \
-                    (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
-
-               CHECK_TYPE(SU);
-               CHECK_TYPE(EXT_SU);
-               CHECK_TYPE(MU);
-               CHECK_TYPE(TRIG);
-
-               he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
-
-               if (rate_n_flags & RATE_MCS_BF_POS)
-                       he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
-
-               switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
-                       RATE_MCS_HE_GI_LTF_POS) {
-               case 0:
-                       rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
-                       break;
-               case 1:
-                       rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
-                       break;
-               case 2:
-                       rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
-                       break;
-               case 3:
-                       if (rate_n_flags & RATE_MCS_SGI_MSK)
-                               rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
-                       else
-                               rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
-                       break;
-               }
-
-               switch (he_type) {
-               case RATE_MCS_HE_TYPE_SU: {
-                       u16 val;
-
-                       /* LTF syms correspond to streams */
-                       he->data2 |=
-                               cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
-                       switch (rx_status->nss) {
-                       case 1:
-                               val = 0;
-                               break;
-                       case 2:
-                               val = 1;
-                               break;
-                       case 3:
-                       case 4:
-                               val = 2;
-                               break;
-                       case 5:
-                       case 6:
-                               val = 3;
-                               break;
-                       case 7:
-                       case 8:
-                               val = 4;
-                               break;
-                       default:
-                               WARN_ONCE(1, "invalid nss: %d\n",
-                                         rx_status->nss);
-                               val = 0;
-                       }
-                       he->data5 |=
-                               le16_encode_bits(val,
-                                                IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
-                       }
-                       break;
-               case RATE_MCS_HE_TYPE_MU: {
-                       u16 val;
-                       u64 he_phy_data;
-
-                       if (mvm->trans->cfg->device_family >=
-                           IWL_DEVICE_FAMILY_22560)
-                               he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
-                       else
-                               he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
-
-                       if (he_phy_data == HE_PHY_DATA_INVAL)
-                               break;
-
-                       val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
-                                       he_phy_data);
-
-                       he->data2 |=
-                               cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
-                       he->data5 |=
-                               cpu_to_le16(FIELD_PREP(
-                                       IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS,
-                                       val));
-                       }
-                       break;
-               case RATE_MCS_HE_TYPE_EXT_SU:
-               case RATE_MCS_HE_TYPE_TRIG:
-                       /* not supported yet */
-                       break;
-               }
-       } else {
+       } else if (!(rate_n_flags & RATE_MCS_HE_MSK)) {
                int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
                                                               rx_status->band);
 
@@ -1409,7 +1574,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                        goto out;
                }
                rx_status->rate_idx = rate;
-
        }
 
        /* management stuff on default queue */
index 11ecdf63b7325b22f49402b10f2b1ca654c1fb36..ffcd0ca86041a64e20994dba288339ca5d9e7920 100644 (file)
@@ -19,9 +19,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -836,16 +833,25 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
                                        struct ieee80211_vif *vif)
 {
        const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa;
+       bool low_latency;
+
+       if (iwl_mvm_is_cdb_supported(mvm))
+               low_latency = iwl_mvm_low_latency_band(mvm, NL80211_BAND_5GHZ);
+       else
+               low_latency = iwl_mvm_low_latency(mvm);
 
        /* We can only use EBS if:
         *      1. the feature is supported;
         *      2. the last EBS was successful;
         *      3. if only single scan, the single scan EBS API is supported;
         *      4. it's not a p2p find operation.
+        *      5. we are not in low latency mode,
+        *         or fragmented EBS is supported by the FW.
         */
        return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
                mvm->last_ebs_successful && IWL_MVM_ENABLE_EBS &&
-               vif->type != NL80211_IFTYPE_P2P_DEVICE);
+               vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+               (!low_latency || iwl_mvm_is_frag_ebs_supported(mvm)));
 }
 
 static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
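
Condition 5 above composes with the existing EBS gates as a pure predicate; isolated, assuming the helper names used in the hunk, it reduces to:

	/*
	 * Sketch: low_latency comes from iwl_mvm_low_latency_band() on
	 * CDB hardware (5 GHz band) or iwl_mvm_low_latency() otherwise;
	 * frag_ebs from iwl_mvm_is_frag_ebs_supported().
	 */
	static bool ebs_allowed_in_low_latency(bool low_latency, bool frag_ebs)
	{
		return !low_latency || frag_ebs;
	}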
@@ -1442,6 +1448,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED)
                        cmd->v8.num_of_fragments[SCAN_HB_LMAC_IDX] =
                                                        IWL_SCAN_NUM_OF_FRAGS;
+
+               cmd->v8.general_flags2 =
+                       IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
        }
 
        cmd->scan_start_mac_id = scan_vif->id;
@@ -1449,11 +1458,21 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
                cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
 
-       if (iwl_mvm_scan_use_ebs(mvm, vif))
+       if (iwl_mvm_scan_use_ebs(mvm, vif)) {
                channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
                                IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
                                IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
 
+               /* set fragmented ebs for fragmented scan on HB channels */
+               if (iwl_mvm_is_frag_ebs_supported(mvm)) {
+                       if (gen_flags &
+                           IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED ||
+                           (!iwl_mvm_is_cdb_supported(mvm) &&
+                            gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED))
+                               channel_flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
+               }
+       }
+
        chan_param->flags = channel_flags;
        chan_param->count = params->n_channels;
 
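The EBS_FRAG clause added above gates on different generation flags for CDB and non-CDB firmware; restated as a standalone helper (a sketch, with gen_flags assumed to be the u16 built earlier in iwl_mvm_scan_umac()):

	static bool want_ebs_frag(bool frag_ebs_supported, bool cdb, u16 gen_flags)
	{
		if (!frag_ebs_supported)
			return false;
		/* HB LMAC fragmented scan always qualifies */
		if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED)
			return true;
		/* without CDB, a fragmented scan on the single LMAC does too */
		return !cdb && (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED);
	}
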
index 539b06bf08031b321eed18ab6b51614d812e783c..d1d76bb9a75007651c5161f0f37f4d2351e90df9 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 18db1ed92d9b09741e0fec60e66389bd5af18dea..8f929c774e70c9e3bf3a7c5a42bee2f36a83fb96 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
 #include "sta.h"
 #include "rs.h"
 
+static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm);
+
+static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
+                               u32 sta_id,
+                               struct ieee80211_key_conf *key, bool mcast,
+                               u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
+                               u8 key_offset, bool mfp);
+
 /*
  * New version of ADD_STA_sta command added new fields at the end of the
  * structure, so sending the size of the relevant API's structure is enough to
@@ -2101,6 +2104,19 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
                                   &cfg, timeout);
 
+       if (mvmvif->ap_wep_key) {
+               u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);
+
+               if (key_offset == STA_KEY_IDX_INVALID)
+                       return -ENOSPC;
+
+               ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
+                                          mvmvif->ap_wep_key, 1, 0, NULL, 0,
+                                          key_offset, 0);
+               if (ret)
+                       return ret;
+       }
+
        return 0;
 }
 
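The new WEP path reuses iwl_mvm_send_sta_key(), whose forward declaration was added at the top of this file; lining the call up against that declaration makes the argument roles explicit (a commented restatement, no new behaviour):

	ret = iwl_mvm_send_sta_key(mvm,
				   mvmvif->mcast_sta.sta_id,	/* sta_id */
				   mvmvif->ap_wep_key,		/* key */
				   1,				/* mcast */
				   0, NULL,			/* tkip_iv32/p1k: unused for WEP */
				   0,				/* cmd_flags */
				   key_offset,			/* from iwl_mvm_set_fw_key_idx() */
				   0);				/* mfp */
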
@@ -3133,10 +3149,6 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 
        switch (keyconf->cipher) {
        case WLAN_CIPHER_SUITE_TKIP:
-               if (vif->type == NL80211_IFTYPE_AP) {
-                       ret = -EINVAL;
-                       break;
-               }
                addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
                /* get phase 1 key from mac80211 */
                ieee80211_get_key_rx_seq(keyconf, 0, &seq);
index 67f360c0d17e1d6f359b2f67dccd1b260a63eba6..e02f4eb203598f4e6c142004d1bcf4a66bb63c7f 100644 (file)
@@ -18,9 +18,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program.
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index cbbc16fd006a79ed17533f613eebf20a7c186cd9..ff82af11de8df3d07529b40ebf4290e7cb4b798c 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index cd91bc44259c63c549062a62bd08cc7705e2748d..e1a6f4e2225331cd93c0f6d7ff6ac2063aaaeddf 100644 (file)
@@ -254,17 +254,14 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
        struct iwl_fw_dbg_trigger_time_event *te_trig;
        int i;
 
-       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT))
+       trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
+                                    ieee80211_vif_to_wdev(te_data->vif),
+                                    FW_DBG_TRIGGER_TIME_EVENT);
+       if (!trig)
                return;
 
-       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT);
        te_trig = (void *)trig->data;
 
-       if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-                                          ieee80211_vif_to_wdev(te_data->vif),
-                                          trig))
-               return;
-
        for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
                u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
                u32 trig_action_bitmap =
index 3d2e8b6159bb1fd42e0ea0e5132c29e1e4ac9e0f..1dd3d01245ea888021008b5dc360b1a5d59d173d 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 2d0b8a391308501ff1c2b0fcf91c25163cff7c41..01e0a999063bd92b6dd3857cf56b6e6a5bf8133f 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 2ff560aa1a8252dc8dfd37def8702d08406c5b72..8138d0606c52737614d37fc505f087649203fd20 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index 1232f63278eb64167263e55255cb95b54c27258d..0b3e5c99d3164c07a477b970d78803ffde97d9da 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
index ff193dca2020c99987ca91df4a2d07ef6f58dbfd..99c64ea2619bd393430944a5ef0f361695616b09 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -35,6 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -82,15 +79,12 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
        struct iwl_fw_dbg_trigger_tlv *trig;
        struct iwl_fw_dbg_trigger_ba *ba_trig;
 
-       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+       trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA);
+       if (!trig)
                return;
 
-       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
        ba_trig = (void *)trig->data;
 
-       if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
-               return;
-
        if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
                return;
 
@@ -245,14 +239,18 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
                iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
                                          ssn);
        } else {
-               tx_cmd->tid_tspec = IWL_TID_NON_QOS;
+               if (ieee80211_is_data(fc))
+                       tx_cmd->tid_tspec = IWL_TID_NON_QOS;
+               else
+                       tx_cmd->tid_tspec = IWL_MAX_TID_COUNT;
+
                if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
                        tx_flags |= TX_CMD_FLG_SEQ_CTL;
                else
                        tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
        }
 
-       /* Default to 0 (BE) when tid_spec is set to IWL_TID_NON_QOS */
+       /* Default to 0 (BE) when tid_tspec is set to IWL_MAX_TID_COUNT */
        if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
                ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
        else
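
The non-QoS TID now depends on whether the frame carries data; in isolation the selection is (a sketch, using a hypothetical helper name):

	/*
	 * Sketch: non-QoS data frames keep IWL_TID_NON_QOS; management and
	 * other non-data frames move to IWL_MAX_TID_COUNT, which the
	 * comment above maps to the default (BE) AC.
	 */
	static u8 iwl_mvm_non_qos_tid(__le16 fc)
	{
		return ieee80211_is_data(fc) ? IWL_TID_NON_QOS : IWL_MAX_TID_COUNT;
	}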
@@ -620,6 +618,66 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
        }
 }
 
+static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
+                                      struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct iwl_mvm_vif *mvmvif =
+               iwl_mvm_vif_from_mac80211(info->control.vif);
+       struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+       int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt;
+       struct iwl_probe_resp_data *resp_data;
+       u8 *ie, *pos;
+       u8 match[] = {
+               (WLAN_OUI_WFA >> 16) & 0xff,
+               (WLAN_OUI_WFA >> 8) & 0xff,
+               WLAN_OUI_WFA & 0xff,
+               WLAN_OUI_TYPE_WFA_P2P,
+       };
+
+       rcu_read_lock();
+
+       resp_data = rcu_dereference(mvmvif->probe_resp_data);
+       if (!resp_data)
+               goto out;
+
+       if (!resp_data->notif.noa_active)
+               goto out;
+
+       ie = (u8 *)cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
+                                         mgmt->u.probe_resp.variable,
+                                         skb->len - base_len,
+                                         match, 4, 2);
+       if (!ie) {
+               IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n");
+               goto out;
+       }
+
+       if (skb_tailroom(skb) < resp_data->noa_len) {
+               if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
+                       IWL_ERR(mvm,
+                               "Failed to reallocate probe resp\n");
+                       goto out;
+               }
+       }
+
+       pos = skb_put(skb, resp_data->noa_len);
+
+       *pos++ = WLAN_EID_VENDOR_SPECIFIC;
+       /* Set length of IE body (not including ID and length itself) */
+       *pos++ = resp_data->noa_len - 2;
+       *pos++ = (WLAN_OUI_WFA >> 16) & 0xff;
+       *pos++ = (WLAN_OUI_WFA >> 8) & 0xff;
+       *pos++ = WLAN_OUI_WFA & 0xff;
+       *pos++ = WLAN_OUI_TYPE_WFA_P2P;
+
+       memcpy(pos, &resp_data->notif.noa_attr,
+              resp_data->noa_len - sizeof(struct ieee80211_vendor_ie));
+
+out:
+       rcu_read_unlock();
+}
+
 int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
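
iwl_mvm_probe_resp_set_noa() appends a standard vendor-specific element wrapping the firmware-supplied NoA attribute; the resulting layout (a sketch, assuming WLAN_OUI_WFA is the 24-bit Wi-Fi Alliance OUI):

	/*
	 * [0]    WLAN_EID_VENDOR_SPECIFIC
	 * [1]    noa_len - 2          (body length, excludes ID + length)
	 * [2..4] WFA OUI              (WLAN_OUI_WFA, high byte first)
	 * [5]    WLAN_OUI_TYPE_WFA_P2P
	 * [6..]  notif.noa_attr       (noa_len - sizeof(struct ieee80211_vendor_ie)
	 *                              payload bytes)
	 */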
@@ -628,6 +686,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
        struct iwl_device_cmd *dev_cmd;
        u8 sta_id;
        int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+       __le16 fc = hdr->frame_control;
        int queue;
 
        /* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
@@ -668,7 +727,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
                if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
                    info.control.vif->type == NL80211_IFTYPE_AP ||
                    info.control.vif->type == NL80211_IFTYPE_ADHOC) {
-                       if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE)
+                       if (!ieee80211_is_data(hdr->frame_control))
                                sta_id = mvmvif->bcast_sta.sta_id;
                        else
                                sta_id = mvmvif->mcast_sta.sta_id;
@@ -689,6 +748,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
                }
        }
 
+       if (unlikely(ieee80211_is_probe_resp(fc)))
+               iwl_mvm_probe_resp_set_noa(mvm, skb);
+
        IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
 
        dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
@@ -775,6 +837,36 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
        return 0;
 }
 
+static unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
+                                          struct ieee80211_sta *sta,
+                                          unsigned int tid)
+{
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band;
+       u8 ac = tid_to_mac80211_ac[tid];
+       unsigned int txf;
+       int lmac = IWL_LMAC_24G_INDEX;
+
+       if (iwl_mvm_is_cdb_supported(mvm) &&
+           band == NL80211_BAND_5GHZ)
+               lmac = IWL_LMAC_5G_INDEX;
+
+       /* For HE, redirect to the trigger-based FIFOs */
+       if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
+               ac += 4;
+
+       txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
+
+       /*
+        * Don't send an AMSDU that will be longer than the TXF.
+        * Add a security margin of 256 for the TX command + headers.
+        * We also want to have the start of the next packet inside the
+        * fifo to be able to send bursts.
+        */
+       return min_t(unsigned int, mvmsta->max_amsdu_len,
+                    mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
+}
+
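The 256-byte margin keeps the TX command and headers, plus the start of the next packet, inside the FIFO. A worked example with hypothetical numbers:

	/*
	 * Hypothetical: txfifo_size[txf] = 8192 and the station advertises
	 * an 11454-byte A-MSDU limit. The effective cap is then
	 * min(11454, 8192 - 256) = 7936 bytes.
	 */
	unsigned int limit = min_t(unsigned int, 11454, 8192 - 256);
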
 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
                          struct ieee80211_tx_info *info,
                          struct ieee80211_sta *sta,
@@ -787,7 +879,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
        u16 snap_ip_tcp, pad;
        unsigned int dbg_max_amsdu_len;
        netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
-       u8 tid, txf;
+       u8 tid;
 
        snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
                tcp_hdrlen(skb);
@@ -826,20 +918,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
            !(mvmsta->amsdu_enabled & BIT(tid)))
                return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
 
-       max_amsdu_len = mvmsta->max_amsdu_len;
-
-       /* the Tx FIFO to which this A-MSDU will be routed */
-       txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, tid_to_mac80211_ac[tid]);
-
-       /*
-        * Don't send an AMSDU that will be longer than the TXF.
-        * Add a security margin of 256 for the TX command + headers.
-        * We also want to have the start of the next packet inside the
-        * fifo to be able to send bursts.
-        */
-       max_amsdu_len = min_t(unsigned int, max_amsdu_len,
-                             mvm->fwrt.smem_cfg.lmac[0].txfifo_size[txf] -
-                             256);
+       max_amsdu_len = iwl_mvm_max_amsdu_size(mvm, sta, tid);
 
        if (unlikely(dbg_max_amsdu_len))
                max_amsdu_len = min_t(unsigned int, max_amsdu_len,
@@ -1010,6 +1089,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
        if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
                return -1;
 
+       if (unlikely(ieee80211_is_probe_resp(fc)))
+               iwl_mvm_probe_resp_set_noa(mvm, skb);
+
        dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
                                        sta, mvmsta->sta_id);
        if (!dev_cmd)
@@ -1049,6 +1131,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
                        /* update the tx_cmd hdr as it was already copied */
                        tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
                }
+       } else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc)) {
+               tid = IWL_TID_NON_QOS;
        }
 
        txq_id = mvmsta->tid_data[tid].txq_id;
@@ -1327,15 +1411,13 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
        struct iwl_fw_dbg_trigger_tx_status *status_trig;
        int i;
 
-       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS))
+       trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
+                                    FW_DBG_TRIGGER_TX_STATUS);
+       if (!trig)
                return;
 
-       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS);
        status_trig = (void *)trig->data;
 
-       if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
-               return;
-
        for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
                /* don't collect on status 0 */
                if (!status_trig->statuses[i].status)
@@ -1405,6 +1487,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
        while (!skb_queue_empty(&skbs)) {
                struct sk_buff *skb = __skb_dequeue(&skbs);
                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+               struct ieee80211_hdr *hdr = (void *)skb->data;
                bool flushed = false;
 
                skb_freed++;
@@ -1434,6 +1517,14 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                        break;
                }
 
+               /*
+                * If we are freeing multiple frames, mark all the frames
+                * but the first one as acked, since they were acknowledged
+                * before.
+                */
+               if (skb_freed > 1)
+                       info->flags |= IEEE80211_TX_STAT_ACK;
+
                iwl_mvm_tx_status_check_trigger(mvm, status);
 
                info->status.rates[0].count = tx_resp->failure_frame + 1;
@@ -1449,11 +1540,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                        info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
                info->flags &= ~IEEE80211_TX_CTL_AMPDU;
 
-               /* W/A FW bug: seq_ctl is wrong when the status isn't success */
-               if (status != TX_STATUS_SUCCESS) {
-                       struct ieee80211_hdr *hdr = (void *)skb->data;
+               /* W/A FW bug: seq_ctl is wrong upon failure / BAR frame */
+               if (ieee80211_is_back_req(hdr->frame_control))
+                       seq_ctl = 0;
+               else if (status != TX_STATUS_SUCCESS)
                        seq_ctl = le16_to_cpu(hdr->seq_ctrl);
-               }
 
                if (unlikely(!seq_ctl)) {
                        struct ieee80211_hdr *hdr = (void *)skb->data;
@@ -1525,7 +1616,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                iwl_mvm_tx_airtime(mvm, mvmsta,
                                   le16_to_cpu(tx_resp->wireless_media_time));
 
-               if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) {
+               if (sta->wme && tid != IWL_MGMT_TID) {
                        struct iwl_mvm_tid_data *tid_data =
                                &mvmsta->tid_data[tid];
                        bool send_eosp_ndp = false;
@@ -1645,20 +1736,24 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
        u16 sequence = le16_to_cpu(pkt->hdr.sequence);
        struct iwl_mvm_sta *mvmsta;
        int queue = SEQ_TO_QUEUE(sequence);
+       struct ieee80211_sta *sta;
 
        if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE &&
                         (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))
                return;
 
-       if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
-               return;
-
        iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);
 
        rcu_read_lock();
 
        mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
 
+       sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+       if (WARN_ON_ONCE(!sta || !sta->wme)) {
+               rcu_read_unlock();
+               return;
+       }
+
        if (!WARN_ON_ONCE(!mvmsta)) {
                mvmsta->tid_data[tid].rate_n_flags =
                        le32_to_cpu(tx_resp->initial_rate);
index b002a7afb5f591d8434b0858c37610392a2ec5ea..6c14d3413bdcd6489e41ee15ab8d78a9d4962b80 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -551,7 +546,6 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
 
        IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
 
-       trace_iwlwifi_dev_ucode_error(trans->dev, &table, table.hw_ver, table.brd_ver);
        IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
                desc_lookup(table.error_id));
        IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
@@ -725,19 +719,15 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
 int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
                            u8 sta_id, u8 tid, unsigned int timeout)
 {
-       struct iwl_tx_queue_cfg_cmd cmd = {
-               .flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
-               .sta_id = sta_id,
-               .tid = tid,
-       };
        int queue, size = IWL_DEFAULT_QUEUE_SIZE;
 
-       if (cmd.tid == IWL_MAX_TID_COUNT) {
-               cmd.tid = IWL_MGMT_TID;
+       if (tid == IWL_MAX_TID_COUNT) {
+               tid = IWL_MGMT_TID;
                size = IWL_MGMT_QUEUE_SIZE;
        }
-       queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd,
-                                   SCD_QUEUE_CFG, size, timeout);
+       queue = iwl_trans_txq_alloc(mvm->trans,
+                                   cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
+                                   sta_id, tid, SCD_QUEUE_CFG, size, timeout);
 
        if (queue < 0) {
                IWL_DEBUG_TX_QUEUES(mvm,
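
With the signature change (see the iwl_trans_pcie_dyn_txq_alloc prototype later in this diff), callers pass the queue parameters directly rather than pre-building a struct iwl_tx_queue_cfg_cmd; side by side:

	/* old: caller built the config command */
	struct iwl_tx_queue_cfg_cmd cmd = {
		.flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
		.sta_id = sta_id,
		.tid = tid,
	};
	queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd,
				    SCD_QUEUE_CFG, size, timeout);

	/* new: the transport builds it internally */
	queue = iwl_trans_txq_alloc(mvm->trans,
				    cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
				    sta_id, tid, SCD_QUEUE_CFG, size, timeout);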
@@ -900,20 +890,19 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 
 /**
  * iwl_mvm_send_lq_cmd() - Send link quality command
- * @init: This command is sent as part of station initialization right
- *        after station has been added.
+ * @sync: This command can be sent synchronously.
  *
  * The link quality command is sent as the last step of station creation.
  * This is the special case in which init is set and we call a callback in
  * this case to clear the state indicating that station creation is in
  * progress.
  */
-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync)
 {
        struct iwl_host_cmd cmd = {
                .id = LQ_CMD,
                .len = { sizeof(struct iwl_lq_cmd), },
-               .flags = init ? 0 : CMD_ASYNC,
+               .flags = sync ? 0 : CMD_ASYNC,
                .data = { lq, },
        };
 
@@ -1249,14 +1238,12 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        struct iwl_fw_dbg_trigger_tlv *trig;
        struct iwl_fw_dbg_trigger_mlme *trig_mlme;
 
-       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
+       trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+                                    FW_DBG_TRIGGER_MLME);
+       if (!trig)
                goto out;
 
-       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
        trig_mlme = (void *)trig->data;
-       if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-                                          ieee80211_vif_to_wdev(vif), trig))
-               goto out;
 
        if (trig_mlme->stop_connection_loss &&
            --trig_mlme->stop_connection_loss)
@@ -1441,14 +1428,12 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
        struct iwl_fw_dbg_trigger_tlv *trig;
        struct iwl_fw_dbg_trigger_ba *ba_trig;
 
-       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+       trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+                                    FW_DBG_TRIGGER_BA);
+       if (!trig)
                return;
 
-       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
        ba_trig = (void *)trig->data;
-       if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-                                          ieee80211_vif_to_wdev(vif), trig))
-               return;
 
        if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
                return;
index 2146fda8da2fdbdece661ceb3e177ab7c5c2b83e..05ed4fb88e0c2c740a99b43426cf7ce334ea478c 100644 (file)
@@ -96,9 +96,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
        /* Configure debug, for integration */
        iwl_pcie_alloc_fw_monitor(trans, 0);
        prph_sc_ctrl->hwm_cfg.hwm_base_addr =
-               cpu_to_le64(trans_pcie->fw_mon_phys);
+               cpu_to_le64(trans->fw_mon[0].physical);
        prph_sc_ctrl->hwm_cfg.hwm_size =
-               cpu_to_le32(trans_pcie->fw_mon_size);
+               cpu_to_le32(trans->fw_mon[0].size);
 
        /* allocate ucode sections in dram and set addresses */
        ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
index b2cd7ef5fc3a9ba3b37351745d6fdedfda985cf9..6f45a0303ddd62561efafe942b4be32cac56d635 100644 (file)
@@ -162,7 +162,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_context_info *ctxt_info;
        struct iwl_context_info_rbd_cfg *rx_cfg;
-       u32 control_flags = 0;
+       u32 control_flags = 0, rb_size;
        int ret;
 
        ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
@@ -177,11 +177,29 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
        /* size is in DWs */
        ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);
 
+       switch (trans_pcie->rx_buf_size) {
+       case IWL_AMSDU_2K:
+               rb_size = IWL_CTXT_INFO_RB_SIZE_2K;
+               break;
+       case IWL_AMSDU_4K:
+               rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
+               break;
+       case IWL_AMSDU_8K:
+               rb_size = IWL_CTXT_INFO_RB_SIZE_8K;
+               break;
+       case IWL_AMSDU_12K:
+               rb_size = IWL_CTXT_INFO_RB_SIZE_12K;
+               break;
+       default:
+               WARN_ON(1);
+               rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
+       }
+
        BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF);
-       control_flags = IWL_CTXT_INFO_RB_SIZE_4K |
-                       IWL_CTXT_INFO_TFD_FORMAT_LONG |
-                       RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
-                       IWL_CTXT_INFO_RB_CB_SIZE_POS;
+       control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG |
+                       (RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
+                        IWL_CTXT_INFO_RB_CB_SIZE_POS) |
+                       (rb_size << IWL_CTXT_INFO_RB_SIZE_POS);
        ctxt_info->control.control_flags = cpu_to_le32(control_flags);
 
        /* initialize RX default queue */
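
The buffer-size encoding above can be read as a pure mapping from the configured RX buffer size to its context-info field, with unknown values warning and falling back to 4K; a sketch (the enum name iwl_amsdu_size is assumed from trans_pcie->rx_buf_size):

	static u32 iwl_ctxt_info_rb_size(enum iwl_amsdu_size sz)
	{
		switch (sz) {
		case IWL_AMSDU_2K:  return IWL_CTXT_INFO_RB_SIZE_2K;
		case IWL_AMSDU_4K:  return IWL_CTXT_INFO_RB_SIZE_4K;
		case IWL_AMSDU_8K:  return IWL_CTXT_INFO_RB_SIZE_8K;
		case IWL_AMSDU_12K: return IWL_CTXT_INFO_RB_SIZE_12K;
		default:
			WARN_ON(1);
			return IWL_CTXT_INFO_RB_SIZE_4K;
		}
	}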
index b150da4c6721e6bdb087cd3f5ef887ea0792bd49..9e015212c2c0f5db501bebecf6201ee3010df4cc 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -646,34 +641,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_shared_clk)},
        {IWL_PCI_DEVICE(0x31DC, 0x4234, iwl9560_2ac_cfg_shared_clk)},
        {IWL_PCI_DEVICE(0x31DC, 0x42A4, iwl9462_2ac_cfg_shared_clk)},
-       {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x34F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+
+       {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
        {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_cfg_soc)},
index b63d44b7cd7c7be1e6a0a3fe0081edc5ea4820b7..f9c4c64dee66038d31eebc8108effc46cbcb04cf 100644 (file)
@@ -17,9 +17,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program.
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
@@ -102,66 +99,6 @@ struct isr_statistics {
        u32 unhandled;
 };
 
-#define IWL_CD_STTS_OPTIMIZED_POS      0
-#define IWL_CD_STTS_OPTIMIZED_MSK      0x01
-#define IWL_CD_STTS_TRANSFER_STATUS_POS        1
-#define IWL_CD_STTS_TRANSFER_STATUS_MSK        0x0E
-#define IWL_CD_STTS_WIFI_STATUS_POS    4
-#define IWL_CD_STTS_WIFI_STATUS_MSK    0xF0
-
-/**
- * enum iwl_completion_desc_transfer_status -  transfer status (bits 1-3)
- * @IWL_CD_STTS_END_TRANSFER: successful transfer complete.
- *     In sniffer mode, when split is used, set in last CD completion. (RX)
- * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for
- *     all CD completion. (RX)
- * @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX)
- */
-enum iwl_completion_desc_transfer_status {
-       IWL_CD_STTS_UNUSED,
-       IWL_CD_STTS_UNUSED_2,
-       IWL_CD_STTS_END_TRANSFER,
-       IWL_CD_STTS_OVERFLOW,
-       IWL_CD_STTS_ABORTED,
-       IWL_CD_STTS_ERROR,
-};
-
-/**
- * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7)
- * @IWL_CD_STTS_VALID: the packet is valid (RX)
- * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX)
- * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX)
- * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX)
- * @IWL_CD_STTS_DUP: duplicate packet (RX)
- * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX)
- * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX)
- * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX)
- * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX)
- * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX)
- * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX)
- * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX)
- * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX)
- * @IWL_CD_STTS_NOT_USED: completed but not used (RX)
- * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX)
- */
-enum iwl_completion_desc_wifi_status {
-       IWL_CD_STTS_VALID,
-       IWL_CD_STTS_FCS_ERR,
-       IWL_CD_STTS_SEC_KEY_ERR,
-       IWL_CD_STTS_DECRYPTION_ERR,
-       IWL_CD_STTS_DUP,
-       IWL_CD_STTS_ICV_MIC_ERR,
-       IWL_CD_STTS_INTERNAL_SNAP_ERR,
-       IWL_CD_STTS_SEC_PORT_FAIL,
-       IWL_CD_STTS_BA_OLD_SN,
-       IWL_CD_STTS_QOS_NULL,
-       IWL_CD_STTS_MAC_HDR_ERR,
-       IWL_CD_STTS_MAX_RETRANS,
-       IWL_CD_STTS_EX_LIFETIME,
-       IWL_CD_STTS_NOT_USED,
-       IWL_CD_STTS_REPLAY_ERR,
-};
-
 #define IWL_RX_TD_TYPE_MSK     0xff000000
 #define IWL_RX_TD_SIZE_MSK     0x00ffffff
 #define IWL_RX_TD_SIZE_2K      BIT(11)
@@ -463,18 +400,6 @@ enum iwl_image_response_code {
        IWL_IMAGE_RESP_FAIL             = 2,
 };
 
-/**
- * struct iwl_dram_data
- * @physical: page phy pointer
- * @block: pointer to the allocated block/page
- * @size: size of the block/page
- */
-struct iwl_dram_data {
-       dma_addr_t physical;
-       void *block;
-       int size;
-};
-
 /**
  * struct iwl_self_init_dram - dram data used by self init process
  * @fw: lmac and umac dram data
@@ -516,6 +441,7 @@ struct iwl_self_init_dram {
  * @ucode_write_complete: indicates that the ucode has been copied.
  * @ucode_write_waitq: wait queue for uCode load
  * @cmd_queue - command queue number
+ * @def_rx_queue - default rx queue number
  * @rx_buf_size: Rx buffer size
  * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
  * @scd_set_active: should the transport configure the SCD for HCMD queue
@@ -525,9 +451,6 @@ struct iwl_self_init_dram {
  * @reg_lock: protect hw register access
  * @mutex: to protect stop_device / start_fw / start_hw
  * @cmd_in_flight: true when we have a host command in flight
- * @fw_mon_phys: physical address of the buffer for the firmware monitor
- * @fw_mon_page: points to the first page of the buffer for the firmware monitor
- * @fw_mon_size: size of the buffer for the firmware monitor
  * @msix_entries: array of MSI-X entries
  * @msix_enabled: true if managed to enable MSI-X
  * @shared_vec_mask: the type of causes the shared vector handles
@@ -539,7 +462,6 @@ struct iwl_self_init_dram {
  * @fh_mask: current unmasked fh causes
  * @hw_mask: current unmasked hw causes
  * @in_rescan: true if we have triggered a device rescan
- * @scheduled_for_removal: true if we have scheduled a device removal
  */
 struct iwl_trans_pcie {
        struct iwl_rxq *rxq;
@@ -596,6 +518,7 @@ struct iwl_trans_pcie {
        u8 page_offs, dev_cmd_offs;
 
        u8 cmd_queue;
+       u8 def_rx_queue;
        u8 cmd_fifo;
        unsigned int cmd_q_wdg_timeout;
        u8 n_no_reclaim_cmds;
@@ -615,10 +538,6 @@ struct iwl_trans_pcie {
        bool cmd_hold_nic_awake;
        bool ref_cmd_in_flight;
 
-       dma_addr_t fw_mon_phys;
-       struct page *fw_mon_page;
-       u32 fw_mon_size;
-
        struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
        bool msix_enabled;
        u8 shared_vec_mask;
@@ -631,7 +550,6 @@ struct iwl_trans_pcie {
        cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
        u16 tx_cmd_queue_size;
        bool in_rescan;
-       bool scheduled_for_removal;
 };
 
 static inline struct iwl_trans_pcie *
@@ -673,6 +591,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
 /*****************************************************
 * RX
 ******************************************************/
+int _iwl_pcie_rx_init(struct iwl_trans *trans);
 int iwl_pcie_rx_init(struct iwl_trans *trans);
 int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
 irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
@@ -686,6 +605,7 @@ void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
 int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
 void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
                            struct iwl_rxq *rxq);
+int iwl_pcie_rx_alloc(struct iwl_trans *trans);
 
 /*****************************************************
 * ICT - interrupt handling
@@ -700,7 +620,8 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
 * TX / HCMD
 ******************************************************/
 int iwl_pcie_tx_init(struct iwl_trans *trans);
-int iwl_pcie_gen2_tx_init(struct iwl_trans *trans);
+int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
+                         int queue_size);
 void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
 int iwl_pcie_tx_stop(struct iwl_trans *trans);
 void iwl_pcie_tx_free(struct iwl_trans *trans);
@@ -717,11 +638,17 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                      struct iwl_device_cmd *dev_cmd, int txq_id);
 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
+void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
+void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
+                                 struct iwl_txq *txq);
 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
                            struct iwl_rx_cmd_buffer *rxb);
 void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
                            struct sk_buff_head *skbs);
 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
+void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
+                                  struct iwl_txq *txq, u16 byte_cnt,
+                                  int num_tbs);
 
 static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
                                          u8 idx)
@@ -1039,6 +966,7 @@ static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
 }
 
 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
+void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
@@ -1057,6 +985,7 @@ void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);
 void iwl_pcie_rx_allocator_work(struct work_struct *data);
 
 /* common functions that are used by gen2 transport */
+int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
 void iwl_pcie_apm_config(struct iwl_trans *trans);
 int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
 void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
@@ -1088,8 +1017,16 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
 int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
                                 const struct fw_img *fw, bool run_in_rfkill);
 void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
+void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
+                                  struct iwl_txq *txq);
+int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
+                                    struct iwl_txq **intxq, int size,
+                                    unsigned int timeout);
+int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
+                                     struct iwl_txq *txq,
+                                     struct iwl_host_cmd *hcmd);
 int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
-                                struct iwl_tx_queue_cfg_cmd *cmd,
+                                __le16 flags, u8 sta_id, u8 tid,
                                 int cmd_id, int size,
                                 unsigned int timeout);
 void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
index d017aa2a0a8bd7bed6c75f5f20da83460cca1c7a..e965cc5888500bcc2ea9b8866a3b9c4ba1792934 100644 (file)
@@ -17,9 +17,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program.
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
@@ -776,7 +773,7 @@ err:
        return -ENOMEM;
 }
 
-static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
+int iwl_pcie_rx_alloc(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
@@ -1002,7 +999,7 @@ int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
        return 0;
 }
 
-static int _iwl_pcie_rx_init(struct iwl_trans *trans)
+int _iwl_pcie_rx_init(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *def_rxq;
@@ -1107,6 +1104,9 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 
 int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
 {
+       /* Set interrupt coalescing timer to default (2048 usecs) */
+       iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
        /*
         * We don't configure the RFH.
         * Restock will be done at alive, after firmware configured the RFH.
@@ -1144,6 +1144,14 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
        kfree(trans_pcie->rxq);
 }
 
+static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
+                                         struct iwl_rb_allocator *rba)
+{
+       spin_lock(&rba->lock);
+       list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+       spin_unlock(&rba->lock);
+}
+
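The new helper folds the lock/splice/unlock sequence that previously appeared verbatim at three call sites in this file. A minimal sketch of the reuse path as it reads after this patch; the wrapper function is illustrative only, the fields are the ones visible in the surrounding hunks:

    /* illustrative wrapper, not part of the patch */
    static void example_post_alloc_request(struct iwl_trans_pcie *trans_pcie,
                                           struct iwl_rxq *rxq)
    {
            struct iwl_rb_allocator *rba = &trans_pcie->rba;

            /* hand the accumulated used RBDs over in one locked splice */
            iwl_pcie_rx_move_to_allocator(rxq, rba);

            /* then kick the allocator work, exactly as before */
            atomic_inc(&rba->req_pending);
            queue_work(rba->alloc_wq, &rba->rx_alloc);
    }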
 /*
  * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
  *
@@ -1175,9 +1183,7 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
        if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
                /* Hand the two RBDs over to the allocator; it already has
                 * another six from the pool for the request completion. */
-               spin_lock(&rba->lock);
-               list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
-               spin_unlock(&rba->lock);
+               iwl_pcie_rx_move_to_allocator(rxq, rba);
 
                atomic_inc(&rba->req_pending);
                queue_work(rba->alloc_wq, &rba->rx_alloc);
@@ -1187,7 +1193,8 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                                struct iwl_rxq *rxq,
                                struct iwl_rx_mem_buffer *rxb,
-                               bool emergency)
+                               bool emergency,
+                               int i)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
@@ -1213,6 +1220,9 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                        .truesize = max_len,
                };
 
+               if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+                       rxcb.status = rxq->cd[i].status;
+
                pkt = rxb_addr(&rxcb);
 
                if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
@@ -1267,7 +1277,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                index = SEQ_TO_INDEX(sequence);
                cmd_index = iwl_pcie_get_cmd_index(txq, index);
 
-               if (rxq->id == 0)
+               if (rxq->id == trans_pcie->def_rx_queue)
                        iwl_op_mode_rx(trans->op_mode, &rxq->napi,
                                       &rxcb);
                else
@@ -1396,17 +1406,25 @@ restart:
                IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
 
        while (i != r) {
+               struct iwl_rb_allocator *rba = &trans_pcie->rba;
                struct iwl_rx_mem_buffer *rxb;
-
-               if (unlikely(rxq->used_count == rxq->queue_size / 2))
+               /* number of RBDs still waiting for page allocation */
+               u32 rb_pending_alloc =
+                       atomic_read(&trans_pcie->rba.req_pending) *
+                       RX_CLAIM_REQ_ALLOC;
+
+               if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
+                            !emergency)) {
+                       iwl_pcie_rx_move_to_allocator(rxq, rba);
                        emergency = true;
+               }
 
                rxb = iwl_pcie_get_rxb(trans, rxq, i);
                if (!rxb)
                        goto out;
 
                IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
-               iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
+               iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
 
                i = (i + 1) & (rxq->queue_size - 1);
 
@@ -1421,17 +1439,13 @@ restart:
                        iwl_pcie_rx_allocator_get(trans, rxq);
 
                if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
-                       struct iwl_rb_allocator *rba = &trans_pcie->rba;
-
                        /* Add the remaining empty RBDs for allocator use */
-                       spin_lock(&rba->lock);
-                       list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
-                       spin_unlock(&rba->lock);
+                       iwl_pcie_rx_move_to_allocator(rxq, rba);
                } else if (emergency) {
                        count++;
                        if (count == 8) {
                                count = 0;
-                               if (rxq->used_count < rxq->queue_size / 3)
+                               if (rb_pending_alloc < rxq->queue_size / 3)
                                        emergency = false;
 
                                rxq->read = i;
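The emergency heuristic now keys off RBDs still owed by the allocator rather than the queue's own used count. Assuming RX_CLAIM_REQ_ALLOC is 8 and a 256-entry RX queue (typical values, not stated in this hunk), the thresholds work out as in this sketch:

    /* RBDs waiting for pages = outstanding requests * 8 (assumed) */
    u32 rb_pending_alloc = atomic_read(&trans_pcie->rba.req_pending) *
                           RX_CLAIM_REQ_ALLOC;

    bool enter = rb_pending_alloc >= rxq->queue_size / 2;  /* >= 128 */
    bool leave = rb_pending_alloc <  rxq->queue_size / 3;  /* <   85 */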
index 2bc67219ed3efadd09597a28fe18e39ad5f1d736..77f3610e5ca94d9e892459cb016ff8eba0dffd87 100644 (file)
 #include "iwl-context-info.h"
 #include "iwl-context-info-gen3.h"
 #include "internal.h"
+#include "fw/dbg.h"
 
 /*
  * Start up NIC's basic functionality after it has been reset
  * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
  * NOTE:  This does not load uCode nor start the embedded processor
  */
-static int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
+int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
 {
        int ret = 0;
 
@@ -164,9 +165,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
        trans_pcie->is_down = true;
 
        /* Stop dbgc before stopping device */
-       iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
-       udelay(100);
-       iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
+       _iwl_fw_dbg_stop_recording(trans, NULL);
 
        /* tell the device to stop sending interrupts */
        iwl_disable_interrupts(trans);
@@ -265,7 +264,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
                return -ENOMEM;
 
        /* Allocate or reset and init all Tx and Command queues */
-       if (iwl_pcie_gen2_tx_init(trans))
+       if (iwl_pcie_gen2_tx_init(trans, trans_pcie->cmd_queue, TFD_CMD_SLOTS))
                return -ENOMEM;
 
        /* enable shadow regs in HW */
index 7d319b6863feb2e14e8702907603c3f13b43de53..5bafb3f46eb8ace3c5668f64133f7bb83a2630e3 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
  * The full GNU General Public License is included in this distribution
  * in the file called COPYING.
  *
@@ -92,7 +87,7 @@
 #define IWL_FW_MEM_EXTENDED_START      0x40000
 #define IWL_FW_MEM_EXTENDED_END                0x57FFF
 
-static void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
+void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
 {
 #define PCI_DUMP_SIZE  64
 #define PREFIX_LEN     32
@@ -190,72 +185,42 @@ static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
 
 static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-       if (!trans_pcie->fw_mon_page)
-               return;
+       int i;
 
-       dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
-                      trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
-       __free_pages(trans_pcie->fw_mon_page,
-                    get_order(trans_pcie->fw_mon_size));
-       trans_pcie->fw_mon_page = NULL;
-       trans_pcie->fw_mon_phys = 0;
-       trans_pcie->fw_mon_size = 0;
+       /* free every allocated block, then reset the count; decrementing
+        * num_blocks inside the loop would skip every other block */
+       for (i = 0; i < trans->num_blocks; i++) {
+               dma_free_coherent(trans->dev, trans->fw_mon[i].size,
+                                 trans->fw_mon[i].block,
+                                 trans->fw_mon[i].physical);
+               trans->fw_mon[i].block = NULL;
+               trans->fw_mon[i].physical = 0;
+               trans->fw_mon[i].size = 0;
+       }
+       trans->num_blocks = 0;
 }
 
-void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
+static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
+                                           u8 max_power, u8 min_power)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct page *page = NULL;
-       dma_addr_t phys;
+       void *cpu_addr = NULL;
+       dma_addr_t phys = 0;
        u32 size = 0;
        u8 power;
 
-       if (!max_power) {
-               /* default max_power is maximum */
-               max_power = 26;
-       } else {
-               max_power += 11;
-       }
-
-       if (WARN(max_power > 26,
-                "External buffer size for monitor is too big %d, check the FW TLV\n",
-                max_power))
-               return;
-
-       if (trans_pcie->fw_mon_page) {
-               dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
-                                          trans_pcie->fw_mon_size,
-                                          DMA_FROM_DEVICE);
-               return;
-       }
-
-       phys = 0;
-       for (power = max_power; power >= 11; power--) {
-               int order;
-
+       for (power = max_power; power >= min_power; power--) {
                size = BIT(power);
-               order = get_order(size);
-               page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
-                                  order);
-               if (!page)
+               cpu_addr = dma_alloc_coherent(trans->dev, size, &phys,
+                                             GFP_KERNEL | __GFP_NOWARN |
+                                             __GFP_ZERO | __GFP_COMP);
+               if (!cpu_addr)
                        continue;
 
-               phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
-                                   DMA_FROM_DEVICE);
-               if (dma_mapping_error(trans->dev, phys)) {
-                       __free_pages(page, order);
-                       page = NULL;
-                       continue;
-               }
                IWL_INFO(trans,
-                        "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
-                        size, order);
+                        "Allocated 0x%08x bytes for firmware monitor.\n",
+                        size);
                break;
        }
 
-       if (WARN_ON_ONCE(!page))
+       if (WARN_ON_ONCE(!cpu_addr))
                return;
 
        if (power != max_power)
@@ -264,9 +229,34 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
                        (unsigned long)BIT(power - 10),
                        (unsigned long)BIT(max_power - 10));
 
-       trans_pcie->fw_mon_page = page;
-       trans_pcie->fw_mon_phys = phys;
-       trans_pcie->fw_mon_size = size;
+       trans->fw_mon[trans->num_blocks].block = cpu_addr;
+       trans->fw_mon[trans->num_blocks].physical = phys;
+       trans->fw_mon[trans->num_blocks].size = size;
+       trans->num_blocks++;
+}
+
+void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
+{
+       if (!max_power) {
+               /* default max_power is maximum */
+               max_power = 26;
+       } else {
+               max_power += 11;
+       }
+
+       if (WARN(max_power > 26,
+                "External buffer size for monitor is too big %d, check the FW TLV\n",
+                max_power))
+               return;
+
+       /*
+        * This function allocates the default fw monitor.
+        * The optional additional ones will be allocated at runtime.
+        */
+       if (trans->num_blocks)
+               return;
+
+       iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
 }
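The monitor buffer is now found by a power-of-two back-off over dma_alloc_coherent(): the TLV-supplied power is offset by 11, so requests range from BIT(26) = 64 MiB (the default) down to BIT(11) = 2 KiB, halving until an allocation succeeds. Condensed, the loop above is:

    for (power = max_power; power >= min_power; power--) {
            size = BIT(power);              /* first try: BIT(max_power) */
            cpu_addr = dma_alloc_coherent(trans->dev, size, &phys,
                                          GFP_KERNEL | __GFP_NOWARN |
                                          __GFP_ZERO | __GFP_COMP);
            if (cpu_addr)
                    break;                  /* record this block */
    }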
 
 static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
@@ -930,7 +920,6 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
 
 void iwl_pcie_apply_destination(struct iwl_trans *trans)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv;
        int i;
 
@@ -942,7 +931,7 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans)
        else
                IWL_WARN(trans, "PCI should have external buffer debug\n");
 
-       for (i = 0; i < trans->dbg_dest_reg_num; i++) {
+       for (i = 0; i < trans->dbg_n_dest_reg; i++) {
                u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
                u32 val = le32_to_cpu(dest->reg_ops[i].val);
 
@@ -981,18 +970,18 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans)
        }
 
 monitor:
-       if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
+       if (dest->monitor_mode == EXTERNAL_MODE && trans->fw_mon[0].size) {
                iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
-                              trans_pcie->fw_mon_phys >> dest->base_shift);
+                              trans->fw_mon[0].physical >> dest->base_shift);
                if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
                        iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
-                                      (trans_pcie->fw_mon_phys +
-                                       trans_pcie->fw_mon_size - 256) >>
+                                      (trans->fw_mon[0].physical +
+                                       trans->fw_mon[0].size - 256) >>
                                                dest->end_shift);
                else
                        iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
-                                      (trans_pcie->fw_mon_phys +
-                                       trans_pcie->fw_mon_size) >>
+                                      (trans->fw_mon[0].physical +
+                                       trans->fw_mon[0].size) >>
                                                dest->end_shift);
        }
 }
@@ -1000,7 +989,6 @@ monitor:
 static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
                                const struct fw_img *image)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int ret = 0;
        int first_ucode_section;
 
@@ -1030,12 +1018,12 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
            trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
                iwl_pcie_alloc_fw_monitor(trans, 0);
 
-               if (trans_pcie->fw_mon_size) {
+               if (trans->fw_mon[0].size) {
                        iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
-                                      trans_pcie->fw_mon_phys >> 4);
+                                      trans->fw_mon[0].physical >> 4);
                        iwl_write_prph(trans, MON_BUFF_END_ADDR,
-                                      (trans_pcie->fw_mon_phys +
-                                       trans_pcie->fw_mon_size) >> 4);
+                                      (trans->fw_mon[0].physical +
+                                       trans->fw_mon[0].size) >> 4);
                }
        } else if (trans->dbg_dest_tlv) {
                iwl_pcie_apply_destination(trans);
@@ -1262,13 +1250,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
        trans_pcie->is_down = true;
 
        /* Stop dbgc before stopping device */
-       if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
-               iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
-       } else {
-               iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
-               udelay(100);
-               iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
-       }
+       _iwl_fw_dbg_stop_recording(trans, NULL);
 
        /* tell the device to stop sending interrupts */
        iwl_disable_interrupts(trans);
@@ -1830,18 +1812,30 @@ static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
        return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
 }
 
+static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
+{
+       if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+               return 0x00FFFFFF;
+       else
+               return 0x000FFFFF;
+}
+
 static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
 {
+       u32 mask = iwl_trans_pcie_prph_msk(trans);
+
        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
-                              ((reg & 0x000FFFFF) | (3 << 24)));
+                              ((reg & mask) | (3 << 24)));
        return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
 }
 
 static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
                                      u32 val)
 {
+       u32 mask = iwl_trans_pcie_prph_msk(trans);
+
        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
-                              ((addr & 0x000FFFFF) | (3 << 24)));
+                              ((addr & mask) | (3 << 24)));
        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
 }
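From the 22560 family on, the periphery address space is 24 bits wide instead of 20, so only the mask in the target-address composition changes. A worked example with an arbitrary register value (the 3 << 24 access bits are untouched by this patch):

    u32 reg  = 0x12345678;                          /* example only */
    u32 pre  = (reg & 0x000FFFFF) | (3 << 24);      /* 0x03045678 */
    u32 post = (reg & 0x00FFFFFF) | (3 << 24);      /* 0x03345678 */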
 
@@ -2013,7 +2007,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
                if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) {
                        struct iwl_trans_pcie_removal *removal;
 
-                       if (trans_pcie->scheduled_for_removal)
+                       if (test_bit(STATUS_TRANS_DEAD, &trans->status))
                                goto err;
 
                        IWL_ERR(trans, "Device gone - scheduling removal!\n");
@@ -2039,7 +2033,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
                         * we don't need to clear this flag, because
                         * the trans will be freed and reallocated.
                        */
-                       trans_pcie->scheduled_for_removal = true;
+                       set_bit(STATUS_TRANS_DEAD, &trans->status);
 
                        removal->pdev = to_pci_dev(trans->dev);
                        INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
@@ -2266,6 +2260,10 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
        unsigned long now = jiffies;
        u8 wr_ptr;
 
+       /* Make sure the NIC is still alive in the bus */
+       if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+               return -ENODEV;
+
        if (!test_bit(txq_idx, trans_pcie->queue_used))
                return -EINVAL;
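The pcie-private scheduled_for_removal flag becomes the transport-wide STATUS_TRANS_DEAD bit, so any entry point can cheaply refuse work once the device has fallen off the bus. The producer/consumer pair, as used in this patch:

    /* removal path: mark the transport dead */
    set_bit(STATUS_TRANS_DEAD, &trans->status);

    /* command and queue-wait entry points: bail out early */
    if (test_bit(STATUS_TRANS_DEAD, &trans->status))
            return -ENODEV;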
 
@@ -2861,10 +2859,9 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
                            struct iwl_fw_error_dump_data **data,
                            u32 monitor_len)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 len = 0;
 
-       if ((trans_pcie->fw_mon_page &&
+       if ((trans->num_blocks &&
             trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
            trans->dbg_dest_tlv) {
                struct iwl_fw_error_dump_fw_mon *fw_mon_data;
@@ -2892,22 +2889,12 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
                        cpu_to_le32(iwl_read_prph(trans, base));
 
                len += sizeof(**data) + sizeof(*fw_mon_data);
-               if (trans_pcie->fw_mon_page) {
-                       /*
-                        * The firmware is now asserted, it won't write anything
-                        * to the buffer. CPU can take ownership to fetch the
-                        * data. The buffer will be handed back to the device
-                        * before the firmware will be restarted.
-                        */
-                       dma_sync_single_for_cpu(trans->dev,
-                                               trans_pcie->fw_mon_phys,
-                                               trans_pcie->fw_mon_size,
-                                               DMA_FROM_DEVICE);
+               if (trans->num_blocks) {
                        memcpy(fw_mon_data->data,
-                              page_address(trans_pcie->fw_mon_page),
-                              trans_pcie->fw_mon_size);
+                              trans->fw_mon[0].block,
+                              trans->fw_mon[0].size);
 
-                       monitor_len = trans_pcie->fw_mon_size;
+                       monitor_len = trans->fw_mon[0].size;
                } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
                        /*
                         * Update pointers to reflect actual values after
@@ -2943,36 +2930,15 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
        return len;
 }
 
-static struct iwl_trans_dump_data
-*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
-                         const struct iwl_fw_dbg_trigger_tlv *trigger)
+static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, int *len)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_fw_error_dump_data *data;
-       struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
-       struct iwl_fw_error_dump_txcmd *txcmd;
-       struct iwl_trans_dump_data *dump_data;
-       u32 len, num_rbs = 0;
-       u32 monitor_len;
-       int i, ptr;
-       bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
-                       !trans->cfg->mq_rx_supported &&
-                       trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
-
-       /* transport dump header */
-       len = sizeof(*dump_data);
-
-       /* host commands */
-       len += sizeof(*data) +
-               cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
-
-       /* FW monitor */
-       if (trans_pcie->fw_mon_page) {
-               len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
-                      trans_pcie->fw_mon_size;
-               monitor_len = trans_pcie->fw_mon_size;
+       if (trans->num_blocks) {
+               *len += sizeof(struct iwl_fw_error_dump_data) +
+                       sizeof(struct iwl_fw_error_dump_fw_mon) +
+                       trans->fw_mon[0].size;
+               return trans->fw_mon[0].size;
        } else if (trans->dbg_dest_tlv) {
-               u32 base, end, cfg_reg;
+               u32 base, end, cfg_reg, monitor_len;
 
                if (trans->dbg_dest_tlv->version == 1) {
                        cfg_reg = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
@@ -3002,11 +2968,39 @@ static struct iwl_trans_dump_data
                                end += (1 << trans->dbg_dest_tlv->end_shift);
                        monitor_len = end - base;
                }
-               len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
-                      monitor_len;
-       } else {
-               monitor_len = 0;
+               *len += sizeof(struct iwl_fw_error_dump_data) +
+                       sizeof(struct iwl_fw_error_dump_fw_mon) +
+                       monitor_len;
+               return monitor_len;
        }
+       return 0;
+}
+
+static struct iwl_trans_dump_data
+*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
+                         const struct iwl_fw_dbg_trigger_tlv *trigger)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_fw_error_dump_data *data;
+       struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
+       struct iwl_fw_error_dump_txcmd *txcmd;
+       struct iwl_trans_dump_data *dump_data;
+       u32 len, num_rbs = 0;
+       u32 monitor_len;
+       int i, ptr;
+       bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
+                       !trans->cfg->mq_rx_supported &&
+                       trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
+
+       /* transport dump header */
+       len = sizeof(*dump_data);
+
+       /* host commands */
+       len += sizeof(*data) +
+               cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
+
+       /* FW monitor */
+       monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
 
        if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
                if (!(trans->dbg_dump_mask &
@@ -3175,7 +3169,6 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
        .ref = iwl_trans_pcie_ref,                                      \
        .unref = iwl_trans_pcie_unref,                                  \
        .dump_data = iwl_trans_pcie_dump_data,                          \
-       .dump_regs = iwl_trans_pcie_dump_regs,                          \
        .d3_suspend = iwl_trans_pcie_d3_suspend,                        \
        .d3_resume = iwl_trans_pcie_d3_resume
 
@@ -3277,6 +3270,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                                       PCIE_LINK_STATE_CLKPM);
        }
 
+       trans_pcie->def_rx_queue = 0;
+
        if (cfg->use_tfh) {
                addr_size = 64;
                trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
@@ -3327,6 +3322,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        iwl_disable_interrupts(trans);
 
        trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
+       if (trans->hw_rev == 0xffffffff) {
+               dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n");
+               ret = -EIO;
+               goto out_no_pci;
+       }
+
        /*
         * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
         * changed, and now the revision step also includes bit 0-1 (no more
index b99f33ff912306f5638bb95f9578df88b82aadc4..b71cf55480fce7a5a4be7fbf2543036146b97733 100644 (file)
@@ -87,9 +87,9 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
 /*
  * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
  */
-static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
-                                         struct iwl_txq *txq, u16 byte_cnt,
-                                         int num_tbs)
+void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
+                                  struct iwl_txq *txq, u16 byte_cnt,
+                                  int num_tbs)
 {
        struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
        struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
@@ -127,8 +127,8 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
 /*
  * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
  */
-static void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
-                                        struct iwl_txq *txq)
+void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
+                                 struct iwl_txq *txq)
 {
        lockdep_assert_held(&txq->lock);
 
@@ -416,6 +416,37 @@ out_err:
        return NULL;
 }
 
+static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
+                                     struct sk_buff *skb,
+                                     struct iwl_tfh_tfd *tfd,
+                                     struct iwl_cmd_meta *out_meta)
+{
+       int i;
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+               dma_addr_t tb_phys;
+               int tb_idx;
+
+               if (!skb_frag_size(frag))
+                       continue;
+
+               tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+                                          skb_frag_size(frag), DMA_TO_DEVICE);
+
+               if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+                       return -ENOMEM;
+               tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
+                                             skb_frag_size(frag));
+               if (tb_idx < 0)
+                       return tb_idx;
+
+               out_meta->tbs |= BIT(tb_idx);
+       }
+
+       return 0;
+}
+
 static struct
 iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
                                    struct iwl_txq *txq,
@@ -428,7 +459,7 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
        int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
        struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
        dma_addr_t tb_phys;
-       int i, len, tb1_len, tb2_len;
+       int len, tb1_len, tb2_len;
        void *tb1_addr;
 
        tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
@@ -467,24 +498,8 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
                iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
        }
 
-       /* set up the remaining entries to point to the data */
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-               int tb_idx;
-
-               if (!skb_frag_size(frag))
-                       continue;
-
-               tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
-                                          skb_frag_size(frag), DMA_TO_DEVICE);
-
-               if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
-                       goto out_err;
-               tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
-                                             skb_frag_size(frag));
-
-               out_meta->tbs |= BIT(tb_idx);
-       }
+       if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
+               goto out_err;
 
        trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
                             IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
@@ -526,7 +541,12 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
 
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
 
-       if (amsdu)
+       /*
+        * Only build A-MSDUs here if doing so by GSO; otherwise the frame
+        * may be an A-MSDU for other reasons, e.g. NAN, or one that was
+        * already built by the higher layers.
+        */
+       if (amsdu && skb_shinfo(skb)->gso_size)
                return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
                                                    out_meta, hdr_len, len);
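Only frames that still carry a GSO superframe (nonzero gso_size) need A-MSDU building here; the QoS A-MSDU bit may equally mark an aggregate a higher layer built already, which must go out untouched. A sketch of the distinction, assuming amsdu is derived from the QoS control field as at the iwl_trans_pcie_tx() call site:

    /* assumed derivation of amsdu from the 802.11 QoS control field */
    bool amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
                 (*ieee80211_get_qos_ctl(hdr) &
                  IEEE80211_QOS_CTL_A_MSDU_PRESENT);

    if (amsdu && skb_shinfo(skb)->gso_size) {
            /* GSO superframe: segment into an A-MSDU here */
    } else {
            /* no A-MSDU, or one built by higher layers (e.g. NAN):
             * transmit as-is */
    }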
 
@@ -1065,8 +1085,8 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
        iwl_wake_queue(trans, txq);
 }
 
-static void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
-                                         struct iwl_txq *txq)
+void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
+                                  struct iwl_txq *txq)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct device *dev = trans->dev;
@@ -1120,23 +1140,13 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
        clear_bit(txq_id, trans_pcie->queue_used);
 }
 
-int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
-                                struct iwl_tx_queue_cfg_cmd *cmd,
-                                int cmd_id, int size,
-                                unsigned int timeout)
+int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
+                                    struct iwl_txq **intxq, int size,
+                                    unsigned int timeout)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_tx_queue_cfg_rsp *rsp;
-       struct iwl_txq *txq;
-       struct iwl_host_cmd hcmd = {
-               .id = cmd_id,
-               .len = { sizeof(*cmd) },
-               .data = { cmd, },
-               .flags = CMD_WANT_SKB,
-       };
-       int ret, qid;
-       u32 wr_ptr;
+       struct iwl_txq *txq;
+       int ret;
 
        txq = kzalloc(sizeof(*txq), GFP_KERNEL);
        if (!txq)
                return -ENOMEM;
@@ -1164,20 +1174,30 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
 
        txq->wd_timeout = msecs_to_jiffies(timeout);
 
-       cmd->tfdq_addr = cpu_to_le64(txq->dma_addr);
-       cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
-       cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+       *intxq = txq;
+       return 0;
 
-       ret = iwl_trans_send_cmd(trans, &hcmd);
-       if (ret)
-               goto error;
+error:
+       iwl_pcie_gen2_txq_free_memory(trans, txq);
+       return ret;
+}
+
+int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
+                                     struct iwl_txq *txq,
+                                     struct iwl_host_cmd *hcmd)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_tx_queue_cfg_rsp *rsp;
+       int ret, qid;
+       u32 wr_ptr;
 
-       if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
+       if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
+                   sizeof(*rsp))) {
                ret = -EINVAL;
                goto error_free_resp;
        }
 
-       rsp = (void *)hcmd.resp_pkt->data;
+       rsp = (void *)hcmd->resp_pkt->data;
        qid = le16_to_cpu(rsp->queue_number);
        wr_ptr = le16_to_cpu(rsp->write_pointer);
 
@@ -1204,11 +1224,48 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
                           (txq->write_ptr) | (qid << 16));
        IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
 
-       iwl_free_resp(&hcmd);
+       iwl_free_resp(hcmd);
        return qid;
 
 error_free_resp:
-       iwl_free_resp(&hcmd);
+       iwl_free_resp(hcmd);
+       iwl_pcie_gen2_txq_free_memory(trans, txq);
+       return ret;
+}
+
+int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
+                                __le16 flags, u8 sta_id, u8 tid,
+                                int cmd_id, int size,
+                                unsigned int timeout)
+{
+       struct iwl_txq *txq = NULL;
+       struct iwl_tx_queue_cfg_cmd cmd = {
+               .flags = flags,
+               .sta_id = sta_id,
+               .tid = tid,
+       };
+       struct iwl_host_cmd hcmd = {
+               .id = cmd_id,
+               .len = { sizeof(cmd) },
+               .data = { &cmd, },
+               .flags = CMD_WANT_SKB,
+       };
+       int ret;
+
+       ret = iwl_trans_pcie_dyn_txq_alloc_dma(trans, &txq, size, timeout);
+       if (ret)
+               return ret;
+
+       cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
+       cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
+       cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+
+       ret = iwl_trans_send_cmd(trans, &hcmd);
+       if (ret)
+               goto error;
+
+       return iwl_trans_pcie_txq_alloc_response(trans, txq, &hcmd);
+
 error:
        iwl_pcie_gen2_txq_free_memory(trans, txq);
        return ret;
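Dynamic queue allocation now reads as three explicit steps that callers can also reuse separately. In outline, using the function names from this patch:

    ret = iwl_trans_pcie_dyn_txq_alloc_dma(trans, &txq, size, timeout);
    if (ret)                            /* 1) ring + byte-count table */
            return ret;

    ret = iwl_trans_send_cmd(trans, &hcmd);
    if (ret)                            /* 2) TX_QUEUE_CFG to firmware */
            goto error;

    /* 3) adopt the queue id the firmware chose, or clean up */
    return iwl_trans_pcie_txq_alloc_response(trans, txq, &hcmd);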
@@ -1251,30 +1308,31 @@ void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
        }
 }
 
-int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
+int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_txq *cmd_queue;
-       int txq_id = trans_pcie->cmd_queue, ret;
+       struct iwl_txq *queue;
+       int ret;
 
-       /* alloc and init the command queue */
+       /* alloc and init the tx queue */
        if (!trans_pcie->txq[txq_id]) {
-               cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL);
-               if (!cmd_queue) {
-                       IWL_ERR(trans, "Not enough memory for command queue\n");
+               queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+               if (!queue) {
+                       IWL_ERR(trans, "Not enough memory for tx queue\n");
                        return -ENOMEM;
                }
-               trans_pcie->txq[txq_id] = cmd_queue;
-               ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true);
+               trans_pcie->txq[txq_id] = queue;
+               ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
                if (ret) {
                        IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
                        goto error;
                }
        } else {
-               cmd_queue = trans_pcie->txq[txq_id];
+               queue = trans_pcie->txq[txq_id];
        }
 
-       ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true);
+       ret = iwl_pcie_txq_init(trans, queue, queue_size,
+                               (txq_id == trans_pcie->cmd_queue));
        if (ret) {
                IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
                goto error;
index 93f0d387688a1314a54a8f17f4c02153dadd802d..f227b91098c9812715399f7a578d25cafa09fdc9 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
  *
@@ -1101,7 +1097,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
        if (!iwl_queue_used(txq, last_to_free)) {
                IWL_ERR(trans,
-                       "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
+                       "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
                        __func__, txq_id, last_to_free,
                        trans->cfg->base_params->max_tfd_queue_size,
                        txq->write_ptr, txq->read_ptr);
@@ -1188,6 +1184,10 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
 
        lockdep_assert_held(&trans_pcie->reg_lock);
 
+       /* Make sure the NIC is still alive in the bus */
+       if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+               return -ENODEV;
+
        if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
            !trans_pcie->ref_cmd_in_flight) {
                trans_pcie->ref_cmd_in_flight = true;
@@ -1230,7 +1230,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
  * need to be reclaimed. As a result, some free space forms.  If there is
  * enough free space (> low mark), wake the stack that feeds us.
  */
-static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
+void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq = trans_pcie->txq[txq_id];
@@ -1912,7 +1912,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
        }
 
        if (test_bit(STATUS_FW_ERROR, &trans->status)) {
-               iwl_trans_dump_regs(trans);
+               iwl_trans_pcie_dump_regs(trans);
                IWL_ERR(trans, "FW error in SYNC CMD %s\n",
                        iwl_get_cmd_string(trans, cmd->id));
                dump_stack();
@@ -1957,6 +1957,10 @@ cancel:
 
 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 {
+       /* Make sure the NIC is still alive in the bus */
+       if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+               return -ENODEV;
+
        if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
            test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
                IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
@@ -1973,29 +1977,24 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 
 static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
                             struct iwl_txq *txq, u8 hdr_len,
-                            struct iwl_cmd_meta *out_meta,
-                            struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+                            struct iwl_cmd_meta *out_meta)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       u16 tb2_len;
+       u16 head_tb_len;
        int i;
 
        /*
         * Set up TFD's third entry to point directly to remainder
         * of skb's head, if any
         */
-       tb2_len = skb_headlen(skb) - hdr_len;
+       head_tb_len = skb_headlen(skb) - hdr_len;
 
-       if (tb2_len > 0) {
-               dma_addr_t tb2_phys = dma_map_single(trans->dev,
-                                                    skb->data + hdr_len,
-                                                    tb2_len, DMA_TO_DEVICE);
-               if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
-                       iwl_pcie_tfd_unmap(trans, out_meta, txq,
-                                          txq->write_ptr);
+       if (head_tb_len > 0) {
+               dma_addr_t tb_phys = dma_map_single(trans->dev,
+                                                   skb->data + hdr_len,
+                                                   head_tb_len, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
                        return -EINVAL;
-               }
-               iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
+               iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
        }
 
        /* set up the remaining entries to point to the data */
@@ -2010,23 +2009,16 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
                tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
 
-               if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
-                       iwl_pcie_tfd_unmap(trans, out_meta, txq,
-                                          txq->write_ptr);
+               if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
                        return -EINVAL;
-               }
                tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
                                                skb_frag_size(frag), false);
+               if (tb_idx < 0)
+                       return tb_idx;
 
                out_meta->tbs |= BIT(tb_idx);
        }
 
-       trace_iwlwifi_dev_tx(trans->dev, skb,
-                            iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
-                            trans_pcie->tfd_size,
-                            &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
-                            hdr_len);
-       trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
        return 0;
 }
 
@@ -2087,7 +2079,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
        u8 *start_hdr;
        struct iwl_tso_hdr_page *hdr_page;
        struct page **page_ptr;
-       int ret;
        struct tso_t tso;
 
        /* if the packet is protected, then it must be CCMP or GCMP */
@@ -2173,10 +2164,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
                if (trans_pcie->sw_csum_tx) {
                        csum_skb = alloc_skb(data_left + tcp_hdrlen(skb),
                                             GFP_ATOMIC);
-                       if (!csum_skb) {
-                               ret = -ENOMEM;
-                               goto out_unmap;
-                       }
+                       if (!csum_skb)
+                               return -ENOMEM;
 
                        iwl_compute_pseudo_hdr_csum(iph, tcph,
                                                    skb->protocol ==
@@ -2197,8 +2186,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
                                             hdr_tb_len, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
                        dev_kfree_skb(csum_skb);
-                       ret = -EINVAL;
-                       goto out_unmap;
+                       return -EINVAL;
                }
                iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
                                       hdr_tb_len, false);
@@ -2223,8 +2211,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
                                                 size, DMA_TO_DEVICE);
                        if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
                                dev_kfree_skb(csum_skb);
-                               ret = -EINVAL;
-                               goto out_unmap;
+                               return -EINVAL;
                        }
 
                        iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
@@ -2258,10 +2245,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
        skb_push(skb, hdr_len + iv_len);
 
        return 0;
-
-out_unmap:
-       iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
-       return ret;
 }
 #else /* CONFIG_INET */
 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
@@ -2426,9 +2409,26 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                                                     out_meta, dev_cmd,
                                                     tb1_len)))
                        goto out_err;
-       } else if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
-                                      out_meta, dev_cmd, tb1_len))) {
-               goto out_err;
+       } else {
+               struct sk_buff *frag;
+
+               if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
+                                              out_meta)))
+                       goto out_err;
+
+               skb_walk_frags(skb, frag) {
+                       if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
+                                                      out_meta)))
+                               goto out_err;
+               }
+
+               trace_iwlwifi_dev_tx(trans->dev, skb,
+                                    iwl_pcie_get_tfd(trans, txq,
+                                                     txq->write_ptr),
+                                    trans_pcie->tfd_size,
+                                    &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
+                                    hdr_len);
+               trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
        }
 
        /* building the A-MSDU might have changed this data, so memcpy it now */
@@ -2473,6 +2473,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        spin_unlock(&txq->lock);
        return 0;
 out_err:
+       iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
        spin_unlock(&txq->lock);
        return -1;
 }
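Error unwinding in the non-A-MSDU path is centralized to match: the fill helpers only report failure, and the single out_err label unmaps whatever was built so far before unlocking. The call-site shape this produces:

    if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len, out_meta)))
            goto out_err;       /* out_err unmaps the partial TFD once */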
index 94ad6fe29e69bdf199157e78023e15595e22daa9..21bb68457cfe91e4297c04533a192be1fdf22c8a 100644 (file)
 #define URB_ASYNC_UNLINK 0
 #endif
 
-/* 802.2 LLC/SNAP header used for Ethernet encapsulation over 802.11 */
-static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
-#define ENCAPS_OVERHEAD                (sizeof(encaps_hdr) + 2)
-
 struct header_struct {
        /* 802.3 */
        u8 dest[ETH_ALEN];
@@ -915,7 +911,7 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv,
        default:
                err("%s: Unexpected context state %d", __func__,
                    state);
-               /* fall though */
+               /* fall through */
        case EZUSB_CTX_REQ_TIMEOUT:
        case EZUSB_CTX_REQ_FAILED:
        case EZUSB_CTX_RESP_TIMEOUT:
index f3863101af783d74487a4084f2bdddba908f6eb3..6f2730c7229b970d87a9b1bffc8c316482f3db88 100644 (file)
@@ -521,7 +521,6 @@ struct mac80211_hwsim_data {
        int channels, idx;
        bool use_chanctx;
        bool destroy_on_close;
-       struct work_struct destroy_work;
        u32 portid;
        char alpha2[2];
        const struct ieee80211_regdomain *regd;
@@ -2931,8 +2930,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
        hwsim_radios_generation++;
        spin_unlock_bh(&hwsim_radio_lock);
 
-       if (idx > 0)
-               hwsim_mcast_new_radio(idx, info, param);
+       hwsim_mcast_new_radio(idx, info, param);
 
        return idx;
 
@@ -3561,30 +3559,27 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
        .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
 };
 
-static void destroy_radio(struct work_struct *work)
-{
-       struct mac80211_hwsim_data *data =
-               container_of(work, struct mac80211_hwsim_data, destroy_work);
-
-       hwsim_radios_generation++;
-       mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL);
-}
-
 static void remove_user_radios(u32 portid)
 {
        struct mac80211_hwsim_data *entry, *tmp;
+       LIST_HEAD(list);
 
        spin_lock_bh(&hwsim_radio_lock);
        list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) {
                if (entry->destroy_on_close && entry->portid == portid) {
-                       list_del(&entry->list);
+                       list_move(&entry->list, &list);
                        rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht,
                                               hwsim_rht_params);
-                       INIT_WORK(&entry->destroy_work, destroy_radio);
-                       queue_work(hwsim_wq, &entry->destroy_work);
+                       hwsim_radios_generation++;
                }
        }
        spin_unlock_bh(&hwsim_radio_lock);
+
+       list_for_each_entry_safe(entry, tmp, &list, list) {
+               list_del(&entry->list);
+               mac80211_hwsim_del_radio(entry, wiphy_name(entry->hw->wiphy),
+                                        NULL);
+       }
 }
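Both teardown paths now share one shape: detach matching radios onto a private list under the spinlock, then destroy them after dropping it, so mac80211_hwsim_del_radio() never runs in atomic context. The generic pattern, with should_remove() standing in for the per-path condition:

    LIST_HEAD(list);

    spin_lock_bh(&hwsim_radio_lock);
    list_for_each_entry_safe(entry, tmp, &hwsim_radios, list)
            if (should_remove(entry))           /* hypothetical predicate */
                    list_move(&entry->list, &list);
    spin_unlock_bh(&hwsim_radio_lock);

    list_for_each_entry_safe(entry, tmp, &list, list) {
            list_del(&entry->list);
            mac80211_hwsim_del_radio(entry, wiphy_name(entry->hw->wiphy),
                                     NULL);
    }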
 
 static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
@@ -3642,6 +3637,7 @@ static __net_init int hwsim_init_net(struct net *net)
 static void __net_exit hwsim_exit_net(struct net *net)
 {
        struct mac80211_hwsim_data *data, *tmp;
+       LIST_HEAD(list);
 
        spin_lock_bh(&hwsim_radio_lock);
        list_for_each_entry_safe(data, tmp, &hwsim_radios, list) {
@@ -3652,17 +3648,19 @@ static void __net_exit hwsim_exit_net(struct net *net)
                if (data->netgroup == hwsim_net_get_netgroup(&init_net))
                        continue;
 
-               list_del(&data->list);
+               list_move(&data->list, &list);
                rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
                                       hwsim_rht_params);
                hwsim_radios_generation++;
-               spin_unlock_bh(&hwsim_radio_lock);
+       }
+       spin_unlock_bh(&hwsim_radio_lock);
+
+       list_for_each_entry_safe(data, tmp, &list, list) {
+               list_del(&data->list);
                mac80211_hwsim_del_radio(data,
                                         wiphy_name(data->hw->wiphy),
                                         NULL);
-               spin_lock_bh(&hwsim_radio_lock);
        }
-       spin_unlock_bh(&hwsim_radio_lock);
 
        ida_simple_remove(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net));
 }
index e92fc5001171714e50bfb36bd195d3d8ef421ced..789337ea676acd506cf9829bb5f7dba00ec2f6dc 100644 (file)
@@ -605,9 +605,10 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
 {
        unsigned long flags;
 
-       if (recvlength > LBS_CMD_BUFFER_SIZE) {
+       if (recvlength < MESSAGE_HEADER_LEN ||
+           recvlength > LBS_CMD_BUFFER_SIZE) {
                lbtf_deb_usbd(&cardp->udev->dev,
-                            "The receive buffer is too large\n");
+                            "The receive buffer is invalid: %d\n", recvlength);
                kfree_skb(skb);
                return;
        }
index 433c6a16870b6b5ef92b782dd13a3cf95f7a520e..d445acc4786b75ce562477f9579dbd9a1fed0485 100644 (file)
@@ -298,6 +298,19 @@ static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size)
        struct mwifiex_adapter *adapter = ctx->adapter;
        struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
 
+       if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
+               if (card->rx_cmd_ep == ctx->ep) {
+                       mwifiex_dbg(adapter, INFO, "%s: free rx_cmd skb\n",
+                                   __func__);
+                       dev_kfree_skb_any(ctx->skb);
+                       ctx->skb = NULL;
+               }
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: card removed/suspended, EP %d rx_cmd URB submit skipped\n",
+                           __func__, ctx->ep);
+               return -1;
+       }
+
        if (card->rx_cmd_ep != ctx->ep) {
                ctx->skb = dev_alloc_skb(size);
                if (!ctx->skb) {
index b6c5f17dca30a57395af65a4a828e95a58f75e3b..0ccbcd7e887d67d1352886736411e13c0813ccfb 100644 (file)
@@ -5,33 +5,13 @@ config MT76_USB
        tristate
        depends on MT76_CORE
 
-config MT76x2_COMMON
+config MT76x02_LIB
        tristate
-       depends on MT76_CORE
-
-config MT76x0U
-       tristate "MediaTek MT76x0U (USB) support"
-       select MT76_CORE
-       depends on MAC80211
-       depends on USB
-       help
-         This adds support for MT7610U-based wireless USB dongles.
-
-config MT76x2E
-       tristate "MediaTek MT76x2E (PCIe) support"
        select MT76_CORE
-       select MT76x2_COMMON
-       depends on MAC80211
-       depends on PCI
-       ---help---
-         This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices.
 
-config MT76x2U
-       tristate "MediaTek MT76x2U (USB) support"
-       select MT76_CORE
+config MT76x02_USB
+       tristate
        select MT76_USB
-       select MT76x2_COMMON
-       depends on MAC80211
-       depends on USB
-       help
-         This adds support for MT7612U-based wireless USB dongles.
+
+source "drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig"
index 158d10d2716c4e0afd3d3926445083d6377680d0..9b8d7488c5454dfe1d17aea07db022b61400bcf1 100644 (file)
@@ -1,9 +1,7 @@
 obj-$(CONFIG_MT76_CORE) += mt76.o
 obj-$(CONFIG_MT76_USB) += mt76-usb.o
-obj-$(CONFIG_MT76x0U) += mt76x0/
-obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o
-obj-$(CONFIG_MT76x2E) += mt76x2e.o
-obj-$(CONFIG_MT76x2U) += mt76x2u.o
+obj-$(CONFIG_MT76x02_LIB) += mt76x02-lib.o
+obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o
 
 mt76-y := \
        mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o
@@ -12,20 +10,13 @@ mt76-usb-y := usb.o usb_trace.o usb_mcu.o
 
 CFLAGS_trace.o := -I$(src)
 CFLAGS_usb_trace.o := -I$(src)
+CFLAGS_mt76x02_trace.o := -I$(src)
 
-mt76x2-common-y := \
-       mt76x2_eeprom.o mt76x2_tx_common.o mt76x2_mac_common.o \
-       mt76x2_init_common.o mt76x2_common.o mt76x2_phy_common.o \
-       mt76x2_debugfs.o
+mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o mt76x02_mcu.o \
+                mt76x02_eeprom.o mt76x02_phy.o mt76x02_mmio.o \
+                mt76x02_txrx.o mt76x02_trace.o
 
-mt76x2e-y := \
-       mt76x2_pci.o mt76x2_dma.o \
-       mt76x2_main.o mt76x2_init.o mt76x2_tx.o \
-       mt76x2_core.o mt76x2_mac.o mt76x2_mcu.o mt76x2_phy.o \
-       mt76x2_dfs.o mt76x2_trace.o
+mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o
 
-mt76x2u-y := \
-       mt76x2_usb.o mt76x2u_init.o mt76x2u_main.o mt76x2u_mac.o \
-       mt76x2u_mcu.o mt76x2u_phy.o mt76x2u_core.o
-
-CFLAGS_mt76x2_trace.o := -I$(src)
+obj-$(CONFIG_MT76x0_COMMON) += mt76x0/
+obj-$(CONFIG_MT76x2_COMMON) += mt76x2/
index a38d05dea599c28a44c7254a3bf799fa3617340b..a5adf22c3ffad133d2a87acc8d0af9c46b73b248 100644 (file)
@@ -56,6 +56,35 @@ mt76_queues_read(struct seq_file *s, void *data)
        return 0;
 }
 
+void mt76_seq_puts_array(struct seq_file *file, const char *str,
+                        s8 *val, int len)
+{
+       int i;
+
+       seq_printf(file, "%10s:", str);
+       for (i = 0; i < len; i++)
+               seq_printf(file, " %2d", val[i]);
+       seq_puts(file, "\n");
+}
+EXPORT_SYMBOL_GPL(mt76_seq_puts_array);
+
+static int mt76_read_rate_txpower(struct seq_file *s, void *data)
+{
+       struct mt76_dev *dev = dev_get_drvdata(s->private);
+
+       mt76_seq_puts_array(s, "CCK", dev->rate_power.cck,
+                           ARRAY_SIZE(dev->rate_power.cck));
+       mt76_seq_puts_array(s, "OFDM", dev->rate_power.ofdm,
+                           ARRAY_SIZE(dev->rate_power.ofdm));
+       mt76_seq_puts_array(s, "STBC", dev->rate_power.stbc,
+                           ARRAY_SIZE(dev->rate_power.stbc));
+       mt76_seq_puts_array(s, "HT", dev->rate_power.ht,
+                           ARRAY_SIZE(dev->rate_power.ht));
+       mt76_seq_puts_array(s, "VHT", dev->rate_power.vht,
+                           ARRAY_SIZE(dev->rate_power.vht));
+       return 0;
+}
+
 struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
 {
        struct dentry *dir;
@@ -72,6 +101,8 @@ struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
        if (dev->otp.data)
                debugfs_create_blob("otp", 0400, dir, &dev->otp);
        debugfs_create_devm_seqfile(dev->dev, "queues", dir, mt76_queues_read);
+       debugfs_create_devm_seqfile(dev->dev, "rate_txpower", dir,
+                                   mt76_read_rate_txpower);
 
        return dir;
 }
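
The new rate_txpower entry above prints one labelled row per modulation group through mt76_seq_puts_array(). A minimal userspace sketch of that formatting loop, using plain stdio in place of the kernel's seq_file API; the sample power values are made up:

#include <stdio.h>

/* Same row format as mt76_seq_puts_array(): a right-aligned label
 * followed by one signed per-rate power value in driver units.
 */
static void seq_puts_array(const char *str, const signed char *val, int len)
{
	int i;

	printf("%10s:", str);
	for (i = 0; i < len; i++)
		printf(" %2d", val[i]);
	printf("\n");
}

int main(void)
{
	signed char cck[4] = { 8, 8, 6, 6 };	/* made-up sample powers */
	signed char ofdm[8] = { 7, 7, 7, 7, 6, 6, 5, 5 };

	seq_puts_array("CCK", cck, 4);
	seq_puts_array("OFDM", ofdm, 8);
	return 0;
}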
index c51da2205b938b87d5fb4b5c4ef0c15c1a5adaa4..f7fbd70164031165325d0eeb5f2067332828393e 100644 (file)
@@ -322,19 +322,13 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
        int len = SKB_WITH_OVERHEAD(q->buf_size);
        int offset = q->buf_offset;
        int idx;
-       void *(*alloc)(unsigned int fragsz);
-
-       if (napi)
-               alloc = napi_alloc_frag;
-       else
-               alloc = netdev_alloc_frag;
 
        spin_lock_bh(&q->lock);
 
        while (q->queued < q->ndesc - 1) {
                struct mt76_queue_buf qbuf;
 
-               buf = alloc(q->buf_size);
+               buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
                if (!buf)
                        break;
 
@@ -361,6 +355,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
 static void
 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 {
+       struct page *page;
        void *buf;
        bool more;
 
@@ -373,6 +368,13 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
                skb_free_frag(buf);
        } while (1);
        spin_unlock_bh(&q->lock);
+
+       if (!q->rx_page.va)
+               return;
+
+       page = virt_to_page(q->rx_page.va);
+       __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+       memset(&q->rx_page, 0, sizeof(q->rx_page));
 }
 
 static void
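
The dma.c hunks above drop the napi_alloc_frag/netdev_alloc_frag indirection in favour of a per-queue page_frag_cache that is drained exactly once in mt76_dma_rx_cleanup(). A rough userspace model of that bookkeeping, with malloc standing in for page allocation; the sizes and field names are illustrative only:

#include <stdio.h>
#include <stdlib.h>

struct frag_cache {
	char *page;		/* backing block, like rx_page.va */
	size_t size, offset;	/* carve-out cursor */
	int bias;		/* outstanding frags, like pagecnt_bias */
};

/* carve a fixed-size buffer out of the cached block */
static void *frag_alloc(struct frag_cache *c, size_t fragsz)
{
	if (!c->page) {
		c->size = 32768;
		c->offset = 0;
		c->bias = 0;
		c->page = malloc(c->size);
	}
	if (!c->page || c->offset + fragsz > c->size)
		return NULL;	/* the kernel would refill a fresh page here */

	c->bias++;
	c->offset += fragsz;
	return c->page + c->offset - fragsz;
}

/* mirror mt76_dma_rx_cleanup(): release the backing block exactly once */
static void frag_cache_drain(struct frag_cache *c)
{
	if (!c->page)
		return;
	free(c->page);
	c->page = NULL;
}

int main(void)
{
	struct frag_cache rx_page = { 0 };
	void *buf = frag_alloc(&rx_page, 2048);

	printf("frag at %p, outstanding=%d\n", buf, rx_page.bias);
	frag_cache_drain(&rx_page);
	return 0;
}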
index 27248e24a19b1b82b08a99e894642e39afc91b93..357cc356342d214b96c024a482069e413c341db6 100644 (file)
 #define MT_DMA_CTL_LAST_SEC0           BIT(30)
 #define MT_DMA_CTL_DMA_DONE            BIT(31)
 
-#define MT_TXD_INFO_LEN                        GENMASK(15, 0)
-#define MT_TXD_INFO_NEXT_VLD           BIT(16)
-#define MT_TXD_INFO_TX_BURST           BIT(17)
-#define MT_TXD_INFO_80211              BIT(19)
-#define MT_TXD_INFO_TSO                        BIT(20)
-#define MT_TXD_INFO_CSO                        BIT(21)
-#define MT_TXD_INFO_WIV                        BIT(24)
-#define MT_TXD_INFO_QSEL               GENMASK(26, 25)
-#define MT_TXD_INFO_DPORT              GENMASK(29, 27)
-#define MT_TXD_INFO_TYPE               GENMASK(31, 30)
-
-#define MT_RX_FCE_INFO_LEN             GENMASK(13, 0)
-#define MT_RX_FCE_INFO_SELF_GEN                BIT(15)
-#define MT_RX_FCE_INFO_CMD_SEQ         GENMASK(19, 16)
-#define MT_RX_FCE_INFO_EVT_TYPE                GENMASK(23, 20)
-#define MT_RX_FCE_INFO_PCIE_INTR       BIT(24)
-#define MT_RX_FCE_INFO_QSEL            GENMASK(26, 25)
-#define MT_RX_FCE_INFO_D_PORT          GENMASK(29, 27)
-#define MT_RX_FCE_INFO_TYPE            GENMASK(31, 30)
-
-/* MCU request message header  */
-#define MT_MCU_MSG_LEN                 GENMASK(15, 0)
-#define MT_MCU_MSG_CMD_SEQ             GENMASK(19, 16)
-#define MT_MCU_MSG_CMD_TYPE            GENMASK(26, 20)
-#define MT_MCU_MSG_PORT                        GENMASK(29, 27)
-#define MT_MCU_MSG_TYPE                        GENMASK(31, 30)
-#define MT_MCU_MSG_TYPE_CMD            BIT(30)
-
 #define MT_DMA_HDR_LEN                 4
 #define MT_RX_INFO_LEN                 4
 #define MT_FCE_INFO_LEN                        4
@@ -65,14 +37,21 @@ struct mt76_desc {
        __le32 info;
 } __packed __aligned(4);
 
-enum dma_msg_port {
-       WLAN_PORT,
-       CPU_RX_PORT,
-       CPU_TX_PORT,
-       HOST_PORT,
-       VIRTUAL_CPU_RX_PORT,
-       VIRTUAL_CPU_TX_PORT,
-       DISCARD,
+enum mt76_qsel {
+       MT_QSEL_MGMT,
+       MT_QSEL_HCCA,
+       MT_QSEL_EDCA,
+       MT_QSEL_EDCA_2,
+};
+
+enum mt76_mcu_evt_type {
+       EVT_CMD_DONE,
+       EVT_CMD_ERROR,
+       EVT_CMD_RETRY,
+       EVT_EVENT_PWR_RSP,
+       EVT_EVENT_WOW_RSP,
+       EVT_EVENT_CARRIER_DETECT_RSP,
+       EVT_EVENT_DFS_DETECT_RSP,
 };
 
 int mt76_dma_attach(struct mt76_dev *dev);
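
The descriptor and message layouts in this header are expressed as BIT()/GENMASK() masks and manipulated with FIELD_PREP()/FIELD_GET() from linux/bitfield.h. A self-contained approximation of those helpers for 32-bit fields; CTL_SD_LEN0 below is a hypothetical field, and __builtin_ctz assumes gcc or clang:

#include <stdio.h>
#include <stdint.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	(((~0u) >> (31 - (h))) & ~((1u << (l)) - 1u))

/* shift a value into place under a mask / pull it back out */
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

#define CTL_SD_LEN0	GENMASK(29, 16)	/* hypothetical 14-bit length field */
#define CTL_DMA_DONE	BIT(31)

int main(void)
{
	uint32_t ctl = FIELD_PREP(CTL_SD_LEN0, 1536) | CTL_DMA_DONE;

	printf("len0=%u done=%u\n",
	       FIELD_GET(CTL_SD_LEN0, ctl),
	       FIELD_GET(CTL_DMA_DONE, ctl));
	return 0;
}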
index 029d54bce9e81e160fc6ad06a1192552ec817eca..2a699e8b79bfbdb2aadc798278c03439ed53811d 100644 (file)
@@ -283,6 +283,7 @@ mt76_alloc_device(unsigned int size, const struct ieee80211_ops *ops)
        spin_lock_init(&dev->rx_lock);
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->cc_lock);
+       mutex_init(&dev->mutex);
        init_waitqueue_head(&dev->tx_wait);
 
        return dev;
@@ -305,6 +306,8 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
 
        wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
 
+       wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
        wiphy->available_antennas_tx = dev->antenna_mask;
        wiphy->available_antennas_rx = dev->antenna_mask;
 
@@ -472,7 +475,7 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
 }
 EXPORT_SYMBOL(mt76_wcid_key_setup);
 
-static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb)
+struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb)
 {
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct mt76_rx_status mstat;
@@ -497,6 +500,7 @@ static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb)
 
        return wcid_to_sta(mstat.wcid);
 }
+EXPORT_SYMBOL(mt76_rx_convert);
 
 static int
 mt76_check_ccmp_pn(struct sk_buff *skb)
@@ -546,6 +550,12 @@ mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb)
        struct mt76_wcid *wcid = status->wcid;
        bool ps;
 
+       if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) {
+               sta = ieee80211_find_sta_by_ifaddr(dev->hw, hdr->addr2, NULL);
+               if (sta)
+                       wcid = status->wcid = (struct mt76_wcid *) sta->drv_priv;
+       }
+
        if (!wcid || !wcid->sta)
                return;
 
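
The PS-Poll hunk above resolves a missing per-packet station context on demand: when the rx status carries no wcid, the station is looked up by its transmitter address and the result is cached back into the status for the rest of rx handling. A generic model of that look-up-then-cache step over a toy station table; the addresses and indices are invented:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct sta { uint8_t addr[6]; int wcid_idx; };

static struct sta stations[] = {
	{ { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }, 7 },
};

/* stand-in for ieee80211_find_sta_by_ifaddr(): linear scan by MAC */
static struct sta *find_sta_by_addr(const uint8_t *addr)
{
	size_t i;

	for (i = 0; i < sizeof(stations) / sizeof(stations[0]); i++)
		if (!memcmp(stations[i].addr, addr, 6))
			return &stations[i];
	return NULL;
}

int main(void)
{
	const uint8_t addr2[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	struct sta *wcid = NULL;		/* nothing cached yet */

	if (!wcid) {				/* the "pspoll && !wcid" path */
		struct sta *sta = find_sta_by_addr(addr2);

		if (sta)
			wcid = sta;		/* cache for later rx handling */
	}
	printf("resolved wcid index: %d\n", wcid ? wcid->wcid_idx : -1);
	return 0;
}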
index 09a14dead6e37cbb7ddc748af2712afaa25105d3..30a5d928e655ea3bd72ca400be7dc44a16f916ae 100644 (file)
@@ -21,7 +21,7 @@ static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
 {
        u32 val;
 
-       val = ioread32(dev->regs + offset);
+       val = ioread32(dev->mmio.regs + offset);
        trace_reg_rr(dev, offset, val);
 
        return val;
@@ -30,7 +30,7 @@ static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
 static void mt76_mmio_wr(struct mt76_dev *dev, u32 offset, u32 val)
 {
        trace_reg_wr(dev, offset, val);
-       iowrite32(val, dev->regs + offset);
+       iowrite32(val, dev->mmio.regs + offset);
 }
 
 static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
@@ -43,7 +43,31 @@ static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
 static void mt76_mmio_copy(struct mt76_dev *dev, u32 offset, const void *data,
                           int len)
 {
-       __iowrite32_copy(dev->regs + offset, data, len >> 2);
+       __iowrite32_copy(dev->mmio.regs + offset, data, len >> 2);
+}
+
+static int mt76_mmio_wr_rp(struct mt76_dev *dev, u32 base,
+                          const struct mt76_reg_pair *data, int len)
+{
+       while (len > 0) {
+               mt76_mmio_wr(dev, data->reg, data->value);
+               data++;
+               len--;
+       }
+
+       return 0;
+}
+
+static int mt76_mmio_rd_rp(struct mt76_dev *dev, u32 base,
+                          struct mt76_reg_pair *data, int len)
+{
+       while (len > 0) {
+               data->value = mt76_mmio_rr(dev, data->reg);
+               data++;
+               len--;
+       }
+
+       return 0;
 }
 
 void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
@@ -53,9 +77,16 @@ void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
                .rmw = mt76_mmio_rmw,
                .wr = mt76_mmio_wr,
                .copy = mt76_mmio_copy,
+               .wr_rp = mt76_mmio_wr_rp,
+               .rd_rp = mt76_mmio_rd_rp,
        };
 
        dev->bus = &mt76_mmio_ops;
-       dev->regs = regs;
+       dev->mmio.regs = regs;
+
+       skb_queue_head_init(&dev->mmio.mcu.res_q);
+       init_waitqueue_head(&dev->mmio.mcu.wait);
+       spin_lock_init(&dev->mmio.irq_lock);
+       mutex_init(&dev->mmio.mcu.mutex);
 }
 EXPORT_SYMBOL_GPL(mt76_mmio_init);
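
The wr_rp()/rd_rp() callbacks added above batch register access as an array of {reg, value} pairs, so a plain MMIO bus and an MCU-mediated bus can share one interface. A standalone model of those two loops in which a plain array plays the register file:

#include <stdio.h>
#include <stdint.h>

struct reg_pair {
	uint32_t reg;
	uint32_t value;
};

static uint32_t regs[256];	/* fake register file in place of MMIO */

static void mmio_wr(uint32_t offset, uint32_t val) { regs[offset] = val; }
static uint32_t mmio_rr(uint32_t offset) { return regs[offset]; }

/* apply the single-register accessor to each pair, as mt76_mmio_wr_rp() does */
static int wr_rp(const struct reg_pair *data, int len)
{
	while (len > 0) {
		mmio_wr(data->reg, data->value);
		data++;
		len--;
	}
	return 0;
}

static int rd_rp(struct reg_pair *data, int len)
{
	while (len > 0) {
		data->value = mmio_rr(data->reg);
		data++;
		len--;
	}
	return 0;
}

int main(void)
{
	struct reg_pair rp[] = { { 10, 0xdead }, { 11, 0xbeef } };

	wr_rp(rp, 2);
	rp[0].value = rp[1].value = 0;
	rd_rp(rp, 2);
	printf("%x %x\n", rp[0].value, rp[1].value);
	return 0;
}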
index 2eab358791633e80e8e0f3c4b10c8d37d0839fa9..f723a07cab29c6f5db1d3ceb19f053c96b158e74 100644 (file)
 struct mt76_dev;
 struct mt76_wcid;
 
+struct mt76_reg_pair {
+       u32 reg;
+       u32 value;
+};
+
 struct mt76_bus_ops {
        u32 (*rr)(struct mt76_dev *dev, u32 offset);
        void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
        u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
        void (*copy)(struct mt76_dev *dev, u32 offset, const void *data,
                     int len);
+       int (*wr_rp)(struct mt76_dev *dev, u32 base,
+                    const struct mt76_reg_pair *rp, int len);
+       int (*rd_rp)(struct mt76_dev *dev, u32 base,
+                    struct mt76_reg_pair *rp, int len);
 };
 
 enum mt76_txq_id {
@@ -112,6 +121,18 @@ struct mt76_queue {
 
        dma_addr_t desc_dma;
        struct sk_buff *rx_head;
+       struct page_frag_cache rx_page;
+       spinlock_t rx_page_lock;
+};
+
+struct mt76_mcu_ops {
+       struct sk_buff *(*mcu_msg_alloc)(const void *data, int len);
+       int (*mcu_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
+                           int cmd, bool wait_resp);
+       int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
+                        const struct mt76_reg_pair *rp, int len);
+       int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
+                        struct mt76_reg_pair *rp, int len);
 };
 
 struct mt76_queue_ops {
@@ -143,6 +164,8 @@ enum mt76_wcid_flags {
        MT_WCID_FLAG_PS,
 };
 
+#define MT76_N_WCIDS 128
+
 struct mt76_wcid {
        struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
 
@@ -210,7 +233,6 @@ enum {
        MT76_OFFCHANNEL,
        MT76_REMOVED,
        MT76_READING_STATS,
-       MT76_MORE_STATS,
 };
 
 struct mt76_hw_cap {
@@ -252,6 +274,19 @@ struct mt76_sband {
        struct mt76_channel_state *chan;
 };
 
+struct mt76_rate_power {
+       union {
+               struct {
+                       s8 cck[4];
+                       s8 ofdm[8];
+                       s8 stbc[10];
+                       s8 ht[16];
+                       s8 vht[10];
+               };
+               s8 all[48];
+       };
+};
+
 /* addr req mask */
 #define MT_VEND_TYPE_EEPROM    BIT(31)
 #define MT_VEND_TYPE_CFG       BIT(30)
@@ -307,9 +342,29 @@ struct mt76_usb {
                struct completion cmpl;
                struct mt76u_buf res;
                u32 msg_seq;
+
+               /* multiple reads */
+               struct mt76_reg_pair *rp;
+               int rp_len;
+               u32 base;
+               bool burst;
        } mcu;
 };
 
+struct mt76_mmio {
+       struct mt76e_mcu {
+               struct mutex mutex;
+
+               wait_queue_head_t wait;
+               struct sk_buff_head res_q;
+
+               u32 msg_seq;
+       } mcu;
+       void __iomem *regs;
+       spinlock_t irq_lock;
+       u32 irqmask;
+};
+
 struct mt76_dev {
        struct ieee80211_hw *hw;
        struct cfg80211_chan_def chandef;
@@ -317,9 +372,12 @@ struct mt76_dev {
 
        spinlock_t lock;
        spinlock_t cc_lock;
+
+       struct mutex mutex;
+
        const struct mt76_bus_ops *bus;
        const struct mt76_driver_ops *drv;
-       void __iomem *regs;
+       const struct mt76_mcu_ops *mcu_ops;
        struct device *dev;
 
        struct net_device napi_dev;
@@ -334,11 +392,17 @@ struct mt76_dev {
 
        wait_queue_head_t tx_wait;
 
+       unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];
+
+       struct mt76_wcid global_wcid;
+       struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
+
        u8 macaddr[ETH_ALEN];
        u32 rev;
        unsigned long state;
 
        u8 antenna_mask;
+       u16 chainmask;
 
        struct mt76_sband sband_2g;
        struct mt76_sband sband_5g;
@@ -346,6 +410,10 @@ struct mt76_dev {
        struct debugfs_blob_wrapper otp;
        struct mt76_hw_cap cap;
 
+       struct mt76_rate_power rate_power;
+       int txpower_conf;
+       int txpower_cur;
+
        u32 debugfs_reg;
 
        struct led_classdev led_cdev;
@@ -353,7 +421,12 @@ struct mt76_dev {
        bool led_al;
        u8 led_pin;
 
-       struct mt76_usb usb;
+       u32 rxfilter;
+
+       union {
+               struct mt76_mmio mmio;
+               struct mt76_usb usb;
+       };
 };
 
 enum mt76_phy_type {
@@ -364,18 +437,6 @@ enum mt76_phy_type {
        MT_PHY_TYPE_VHT,
 };
 
-struct mt76_rate_power {
-       union {
-               struct {
-                       s8 cck[4];
-                       s8 ofdm[8];
-                       s8 ht[16];
-                       s8 vht[10];
-               };
-               s8 all[38];
-       };
-};
-
 struct mt76_rx_status {
        struct mt76_wcid *wcid;
 
@@ -399,10 +460,23 @@ struct mt76_rx_status {
        s8 chain_signal[IEEE80211_MAX_CHAINS];
 };
 
+#define __mt76_rr(dev, ...)    (dev)->bus->rr((dev), __VA_ARGS__)
+#define __mt76_wr(dev, ...)    (dev)->bus->wr((dev), __VA_ARGS__)
+#define __mt76_rmw(dev, ...)   (dev)->bus->rmw((dev), __VA_ARGS__)
+#define __mt76_wr_copy(dev, ...)       (dev)->bus->copy((dev), __VA_ARGS__)
+
+#define __mt76_set(dev, offset, val)   __mt76_rmw(dev, offset, 0, val)
+#define __mt76_clear(dev, offset, val) __mt76_rmw(dev, offset, val, 0)
+
 #define mt76_rr(dev, ...)      (dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
 #define mt76_wr(dev, ...)      (dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
 #define mt76_rmw(dev, ...)     (dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
 #define mt76_wr_copy(dev, ...) (dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__)
+#define mt76_wr_rp(dev, ...)   (dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
+#define mt76_rd_rp(dev, ...)   (dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)
+
+#define mt76_mcu_msg_alloc(dev, ...)   (dev)->mt76.mcu_ops->mcu_msg_alloc(__VA_ARGS__)
+#define mt76_mcu_send_msg(dev, ...)    (dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)
 
 #define mt76_set(dev, offset, val)     mt76_rmw(dev, offset, 0, val)
 #define mt76_clear(dev, offset, val)   mt76_rmw(dev, offset, val, 0)
@@ -413,6 +487,9 @@ struct mt76_rx_status {
 #define mt76_rmw_field(_dev, _reg, _field, _val)       \
        mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
 
+#define __mt76_rmw_field(_dev, _reg, _field, _val)     \
+       __mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
+
 #define mt76_hw(dev) (dev)->mt76.hw
 
 bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
@@ -469,6 +546,8 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
 void mt76_unregister_device(struct mt76_dev *dev);
 
 struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
+void mt76_seq_puts_array(struct seq_file *file, const char *str,
+                        s8 *val, int len);
 
 int mt76_eeprom_init(struct mt76_dev *dev, int len);
 void mt76_eeprom_override(struct mt76_dev *dev);
@@ -485,13 +564,7 @@ static inline int mt76_decr(int val, int size)
        return (val - 1) & (size - 1);
 }
 
-/* Hardware uses mirrored order of queues with Q3
- * having the highest priority
- */
-static inline u8 q2hwq(u8 q)
-{
-       return q ^ 0x3;
-}
+u8 mt76_ac_to_hwq(u8 ac);
 
 static inline struct ieee80211_txq *
 mtxq_to_txq(struct mt76_txq *mtxq)
@@ -543,6 +616,8 @@ void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);
 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
                         struct ieee80211_key_conf *key);
 
+struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);
+
 /* internal */
 void mt76_tx_free(struct mt76_dev *dev);
 struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
@@ -599,15 +674,9 @@ int mt76u_alloc_queues(struct mt76_dev *dev);
 void mt76u_stop_queues(struct mt76_dev *dev);
 void mt76u_stop_stat_wk(struct mt76_dev *dev);
 void mt76u_queues_deinit(struct mt76_dev *dev);
-int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
 
-int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
-                          int data_len, u32 max_payload, u32 offset);
 void mt76u_mcu_complete_urb(struct urb *urb);
-struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len);
-int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
-                      int cmd, bool wait_resp);
-void mt76u_mcu_fw_reset(struct mt76_dev *dev);
 int mt76u_mcu_init_rx(struct mt76_dev *dev);
+void mt76u_mcu_deinit(struct mt76_dev *dev);
 
 #endif
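
mt76.h now routes every register access through a per-bus ops table (plus a per-transport mcu_ops table), with the __mt76_rr()/__mt76_wr() macros doing the dispatch. A compact model of that indirection with a dummy in-memory bus; the names are shortened for the sketch:

#include <stdio.h>
#include <stdint.h>

struct dev;

struct bus_ops {
	uint32_t (*rr)(struct dev *dev, uint32_t offset);
	void (*wr)(struct dev *dev, uint32_t offset, uint32_t val);
};

struct dev {
	const struct bus_ops *bus;
	uint32_t regs[16];	/* fake MMIO window */
};

/* dispatch macros in the style of __mt76_rr()/__mt76_wr() */
#define mt_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
#define mt_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)

static uint32_t dummy_rr(struct dev *dev, uint32_t offset)
{
	return dev->regs[offset];
}

static void dummy_wr(struct dev *dev, uint32_t offset, uint32_t val)
{
	dev->regs[offset] = val;
}

static const struct bus_ops dummy_ops = { .rr = dummy_rr, .wr = dummy_wr };

int main(void)
{
	struct dev d = { .bus = &dummy_ops };

	mt_wr(&d, 3, 42);	/* expands to d.bus->wr(&d, 3, 42) */
	printf("%u\n", mt_rr(&d, 3));
	return 0;
}

A USB build would only swap in a different bus_ops table while callers stay unchanged, which is also why struct mt76_dev can keep mt76_mmio and mt76_usb in a union.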
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig b/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig
new file mode 100644 (file)
index 0000000..9a6157d
--- /dev/null
@@ -0,0 +1,20 @@
+config MT76x0_COMMON
+       tristate
+       select MT76x02_LIB
+
+config MT76x0U
+       tristate "MediaTek MT76x0U (USB) support"
+       select MT76x0_COMMON
+       select MT76x02_USB
+       depends on MAC80211
+       depends on USB
+       help
+         This adds support for MT7610U-based wireless USB dongles.
+
+config MT76x0E
+       tristate "MediaTek MT76x0E (PCIe) support"
+       select MT76x0_COMMON
+       depends on MAC80211
+       depends on PCI
+       help
+         This adds support for MT7610/MT7630-based wireless PCIe devices.
index 7843908261ba0659157b895be8fbe14baf2a16bb..20672978dceb7e414c986721e89310752b47d9ef 100644 (file)
@@ -1,7 +1,12 @@
-obj-$(CONFIG_MT76x0U)    += mt76x0.o
+obj-$(CONFIG_MT76x0U) += mt76x0u.o
+obj-$(CONFIG_MT76x0E) += mt76x0e.o
+obj-$(CONFIG_MT76x0_COMMON) += mt76x0-common.o
+
+mt76x0-common-y := \
+       init.o main.o trace.o eeprom.o phy.o \
+       mac.o debugfs.o
+mt76x0u-y := usb.o usb_mcu.o
+mt76x0e-y := pci.o pci_mcu.o
 
-mt76x0-objs    = \
-       usb.o init.o main.o mcu.o trace.o dma.o eeprom.o phy.o \
-       mac.o util.o debugfs.o tx.o core.o
 # ccflags-y := -DDEBUG
 CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/core.c b/drivers/net/wireless/mediatek/mt76/mt76x0/core.c
deleted file mode 100644 (file)
index 892803f..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include "mt76x0.h"
-
-int mt76x0_wait_asic_ready(struct mt76x0_dev *dev)
-{
-       int i = 100;
-       u32 val;
-
-       do {
-               if (test_bit(MT76_REMOVED, &dev->mt76.state))
-                       return -EIO;
-
-               val = mt76_rr(dev, MT_MAC_CSR0);
-               if (val && ~val)
-                       return 0;
-
-               udelay(10);
-       } while (i--);
-
-       return -EIO;
-}
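
The deleted mt76x0_wait_asic_ready() above is a bounded poll: it rereads MAC_CSR0 until the value is neither all-zeros nor all-ones, which is what "val && ~val" tests, since a dead or unplugged bus typically reads back as 0x0 or 0xffffffff. A host-side sketch of the same loop with a stubbed register read:

#include <stdio.h>
#include <stdint.h>

static int reads;

static uint32_t read_csr0(void)
{
	/* stub: pretend the chip comes alive on the third read */
	return ++reads < 3 ? 0xffffffff : 0x76100001;
}

static int wait_asic_ready(void)
{
	int i = 100;

	do {
		uint32_t val = read_csr0();

		if (val && ~val)	/* not 0, not 0xffffffff */
			return 0;
		/* the driver udelay(10)s here between reads */
	} while (i--);

	return -1;	/* -EIO in the driver */
}

int main(void)
{
	printf("ready: %d after %d reads\n", wait_asic_ready(), reads);
	return 0;
}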
index e7a77a88606806fc9da59be52af0994fa9fb872c..3224e5b1a1e59b24df0bc37ce3d8a3e12b16d17b 100644 (file)
 #include "mt76x0.h"
 #include "eeprom.h"
 
-static int
-mt76_reg_set(void *data, u64 val)
-{
-       struct mt76x0_dev *dev = data;
-
-       mt76_wr(dev, dev->debugfs_reg, val);
-       return 0;
-}
-
-static int
-mt76_reg_get(void *data, u64 *val)
-{
-       struct mt76x0_dev *dev = data;
-
-       *val = mt76_rr(dev, dev->debugfs_reg);
-       return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
-
 static int
 mt76x0_ampdu_stat_read(struct seq_file *file, void *data)
 {
-       struct mt76x0_dev *dev = file->private;
+       struct mt76x02_dev *dev = file->private;
        int i, j;
 
 #define stat_printf(grp, off, name)                                    \
@@ -95,72 +75,13 @@ static const struct file_operations fops_ampdu_stat = {
        .release = single_release,
 };
 
-static int
-mt76x0_eeprom_param_read(struct seq_file *file, void *data)
-{
-       struct mt76x0_dev *dev = file->private;
-       int i;
-
-       seq_printf(file, "RF freq offset: %hhx\n", dev->ee->rf_freq_off);
-       seq_printf(file, "RSSI offset 2GHz: %hhx %hhx\n",
-                  dev->ee->rssi_offset_2ghz[0], dev->ee->rssi_offset_2ghz[1]);
-       seq_printf(file, "RSSI offset 5GHz: %hhx %hhx %hhx\n",
-                  dev->ee->rssi_offset_5ghz[0], dev->ee->rssi_offset_5ghz[1],
-                  dev->ee->rssi_offset_5ghz[2]);
-       seq_printf(file, "Temperature offset: %hhx\n", dev->ee->temp_off);
-       seq_printf(file, "LNA gain 2Ghz: %hhx\n", dev->ee->lna_gain_2ghz);
-       seq_printf(file, "LNA gain 5Ghz: %hhx %hhx %hhx\n",
-                  dev->ee->lna_gain_5ghz[0], dev->ee->lna_gain_5ghz[1],
-                  dev->ee->lna_gain_5ghz[2]);
-       seq_printf(file, "Power Amplifier type %hhx\n", dev->ee->pa_type);
-       seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start,
-                  dev->ee->reg.start + dev->ee->reg.num - 1);
-
-       seq_puts(file, "Per channel power:\n");
-       for (i = 0; i < 58; i++)
-               seq_printf(file, "\t%d chan:%d pwr:%d\n", i, i,
-                          dev->ee->tx_pwr_per_chan[i]);
-
-       seq_puts(file, "Per rate power 2GHz:\n");
-       for (i = 0; i < 5; i++)
-               seq_printf(file, "\t %d bw20:%d bw40:%d\n",
-                          i, dev->ee->tx_pwr_cfg_2g[i][0],
-                             dev->ee->tx_pwr_cfg_5g[i][1]);
-
-       seq_puts(file, "Per rate power 5GHz:\n");
-       for (i = 0; i < 5; i++)
-               seq_printf(file, "\t %d bw20:%d bw40:%d\n",
-                          i, dev->ee->tx_pwr_cfg_5g[i][0],
-                             dev->ee->tx_pwr_cfg_5g[i][1]);
-
-       return 0;
-}
-
-static int
-mt76x0_eeprom_param_open(struct inode *inode, struct file *f)
-{
-       return single_open(f, mt76x0_eeprom_param_read, inode->i_private);
-}
-
-static const struct file_operations fops_eeprom_param = {
-       .open = mt76x0_eeprom_param_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-void mt76x0_init_debugfs(struct mt76x0_dev *dev)
+void mt76x0_init_debugfs(struct mt76x02_dev *dev)
 {
        struct dentry *dir;
 
-       dir = debugfs_create_dir("mt76x0", dev->mt76.hw->wiphy->debugfsdir);
+       dir = mt76_register_debugfs(&dev->mt76);
        if (!dir)
                return;
 
-       debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg);
-       debugfs_create_file("regval", S_IRUSR | S_IWUSR, dir, dev,
-                           &fops_regval);
        debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat);
-       debugfs_create_file("eeprom_param", S_IRUSR, dir, dev,
-                           &fops_eeprom_param);
 }
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c
deleted file mode 100644 (file)
index e2efb43..0000000
+++ /dev/null
@@ -1,522 +0,0 @@
-/*
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include "mt76x0.h"
-#include "dma.h"
-#include "usb.h"
-#include "trace.h"
-
-static int mt76x0_submit_rx_buf(struct mt76x0_dev *dev,
-                                struct mt76x0_dma_buf_rx *e, gfp_t gfp);
-
-static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
-{
-       const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
-       unsigned int hdrlen;
-
-       if (unlikely(len < 10))
-               return 0;
-       hdrlen = ieee80211_hdrlen(hdr->frame_control);
-       if (unlikely(hdrlen > len))
-               return 0;
-       return hdrlen;
-}
-
-static struct sk_buff *
-mt76x0_rx_skb_from_seg(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi,
-                       void *data, u32 seg_len, u32 truesize, struct page *p)
-{
-       struct sk_buff *skb;
-       u32 true_len, hdr_len = 0, copy, frag;
-
-       skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
-       if (!skb)
-               return NULL;
-
-       true_len = mt76x0_mac_process_rx(dev, skb, data, rxwi);
-       if (!true_len || true_len > seg_len)
-               goto bad_frame;
-
-       hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
-       if (!hdr_len)
-               goto bad_frame;
-
-       if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
-               memcpy(skb_put(skb, hdr_len), data, hdr_len);
-
-               data += hdr_len + 2;
-               true_len -= hdr_len;
-               hdr_len = 0;
-       }
-
-       /* If not doing paged RX allocated skb will always have enough space */
-       copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
-       frag = true_len - copy;
-
-       memcpy(skb_put(skb, copy), data, copy);
-       data += copy;
-
-       if (frag) {
-               skb_add_rx_frag(skb, 0, p, data - page_address(p),
-                               frag, truesize);
-               get_page(p);
-       }
-
-       return skb;
-
-bad_frame:
-       dev_err_ratelimited(dev->mt76.dev, "Error: incorrect frame len:%u hdr:%u\n",
-                           true_len, hdr_len);
-       dev_kfree_skb(skb);
-       return NULL;
-}
-
-static void mt76x0_rx_process_seg(struct mt76x0_dev *dev, u8 *data,
-                                  u32 seg_len, struct page *p)
-{
-       struct sk_buff *skb;
-       struct mt76x0_rxwi *rxwi;
-       u32 fce_info, truesize = seg_len;
-
-       /* DMA_INFO field at the beginning of the segment contains only some of
-        * the information, we need to read the FCE descriptor from the end.
-        */
-       fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
-       seg_len -= MT_FCE_INFO_LEN;
-
-       data += MT_DMA_HDR_LEN;
-       seg_len -= MT_DMA_HDR_LEN;
-
-       rxwi = (struct mt76x0_rxwi *) data;
-       data += sizeof(struct mt76x0_rxwi);
-       seg_len -= sizeof(struct mt76x0_rxwi);
-
-       if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
-               dev_err_once(dev->mt76.dev, "Error: RX path seen a non-pkt urb\n");
-
-       trace_mt76x0_rx(&dev->mt76, rxwi, fce_info);
-
-       skb = mt76x0_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
-       if (!skb)
-               return;
-
-       spin_lock(&dev->mac_lock);
-       ieee80211_rx(dev->mt76.hw, skb);
-       spin_unlock(&dev->mac_lock);
-}
-
-static u16 mt76x0_rx_next_seg_len(u8 *data, u32 data_len)
-{
-       u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
-               sizeof(struct mt76x0_rxwi) + MT_FCE_INFO_LEN;
-       u16 dma_len = get_unaligned_le16(data);
-
-       if (data_len < min_seg_len ||
-           WARN_ON(!dma_len) ||
-           WARN_ON(dma_len + MT_DMA_HDRS > data_len) ||
-           WARN_ON(dma_len & 0x3))
-               return 0;
-
-       return MT_DMA_HDRS + dma_len;
-}
-
-static void
-mt76x0_rx_process_entry(struct mt76x0_dev *dev, struct mt76x0_dma_buf_rx *e)
-{
-       u32 seg_len, data_len = e->urb->actual_length;
-       u8 *data = page_address(e->p);
-       struct page *new_p = NULL;
-       int cnt = 0;
-
-       if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
-               return;
-
-       /* Copy if there is very little data in the buffer. */
-       if (data_len > 512)
-               new_p = dev_alloc_pages(MT_RX_ORDER);
-
-       while ((seg_len = mt76x0_rx_next_seg_len(data, data_len))) {
-               mt76x0_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);
-
-               data_len -= seg_len;
-               data += seg_len;
-               cnt++;
-       }
-
-       if (cnt > 1)
-               trace_mt76x0_rx_dma_aggr(&dev->mt76, cnt, !!new_p);
-
-       if (new_p) {
-               /* we have one extra ref from the allocator */
-               __free_pages(e->p, MT_RX_ORDER);
-
-               e->p = new_p;
-       }
-}
-
-static struct mt76x0_dma_buf_rx *
-mt76x0_rx_get_pending_entry(struct mt76x0_dev *dev)
-{
-       struct mt76x0_rx_queue *q = &dev->rx_q;
-       struct mt76x0_dma_buf_rx *buf = NULL;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->rx_lock, flags);
-
-       if (!q->pending)
-               goto out;
-
-       buf = &q->e[q->start];
-       q->pending--;
-       q->start = (q->start + 1) % q->entries;
-out:
-       spin_unlock_irqrestore(&dev->rx_lock, flags);
-
-       return buf;
-}
-
-static void mt76x0_complete_rx(struct urb *urb)
-{
-       struct mt76x0_dev *dev = urb->context;
-       struct mt76x0_rx_queue *q = &dev->rx_q;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->rx_lock, flags);
-
-       if (mt76x0_urb_has_error(urb))
-               dev_err(dev->mt76.dev, "Error: RX urb failed:%d\n", urb->status);
-       if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
-               goto out;
-
-       q->end = (q->end + 1) % q->entries;
-       q->pending++;
-       tasklet_schedule(&dev->rx_tasklet);
-out:
-       spin_unlock_irqrestore(&dev->rx_lock, flags);
-}
-
-static void mt76x0_rx_tasklet(unsigned long data)
-{
-       struct mt76x0_dev *dev = (struct mt76x0_dev *) data;
-       struct mt76x0_dma_buf_rx *e;
-
-       while ((e = mt76x0_rx_get_pending_entry(dev))) {
-               if (e->urb->status)
-                       continue;
-
-               mt76x0_rx_process_entry(dev, e);
-               mt76x0_submit_rx_buf(dev, e, GFP_ATOMIC);
-       }
-}
-
-static void mt76x0_complete_tx(struct urb *urb)
-{
-       struct mt76x0_tx_queue *q = urb->context;
-       struct mt76x0_dev *dev = q->dev;
-       struct sk_buff *skb;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->tx_lock, flags);
-
-       if (mt76x0_urb_has_error(urb))
-               dev_err(dev->mt76.dev, "Error: TX urb failed:%d\n", urb->status);
-       if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
-               goto out;
-
-       skb = q->e[q->start].skb;
-       trace_mt76x0_tx_dma_done(&dev->mt76, skb);
-
-       __skb_queue_tail(&dev->tx_skb_done, skb);
-       tasklet_schedule(&dev->tx_tasklet);
-
-       if (q->used == q->entries - q->entries / 8)
-               ieee80211_wake_queue(dev->mt76.hw, skb_get_queue_mapping(skb));
-
-       q->start = (q->start + 1) % q->entries;
-       q->used--;
-out:
-       spin_unlock_irqrestore(&dev->tx_lock, flags);
-}
-
-static void mt76x0_tx_tasklet(unsigned long data)
-{
-       struct mt76x0_dev *dev = (struct mt76x0_dev *) data;
-       struct sk_buff_head skbs;
-       unsigned long flags;
-
-       __skb_queue_head_init(&skbs);
-
-       spin_lock_irqsave(&dev->tx_lock, flags);
-
-       set_bit(MT76_MORE_STATS, &dev->mt76.state);
-       if (!test_and_set_bit(MT76_READING_STATS, &dev->mt76.state))
-               queue_delayed_work(dev->stat_wq, &dev->stat_work,
-                                  msecs_to_jiffies(10));
-
-       skb_queue_splice_init(&dev->tx_skb_done, &skbs);
-
-       spin_unlock_irqrestore(&dev->tx_lock, flags);
-
-       while (!skb_queue_empty(&skbs)) {
-               struct sk_buff *skb = __skb_dequeue(&skbs);
-
-               mt76x0_tx_status(dev, skb);
-       }
-}
-
-static int mt76x0_dma_submit_tx(struct mt76x0_dev *dev,
-                                struct sk_buff *skb, u8 ep)
-{
-       struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
-       unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_ep[ep]);
-       struct mt76x0_dma_buf_tx *e;
-       struct mt76x0_tx_queue *q = &dev->tx_q[ep];
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&dev->tx_lock, flags);
-
-       if (WARN_ON_ONCE(q->entries <= q->used)) {
-               ret = -ENOSPC;
-               goto out;
-       }
-
-       e = &q->e[q->end];
-       e->skb = skb;
-       usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
-                         mt76x0_complete_tx, q);
-       ret = usb_submit_urb(e->urb, GFP_ATOMIC);
-       if (ret) {
-               /* Special-handle ENODEV from TX urb submission because it will
-                * often be the first ENODEV we see after device is removed.
-                */
-               if (ret == -ENODEV)
-                       set_bit(MT76_REMOVED, &dev->mt76.state);
-               else
-                       dev_err(dev->mt76.dev, "Error: TX urb submit failed:%d\n",
-                               ret);
-               goto out;
-       }
-
-       q->end = (q->end + 1) % q->entries;
-       q->used++;
-
-       if (q->used >= q->entries)
-               ieee80211_stop_queue(dev->mt76.hw, skb_get_queue_mapping(skb));
-out:
-       spin_unlock_irqrestore(&dev->tx_lock, flags);
-
-       return ret;
-}
-
-/* Map USB endpoint number to Q id in the DMA engine */
-static enum mt76_qsel ep2dmaq(u8 ep)
-{
-       if (ep == 5)
-               return MT_QSEL_MGMT;
-       return MT_QSEL_EDCA;
-}
-
-int mt76x0_dma_enqueue_tx(struct mt76x0_dev *dev, struct sk_buff *skb,
-                          struct mt76_wcid *wcid, int hw_q)
-{
-       u8 ep = q2ep(hw_q);
-       u32 dma_flags;
-       int ret;
-
-       dma_flags = MT_TXD_PKT_INFO_80211;
-       if (wcid->hw_key_idx == 0xff)
-               dma_flags |= MT_TXD_PKT_INFO_WIV;
-
-       ret = mt76x0_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
-       if (ret)
-               return ret;
-
-       ret = mt76x0_dma_submit_tx(dev, skb, ep);
-
-       if (ret) {
-               ieee80211_free_txskb(dev->mt76.hw, skb);
-               return ret;
-       }
-
-       return 0;
-}
-
-static void mt76x0_kill_rx(struct mt76x0_dev *dev)
-{
-       int i;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->rx_lock, flags);
-
-       for (i = 0; i < dev->rx_q.entries; i++) {
-               int next = dev->rx_q.end;
-
-               spin_unlock_irqrestore(&dev->rx_lock, flags);
-               usb_poison_urb(dev->rx_q.e[next].urb);
-               spin_lock_irqsave(&dev->rx_lock, flags);
-       }
-
-       spin_unlock_irqrestore(&dev->rx_lock, flags);
-}
-
-static int mt76x0_submit_rx_buf(struct mt76x0_dev *dev,
-                                struct mt76x0_dma_buf_rx *e, gfp_t gfp)
-{
-       struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
-       u8 *buf = page_address(e->p);
-       unsigned pipe;
-       int ret;
-
-       pipe = usb_rcvbulkpipe(usb_dev, dev->in_ep[MT_EP_IN_PKT_RX]);
-
-       usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
-                         mt76x0_complete_rx, dev);
-
-       trace_mt76x0_submit_urb(&dev->mt76, e->urb);
-       ret = usb_submit_urb(e->urb, gfp);
-       if (ret)
-               dev_err(dev->mt76.dev, "Error: submit RX URB failed:%d\n", ret);
-
-       return ret;
-}
-
-static int mt76x0_submit_rx(struct mt76x0_dev *dev)
-{
-       int i, ret;
-
-       for (i = 0; i < dev->rx_q.entries; i++) {
-               ret = mt76x0_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static void mt76x0_free_rx(struct mt76x0_dev *dev)
-{
-       int i;
-
-       for (i = 0; i < dev->rx_q.entries; i++) {
-               __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
-               usb_free_urb(dev->rx_q.e[i].urb);
-       }
-}
-
-static int mt76x0_alloc_rx(struct mt76x0_dev *dev)
-{
-       int i;
-
-       memset(&dev->rx_q, 0, sizeof(dev->rx_q));
-       dev->rx_q.dev = dev;
-       dev->rx_q.entries = N_RX_ENTRIES;
-
-       for (i = 0; i < N_RX_ENTRIES; i++) {
-               dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
-               dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);
-
-               if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
-                       return -ENOMEM;
-       }
-
-       return 0;
-}
-
-static void mt76x0_free_tx_queue(struct mt76x0_tx_queue *q)
-{
-       int i;
-
-       WARN_ON(q->used);
-
-       for (i = 0; i < q->entries; i++)  {
-               usb_poison_urb(q->e[i].urb);
-               usb_free_urb(q->e[i].urb);
-       }
-}
-
-static void mt76x0_free_tx(struct mt76x0_dev *dev)
-{
-       int i;
-
-       for (i = 0; i < __MT_EP_OUT_MAX; i++)
-               mt76x0_free_tx_queue(&dev->tx_q[i]);
-}
-
-static int mt76x0_alloc_tx_queue(struct mt76x0_dev *dev,
-                                 struct mt76x0_tx_queue *q)
-{
-       int i;
-
-       q->dev = dev;
-       q->entries = N_TX_ENTRIES;
-
-       for (i = 0; i < N_TX_ENTRIES; i++) {
-               q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
-               if (!q->e[i].urb)
-                       return -ENOMEM;
-       }
-
-       return 0;
-}
-
-static int mt76x0_alloc_tx(struct mt76x0_dev *dev)
-{
-       int i;
-
-       dev->tx_q = devm_kcalloc(dev->mt76.dev, __MT_EP_OUT_MAX,
-                                sizeof(*dev->tx_q), GFP_KERNEL);
-
-       for (i = 0; i < __MT_EP_OUT_MAX; i++)
-               if (mt76x0_alloc_tx_queue(dev, &dev->tx_q[i]))
-                       return -ENOMEM;
-
-       return 0;
-}
-
-int mt76x0_dma_init(struct mt76x0_dev *dev)
-{
-       int ret = -ENOMEM;
-
-       tasklet_init(&dev->tx_tasklet, mt76x0_tx_tasklet, (unsigned long) dev);
-       tasklet_init(&dev->rx_tasklet, mt76x0_rx_tasklet, (unsigned long) dev);
-
-       ret = mt76x0_alloc_tx(dev);
-       if (ret)
-               goto err;
-       ret = mt76x0_alloc_rx(dev);
-       if (ret)
-               goto err;
-
-       ret = mt76x0_submit_rx(dev);
-       if (ret)
-               goto err;
-
-       return 0;
-err:
-       mt76x0_dma_cleanup(dev);
-       return ret;
-}
-
-void mt76x0_dma_cleanup(struct mt76x0_dev *dev)
-{
-       mt76x0_kill_rx(dev);
-
-       tasklet_kill(&dev->rx_tasklet);
-
-       mt76x0_free_rx(dev);
-       mt76x0_free_tx(dev);
-
-       tasklet_kill(&dev->tx_tasklet);
-}
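
The deleted USB DMA path above manages each TX queue as a fixed ring with start/end/used counters: submission advances end, URB completion advances start, and used drives flow control (stop the queue when full, wake it once drained below a threshold). A self-contained model of that ring arithmetic:

#include <stdio.h>

#define N_ENTRIES 8

struct ring {
	int start, end, used;	/* consumer index, producer index, fill */
};

static int ring_push(struct ring *q)	/* mirrors mt76x0_dma_submit_tx() */
{
	if (q->used >= N_ENTRIES)
		return -1;	/* full: the driver would stop the queue */
	q->end = (q->end + 1) % N_ENTRIES;
	q->used++;
	return 0;
}

static int ring_pop(struct ring *q)	/* mirrors mt76x0_complete_tx() */
{
	if (!q->used)
		return -1;
	q->start = (q->start + 1) % N_ENTRIES;
	q->used--;
	return 0;
}

int main(void)
{
	struct ring q = { 0 };
	int i;

	for (i = 0; i < 10; i++)
		if (ring_push(&q))
			printf("full at submission %d\n", i);
	while (!ring_pop(&q))
		;
	printf("drained, used=%d\n", q.used);
	return 0;
}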
index 36da1e6bc21ab5832dc5432996307f099ff99c8a..5735038c0e2d5d7417d389c26a32ba15cdcc0890 100644 (file)
@@ -13,6 +13,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <asm/unaligned.h>
 #include "mt76x0.h"
 #include "eeprom.h"
-
-static bool
-field_valid(u8 val)
-{
-       return val != 0xff;
-}
-
-static s8
-field_validate(u8 val)
-{
-       if (!field_valid(val))
-               return 0;
-
-       return val;
-}
-
-static inline int
-sign_extend(u32 val, unsigned int size)
-{
-       bool sign = val & BIT(size - 1);
-
-       val &= BIT(size - 1) - 1;
-
-       return sign ? val : -val;
-}
-
-static int
-mt76x0_efuse_read(struct mt76x0_dev *dev, u16 addr, u8 *data,
-                  enum mt76x0_eeprom_access_modes mode)
-{
-       u32 val;
-       int i;
-
-       val = mt76_rr(dev, MT_EFUSE_CTRL);
-       val &= ~(MT_EFUSE_CTRL_AIN |
-                MT_EFUSE_CTRL_MODE);
-       val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
-              FIELD_PREP(MT_EFUSE_CTRL_MODE, mode) |
-              MT_EFUSE_CTRL_KICK;
-       mt76_wr(dev, MT_EFUSE_CTRL, val);
-
-       if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
-               return -ETIMEDOUT;
-
-       val = mt76_rr(dev, MT_EFUSE_CTRL);
-       if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
-               /* Parts of eeprom not in the usage map (0x80-0xc0,0xf0)
-                * will not return valid data but it's ok.
-                */
-               memset(data, 0xff, 16);
-               return 0;
-       }
-
-       for (i = 0; i < 4; i++) {
-               val = mt76_rr(dev, MT_EFUSE_DATA(i));
-               put_unaligned_le32(val, data + 4 * i);
-       }
-
-       return 0;
-}
+#include "../mt76x02_phy.h"
 
 #define MT_MAP_READS   DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16)
 static int
-mt76x0_efuse_physical_size_check(struct mt76x0_dev *dev)
+mt76x0_efuse_physical_size_check(struct mt76x02_dev *dev)
 {
        u8 data[MT_MAP_READS * 16];
        int ret, i;
        u32 start = 0, end = 0, cnt_free;
 
-       for (i = 0; i < MT_MAP_READS; i++) {
-               ret = mt76x0_efuse_read(dev, MT_EE_USAGE_MAP_START + i * 16,
-                                        data + i * 16, MT_EE_PHYSICAL_READ);
-               if (ret)
-                       return ret;
-       }
+       ret = mt76x02_get_efuse_data(&dev->mt76, MT_EE_USAGE_MAP_START,
+                                    data, sizeof(data), MT_EE_PHYSICAL_READ);
+       if (ret)
+               return ret;
 
        for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++)
                if (!data[i]) {
@@ -105,341 +45,307 @@ mt76x0_efuse_physical_size_check(struct mt76x0_dev *dev)
        cnt_free = end - start + 1;
 
        if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) {
-               dev_err(dev->mt76.dev, "Error: your device needs default EEPROM file and this driver doesn't support it!\n");
+               dev_err(dev->mt76.dev,
+                       "driver does not support default EEPROM\n");
                return -EINVAL;
        }
 
        return 0;
 }
 
-static void
-mt76x0_set_chip_cap(struct mt76x0_dev *dev, u8 *eeprom)
+static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
 {
-       enum mt76x2_board_type { BOARD_TYPE_2GHZ = 1, BOARD_TYPE_5GHZ = 2 };
-       u16 nic_conf0 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_0);
-       u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
-
-       dev_dbg(dev->mt76.dev, "NIC_CONF0: %04x NIC_CONF1: %04x\n", nic_conf0, nic_conf1);
-
-       switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, nic_conf0)) {
-       case BOARD_TYPE_5GHZ:
-               dev->ee->has_5ghz = true;
-               break;
-       case BOARD_TYPE_2GHZ:
-               dev->ee->has_2ghz = true;
-               break;
-       default:
-               dev->ee->has_2ghz = true;
-               dev->ee->has_5ghz = true;
-               break;
-       }
+       u16 nic_conf0 = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_0);
+       u16 nic_conf1 = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_1);
+
+       mt76x02_eeprom_parse_hw_cap(&dev->mt76);
+       dev_dbg(dev->mt76.dev, "2GHz %d 5GHz %d\n",
+               dev->mt76.cap.has_2ghz, dev->mt76.cap.has_5ghz);
 
-       dev_dbg(dev->mt76.dev, "Has 2GHZ %d 5GHZ %d\n", dev->ee->has_2ghz, dev->ee->has_5ghz);
+       if (dev->no_2ghz) {
+               dev->mt76.cap.has_2ghz = false;
+               dev_dbg(dev->mt76.dev, "mask out 2GHz support\n");
+       }
 
-       if (!field_valid(nic_conf1 & 0xff))
+       if (!mt76x02_field_valid(nic_conf1 & 0xff))
                nic_conf1 &= 0xff00;
 
        if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL)
                dev_err(dev->mt76.dev,
-                       "Error: this driver does not support HW RF ctrl\n");
+                       "driver does not support HW RF ctrl\n");
 
-       if (!field_valid(nic_conf0 >> 8))
+       if (!mt76x02_field_valid(nic_conf0 >> 8))
                return;
 
        if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
            FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
-               dev_err(dev->mt76.dev,
-                       "Error: device has more than 1 RX/TX stream!\n");
-
-       dev->ee->pa_type = FIELD_GET(MT_EE_NIC_CONF_0_PA_TYPE, nic_conf0);
-       dev_dbg(dev->mt76.dev, "PA Type %d\n", dev->ee->pa_type);
+               dev_err(dev->mt76.dev, "invalid tx-rx stream\n");
 }
 
-static int
-mt76x0_set_macaddr(struct mt76x0_dev *dev, const u8 *eeprom)
+static void mt76x0_set_temp_offset(struct mt76x02_dev *dev)
 {
-       const void *src = eeprom + MT_EE_MAC_ADDR;
-
-       ether_addr_copy(dev->macaddr, src);
-
-       if (!is_valid_ether_addr(dev->macaddr)) {
-               eth_random_addr(dev->macaddr);
-               dev_info(dev->mt76.dev,
-                        "Invalid MAC address, using random address %pM\n",
-                        dev->macaddr);
-       }
+       u8 val;
 
-       mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
-       mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) |
-               FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
-
-       return 0;
-}
-
-static void
-mt76x0_set_temp_offset(struct mt76x0_dev *dev, u8 *eeprom)
-{
-       u8 temp = eeprom[MT_EE_TEMP_OFFSET];
-
-       if (field_valid(temp))
-               dev->ee->temp_off = sign_extend(temp, 8);
+       val = mt76x02_eeprom_get(&dev->mt76, MT_EE_2G_TARGET_POWER) >> 8;
+       if (mt76x02_field_valid(val))
+               dev->cal.rx.temp_offset = mt76x02_sign_extend(val, 8);
        else
-               dev->ee->temp_off = -10;
+               dev->cal.rx.temp_offset = -10;
 }
 
-static void
-mt76x0_set_country_reg(struct mt76x0_dev *dev, u8 *eeprom)
+static void mt76x0_set_freq_offset(struct mt76x02_dev *dev)
 {
-       /* Note: - region 31 is not valid for mt76x0 (see rtmp_init.c)
-        *       - comments in rtmp_def.h are incorrect (see rt_channel.c)
-        */
-       static const struct reg_channel_bounds chan_bounds[] = {
-               /* EEPROM country regions 0 - 7 */
-               {  1, 11 },     {  1, 13 },     { 10,  2 },     { 10,  4 },
-               { 14,  1 },     {  1, 14 },     {  3,  7 },     {  5,  9 },
-               /* EEPROM country regions 32 - 33 */
-               {  1, 11 },     {  1, 14 }
-       };
-       u8 val = eeprom[MT_EE_COUNTRY_REGION_2GHZ];
-       int idx = -1;
-
-       dev_dbg(dev->mt76.dev, "REG 2GHZ %u REG 5GHZ %u\n", val, eeprom[MT_EE_COUNTRY_REGION_5GHZ]);
-       if (val < 8)
-               idx = val;
-       if (val > 31 && val < 33)
-               idx = val - 32 + 8;
-
-       if (idx != -1)
-               dev_info(dev->mt76.dev,
-                        "EEPROM country region %02hhx (channels %hhd-%hhd)\n",
-                        val, chan_bounds[idx].start,
-                        chan_bounds[idx].start + chan_bounds[idx].num - 1);
-       else
-               idx = 5; /* channels 1 - 14 */
-
-       dev->ee->reg = chan_bounds[idx];
+       struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx;
+       u8 val;
 
-       /* TODO: country region 33 is special - phy should be set to B-mode
-        *       before entering channel 14 (see sta/connect.c)
-        */
-}
-
-static void
-mt76x0_set_rf_freq_off(struct mt76x0_dev *dev, u8 *eeprom)
-{
-       u8 comp;
+       val = mt76x02_eeprom_get(&dev->mt76, MT_EE_FREQ_OFFSET);
+       if (!mt76x02_field_valid(val))
+               val = 0;
+       caldata->freq_offset = val;
 
-       dev->ee->rf_freq_off = field_validate(eeprom[MT_EE_FREQ_OFFSET]);
-       comp = field_validate(eeprom[MT_EE_FREQ_OFFSET_COMPENSATION]);
+       val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TSSI_BOUND4) >> 8;
+       if (!mt76x02_field_valid(val))
+               val = 0;
 
-       if (comp & BIT(7))
-               dev->ee->rf_freq_off -= comp & 0x7f;
-       else
-               dev->ee->rf_freq_off += comp;
+       caldata->freq_offset -= mt76x02_sign_extend(val, 8);
 }
 
-static void
-mt76x0_set_lna_gain(struct mt76x0_dev *dev, u8 *eeprom)
+void mt76x0_read_rx_gain(struct mt76x02_dev *dev)
 {
-       u8 gain;
+       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+       struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx;
+       s8 val, lna_5g[3], lna_2g;
+       u16 rssi_offset;
+       int i;
 
-       dev->ee->lna_gain_2ghz = eeprom[MT_EE_LNA_GAIN_2GHZ];
-       dev->ee->lna_gain_5ghz[0] = eeprom[MT_EE_LNA_GAIN_5GHZ_0];
+       mt76x02_get_rx_gain(&dev->mt76, chan->band, &rssi_offset,
+                           &lna_2g, lna_5g);
+       caldata->lna_gain = mt76x02_get_lna_gain(&dev->mt76, &lna_2g,
+                                                lna_5g, chan);
 
-       gain = eeprom[MT_EE_LNA_GAIN_5GHZ_1];
-       if (gain == 0xff || gain == 0)
-               dev->ee->lna_gain_5ghz[1] = dev->ee->lna_gain_5ghz[0];
-       else
-               dev->ee->lna_gain_5ghz[1] = gain;
+       for (i = 0; i < ARRAY_SIZE(caldata->rssi_offset); i++) {
+               val = rssi_offset >> (8 * i);
+               if (val < -10 || val > 10)
+                       val = 0;
 
-       gain = eeprom[MT_EE_LNA_GAIN_5GHZ_2];
-       if (gain == 0xff || gain == 0)
-               dev->ee->lna_gain_5ghz[2] = dev->ee->lna_gain_5ghz[0];
-       else
-               dev->ee->lna_gain_5ghz[2] = gain;
+               caldata->rssi_offset[i] = val;
+       }
 }
 
-static void
-mt76x0_set_rssi_offset(struct mt76x0_dev *dev, u8 *eeprom)
+static s8 mt76x0_get_delta(struct mt76_dev *dev)
 {
-       int i;
-       s8 *rssi_offset = dev->ee->rssi_offset_2ghz;
-
-       for (i = 0; i < 2; i++) {
-               rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET + i];
+       struct cfg80211_chan_def *chandef = &dev->chandef;
+       u8 val;
 
-               if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
-                       dev_warn(dev->mt76.dev,
-                                "Warning: EEPROM RSSI is invalid %02hhx\n",
-                                rssi_offset[i]);
-                       rssi_offset[i] = 0;
-               }
-       }
-
-       rssi_offset = dev->ee->rssi_offset_5ghz;
+       if (mt76x02_tssi_enabled(dev))
+               return 0;
 
-       for (i = 0; i < 3; i++) {
-               rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET_5GHZ + i];
+       if (chandef->width == NL80211_CHAN_WIDTH_80) {
+               val = mt76x02_eeprom_get(dev, MT_EE_5G_TARGET_POWER) >> 8;
+       } else if (chandef->width == NL80211_CHAN_WIDTH_40) {
+               u16 data;
 
-               if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
-                       dev_warn(dev->mt76.dev,
-                                "Warning: EEPROM RSSI is invalid %02hhx\n",
-                                rssi_offset[i]);
-                       rssi_offset[i] = 0;
-               }
+               data = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40);
+               if (chandef->chan->band == NL80211_BAND_5GHZ)
+                       val = data >> 8;
+               else
+                       val = data;
+       } else {
+               return 0;
        }
+
+       return mt76x02_rate_power_val(val);
 }
 
-static u32
-calc_bw40_power_rate(u32 value, int delta)
+void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev)
 {
-       u32 ret = 0;
-       int i, tmp;
-
-       for (i = 0; i < 4; i++) {
-               tmp = s6_to_int((value >> i*8) & 0xff) + delta;
-               ret |= (u32)(int_to_s6(tmp)) << i*8;
-       }
-
-       return ret;
+       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+       bool is_2ghz = chan->band == NL80211_BAND_2GHZ;
+       struct mt76_rate_power *t = &dev->mt76.rate_power;
+       s8 delta = mt76x0_get_delta(&dev->mt76);
+       u16 val, addr;
+
+       memset(t, 0, sizeof(*t));
+
+       /* cck 1M, 2M, 5.5M, 11M */
+       val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_BYRATE_BASE);
+       t->cck[0] = t->cck[1] = s6_to_s8(val);
+       t->cck[2] = t->cck[3] = s6_to_s8(val >> 8);
+
+       /* ofdm 6M, 9M, 12M, 18M */
+       addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 2 : 0x120;
+       val = mt76x02_eeprom_get(&dev->mt76, addr);
+       t->ofdm[0] = t->ofdm[1] = s6_to_s8(val);
+       t->ofdm[2] = t->ofdm[3] = s6_to_s8(val >> 8);
+
+       /* ofdm 24M, 36M, 48M, 54M */
+       addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 4 : 0x122;
+       val = mt76x02_eeprom_get(&dev->mt76, addr);
+       t->ofdm[4] = t->ofdm[5] = s6_to_s8(val);
+       t->ofdm[6] = t->ofdm[7] = s6_to_s8(val >> 8);
+
+       /* ht-vht mcs 1ss 0, 1, 2, 3 */
+       addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 6 : 0x124;
+       val = mt76x02_eeprom_get(&dev->mt76, addr);
+       t->ht[0] = t->ht[1] = t->vht[0] = t->vht[1] = s6_to_s8(val);
+       t->ht[2] = t->ht[3] = t->vht[2] = t->vht[3] = s6_to_s8(val >> 8);
+
+       /* ht-vht mcs 1ss 4, 5, 6 */
+       addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 8 : 0x126;
+       val = mt76x02_eeprom_get(&dev->mt76, addr);
+       t->ht[4] = t->ht[5] = t->vht[4] = t->vht[5] = s6_to_s8(val);
+       t->ht[6] = t->vht[6] = s6_to_s8(val >> 8);
+
+       /* ht-vht mcs 1ss 0, 1, 2, 3 stbc */
+       addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 14 : 0xec;
+       val = mt76x02_eeprom_get(&dev->mt76, addr);
+       t->stbc[0] = t->stbc[1] = s6_to_s8(val);
+       t->stbc[2] = t->stbc[3] = s6_to_s8(val >> 8);
+
+       /* ht-vht mcs 1ss 4, 5, 6 stbc */
+       addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 16 : 0xee;
+       val = mt76x02_eeprom_get(&dev->mt76, addr);
+       t->stbc[4] = t->stbc[5] = s6_to_s8(val);
+       t->stbc[6] = t->stbc[7] = s6_to_s8(val >> 8);
+
+       /* vht mcs 8, 9 5GHz */
+       val = mt76x02_eeprom_get(&dev->mt76, 0x132);
+       t->vht[7] = s6_to_s8(val);
+       t->vht[8] = s6_to_s8(val >> 8);
+
+       mt76x02_add_rate_power_offset(t, delta);
 }
 
-static s8
-get_delta(u8 val)
+void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
 {
-       s8 ret;
+       struct mt76x0_chan_map {
+               u8 chan;
+               u8 offset;
+       } chan_map[] = {
+               {   2,  0 }, {   4,  1 }, {   6,  2 }, {   8,  3 },
+               {  10,  4 }, {  12,  5 }, {  14,  6 }, {  38,  0 },
+               {  44,  1 }, {  48,  2 }, {  54,  3 }, {  60,  4 },
+               {  64,  5 }, { 102,  6 }, { 108,  7 }, { 112,  8 },
+               { 118,  9 }, { 124, 10 }, { 128, 11 }, { 134, 12 },
+               { 140, 13 }, { 151, 14 }, { 157, 15 }, { 161, 16 },
+               { 167, 17 }, { 171, 18 }, { 173, 19 },
+       };
+       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+       u8 offset, addr;
+       u16 data;
+       int i;
 
-       if (!field_valid(val) || !(val & BIT(7)))
-               return 0;
+       for (i = 0; i < ARRAY_SIZE(chan_map); i++) {
+               if (chan_map[i].chan <= chan->hw_value) {
+                       offset = chan_map[i].offset;
+                       break;
+               }
+       }
+       if (i == ARRAY_SIZE(chan_map))
+               offset = chan_map[0].offset;
+
+       if (chan->band == NL80211_BAND_2GHZ) {
+               addr = MT_EE_TX_POWER_DELTA_BW80 + offset;
+       } else {
+               switch (chan->hw_value) {
+               case 58:
+                       offset = 8;
+                       break;
+               case 106:
+                       offset = 14;
+                       break;
+               case 112:
+                       offset = 20;
+                       break;
+               case 155:
+                       offset = 30;
+                       break;
+               default:
+                       break;
+               }
+               addr = MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE + 2 + offset;
+       }
 
-       ret = val & 0x1f;
-       if (ret > 8)
-               ret = 8;
-       if (val & BIT(6))
-               ret = -ret;
+       data = mt76x02_eeprom_get(&dev->mt76, addr);
 
-       return ret;
+       info[0] = data;
+       if (!info[0] || info[0] > 0x3f)
+               info[0] = 5;
+
+       info[1] = data >> 8;
+       if (!info[1] || info[1] > 0x3f)
+               info[1] = 5;
 }
 
-static void
-mt76x0_set_tx_power_per_rate(struct mt76x0_dev *dev, u8 *eeprom)
+static int mt76x0_check_eeprom(struct mt76x02_dev *dev)
 {
-       s8 bw40_delta_2g, bw40_delta_5g;
-       u32 val;
-       int i;
-
-       bw40_delta_2g = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40]);
-       bw40_delta_5g = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40 + 1]);
-
-       for (i = 0; i < 5; i++) {
-               val = get_unaligned_le32(eeprom + MT_EE_TX_POWER_BYRATE(i));
+       u16 val;
 
-               /* Skip last 16 bits. */
-               if (i == 4)
-                       val &= 0x0000ffff;
+       val = get_unaligned_le16(dev->mt76.eeprom.data);
+       if (!val)
+               val = get_unaligned_le16(dev->mt76.eeprom.data +
+                                        MT_EE_PCI_ID);
 
-               dev->ee->tx_pwr_cfg_2g[i][0] = val;
-               dev->ee->tx_pwr_cfg_2g[i][1] = calc_bw40_power_rate(val, bw40_delta_2g);
+       switch (val) {
+       case 0x7650:
+       case 0x7610:
+               return 0;
+       default:
+               dev_err(dev->mt76.dev, "EEPROM data check failed: %04x\n",
+                       val);
+               return -EINVAL;
        }
-
-       /* Reading per rate tx power for 5 GHz band is a bit more complex. Note
-        * we mix 16 bit and 32 bit reads and sometimes do shifts.
-        */
-       val = get_unaligned_le16(eeprom + 0x120);
-       val <<= 16;
-       dev->ee->tx_pwr_cfg_5g[0][0] = val;
-       dev->ee->tx_pwr_cfg_5g[0][1] = calc_bw40_power_rate(val, bw40_delta_5g);
-
-       val = get_unaligned_le32(eeprom + 0x122);
-       dev->ee->tx_pwr_cfg_5g[1][0] = val;
-       dev->ee->tx_pwr_cfg_5g[1][1] = calc_bw40_power_rate(val, bw40_delta_5g);
-
-       val = get_unaligned_le16(eeprom + 0x126);
-       dev->ee->tx_pwr_cfg_5g[2][0] = val;
-       dev->ee->tx_pwr_cfg_5g[2][1] = calc_bw40_power_rate(val, bw40_delta_5g);
-
-       val = get_unaligned_le16(eeprom + 0xec);
-       val <<= 16;
-       dev->ee->tx_pwr_cfg_5g[3][0] = val;
-       dev->ee->tx_pwr_cfg_5g[3][1] = calc_bw40_power_rate(val, bw40_delta_5g);
-
-       val = get_unaligned_le16(eeprom + 0xee);
-       dev->ee->tx_pwr_cfg_5g[4][0] = val;
-       dev->ee->tx_pwr_cfg_5g[4][1] = calc_bw40_power_rate(val, bw40_delta_5g);
 }
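
mt76x0_check_eeprom() relies on get_unaligned_le16() assembling the chip id from the first two EEPROM bytes in little-endian order, so an MT7650 part stores { 0x50, 0x76 } at offset 0. A quick user-space check of that decode (le16_load() is our stand-in for the kernel helper):

#include <assert.h>
#include <stdint.h>

/* Little-endian 16-bit load from a possibly unaligned byte buffer,
 * doing the same job as the kernel's get_unaligned_le16(). */
static uint16_t le16_load(const uint8_t *p)
{
        return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
        const uint8_t eeprom[] = { 0x50, 0x76 };  /* MT7650 chip id */

        assert(le16_load(eeprom) == 0x7650);
        return 0;
}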
 
-static void
-mt76x0_set_tx_power_per_chan(struct mt76x0_dev *dev, u8 *eeprom)
+static int mt76x0_load_eeprom(struct mt76x02_dev *dev)
 {
-       int i;
-       u8 tx_pwr;
+       int found;
 
-       for (i = 0; i < 14; i++) {
-               tx_pwr = eeprom[MT_EE_TX_POWER_OFFSET_2GHZ + i];
-               if (tx_pwr <= 0x3f && tx_pwr > 0)
-                       dev->ee->tx_pwr_per_chan[i] = tx_pwr;
-               else
-                       dev->ee->tx_pwr_per_chan[i] = 5;
-       }
+       found = mt76_eeprom_init(&dev->mt76, MT76X0_EEPROM_SIZE);
+       if (found < 0)
+               return found;
 
-       for (i = 0; i < 40; i++) {
-               tx_pwr = eeprom[MT_EE_TX_POWER_OFFSET_5GHZ + i];
-               if (tx_pwr <= 0x3f && tx_pwr > 0)
-                       dev->ee->tx_pwr_per_chan[14 + i] = tx_pwr;
-               else
-                       dev->ee->tx_pwr_per_chan[14 + i] = 5;
-       }
+       if (found && !mt76x0_check_eeprom(dev))
+               return 0;
+
+       found = mt76x0_efuse_physical_size_check(dev);
+       if (found < 0)
+               return found;
 
-       dev->ee->tx_pwr_per_chan[54] = dev->ee->tx_pwr_per_chan[22];
-       dev->ee->tx_pwr_per_chan[55] = dev->ee->tx_pwr_per_chan[28];
-       dev->ee->tx_pwr_per_chan[56] = dev->ee->tx_pwr_per_chan[34];
-       dev->ee->tx_pwr_per_chan[57] = dev->ee->tx_pwr_per_chan[44];
+       return mt76x02_get_efuse_data(&dev->mt76, 0, dev->mt76.eeprom.data,
+                                     MT76X0_EEPROM_SIZE, MT_EE_READ);
 }
 
-int
-mt76x0_eeprom_init(struct mt76x0_dev *dev)
+int mt76x0_eeprom_init(struct mt76x02_dev *dev)
 {
-       u8 *eeprom;
-       int i, ret;
-
-       ret = mt76x0_efuse_physical_size_check(dev);
-       if (ret)
-               return ret;
-
-       dev->ee = devm_kzalloc(dev->mt76.dev, sizeof(*dev->ee), GFP_KERNEL);
-       if (!dev->ee)
-               return -ENOMEM;
+       u8 version, fae;
+       u16 data;
+       int err;
 
-       eeprom = kmalloc(MT76X0_EEPROM_SIZE, GFP_KERNEL);
-       if (!eeprom)
-               return -ENOMEM;
+       err = mt76x0_load_eeprom(dev);
+       if (err < 0)
+               return err;
 
-       for (i = 0; i + 16 <= MT76X0_EEPROM_SIZE; i += 16) {
-               ret = mt76x0_efuse_read(dev, i, eeprom + i, MT_EE_READ);
-               if (ret)
-                       goto out;
-       }
+       data = mt76x02_eeprom_get(&dev->mt76, MT_EE_VERSION);
+       version = data >> 8;
+       fae = data;
 
-       if (eeprom[MT_EE_VERSION_EE] > MT76X0U_EE_MAX_VER)
+       if (version > MT76X0U_EE_MAX_VER)
                dev_warn(dev->mt76.dev,
                         "Warning: unsupported EEPROM version %02hhx\n",
-                        eeprom[MT_EE_VERSION_EE]);
+                        version);
        dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n",
-                eeprom[MT_EE_VERSION_EE], eeprom[MT_EE_VERSION_FAE]);
-
-       mt76x0_set_macaddr(dev, eeprom);
-       mt76x0_set_chip_cap(dev, eeprom);
-       mt76x0_set_country_reg(dev, eeprom);
-       mt76x0_set_rf_freq_off(dev, eeprom);
-       mt76x0_set_temp_offset(dev, eeprom);
-       mt76x0_set_lna_gain(dev, eeprom);
-       mt76x0_set_rssi_offset(dev, eeprom);
-       dev->chainmask = 0x0101;
-
-       mt76x0_set_tx_power_per_rate(dev, eeprom);
-       mt76x0_set_tx_power_per_chan(dev, eeprom);
-
-out:
-       kfree(eeprom);
-       return ret;
+                version, fae);
+
+       mt76x02_mac_setaddr(&dev->mt76,
+                           dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+       mt76x0_set_chip_cap(dev);
+       mt76x0_set_freq_offset(dev);
+       mt76x0_set_temp_offset(dev);
+
+       dev->mt76.chainmask = 0x0101;
+
+       return 0;
 }
+
+MODULE_LICENSE("Dual BSD/GPL");
index e37b573aed7b88e339d9b22e89cb20f9fa734110..40fd4e61769b34143416efaeea1470f5acdfffd9 100644
 #ifndef __MT76X0U_EEPROM_H
 #define __MT76X0U_EEPROM_H
 
-struct mt76x0_dev;
+#include "../mt76x02_eeprom.h"
 
-#define MT76X0U_EE_MAX_VER                     0x0c
-#define MT76X0_EEPROM_SIZE                     512
+struct mt76x02_dev;
 
-#define MT76X0U_DEFAULT_TX_POWER               6
+#define MT76X0U_EE_MAX_VER             0x0c
+#define MT76X0_EEPROM_SIZE             512
 
-enum mt76_eeprom_field {
-       MT_EE_CHIP_ID =                         0x00,
-       MT_EE_VERSION_FAE =                     0x02,
-       MT_EE_VERSION_EE =                      0x03,
-       MT_EE_MAC_ADDR =                        0x04,
-       MT_EE_NIC_CONF_0 =                      0x34,
-       MT_EE_NIC_CONF_1 =                      0x36,
-       MT_EE_COUNTRY_REGION_5GHZ =             0x38,
-       MT_EE_COUNTRY_REGION_2GHZ =             0x39,
-       MT_EE_FREQ_OFFSET =                     0x3a,
-       MT_EE_NIC_CONF_2 =                      0x42,
+int mt76x0_eeprom_init(struct mt76x02_dev *dev);
+void mt76x0_read_rx_gain(struct mt76x02_dev *dev);
+void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev);
+void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info);
 
-       MT_EE_LNA_GAIN_2GHZ =                   0x44,
-       MT_EE_LNA_GAIN_5GHZ_0 =                 0x45,
-       MT_EE_RSSI_OFFSET =                     0x46,
-       MT_EE_RSSI_OFFSET_5GHZ =                0x4a,
-       MT_EE_LNA_GAIN_5GHZ_1 =                 0x49,
-       MT_EE_LNA_GAIN_5GHZ_2 =                 0x4d,
-
-       MT_EE_TX_POWER_DELTA_BW40 =             0x50,
-
-       MT_EE_TX_POWER_OFFSET_2GHZ =            0x52,
-
-       MT_EE_TX_TSSI_SLOPE =                   0x6e,
-       MT_EE_TX_TSSI_OFFSET_GROUP =            0x6f,
-       MT_EE_TX_TSSI_OFFSET =                  0x76,
-
-       MT_EE_TX_POWER_OFFSET_5GHZ =            0x78,
-
-       MT_EE_TEMP_OFFSET =                     0xd1,
-       MT_EE_FREQ_OFFSET_COMPENSATION =        0xdb,
-       MT_EE_TX_POWER_BYRATE_BASE =            0xde,
-
-       MT_EE_TX_POWER_BYRATE_BASE_5GHZ =       0x120,
-
-       MT_EE_USAGE_MAP_START =                 0x1e0,
-       MT_EE_USAGE_MAP_END =                   0x1fc,
-};
-
-#define MT_EE_NIC_CONF_0_RX_PATH               GENMASK(3, 0)
-#define MT_EE_NIC_CONF_0_TX_PATH               GENMASK(7, 4)
-#define MT_EE_NIC_CONF_0_PA_TYPE               GENMASK(9, 8)
-#define MT_EE_NIC_CONF_0_BOARD_TYPE            GENMASK(13, 12)
-
-#define MT_EE_NIC_CONF_1_HW_RF_CTRL            BIT(0)
-#define MT_EE_NIC_CONF_1_TEMP_TX_ALC           BIT(1)
-#define MT_EE_NIC_CONF_1_LNA_EXT_2G            BIT(2)
-#define MT_EE_NIC_CONF_1_LNA_EXT_5G            BIT(3)
-#define MT_EE_NIC_CONF_1_TX_ALC_EN             BIT(13)
-
-#define MT_EE_NIC_CONF_2_RX_STREAM             GENMASK(3, 0)
-#define MT_EE_NIC_CONF_2_TX_STREAM             GENMASK(7, 4)
-#define MT_EE_NIC_CONF_2_HW_ANTDIV             BIT(8)
-#define MT_EE_NIC_CONF_2_XTAL_OPTION           GENMASK(10, 9)
-#define MT_EE_NIC_CONF_2_TEMP_DISABLE          BIT(11)
-#define MT_EE_NIC_CONF_2_COEX_METHOD           GENMASK(15, 13)
-
-#define MT_EE_TX_POWER_BYRATE(i)               (MT_EE_TX_POWER_BYRATE_BASE + \
-                                                (i) * 4)
-
-#define MT_EFUSE_USAGE_MAP_SIZE                        (MT_EE_USAGE_MAP_END -  \
-                                                MT_EE_USAGE_MAP_START + 1)
-
-enum mt76x0_eeprom_access_modes {
-       MT_EE_READ = 0,
-       MT_EE_PHYSICAL_READ = 1,
-};
-
-struct reg_channel_bounds {
-       u8 start;
-       u8 num;
-};
-
-struct mt76x0_eeprom_params {
-       u8 rf_freq_off;
-       s16 temp_off;
-       s8 rssi_offset_2ghz[2];
-       s8 rssi_offset_5ghz[3];
-       s8 lna_gain_2ghz;
-       s8 lna_gain_5ghz[3];
-       u8 pa_type;
-
-       /* TX_PWR_CFG_* values from EEPROM for 20 and 40 Mhz bandwidths. */
-       u32 tx_pwr_cfg_2g[5][2];
-       u32 tx_pwr_cfg_5g[5][2];
-
-       u8 tx_pwr_per_chan[58];
-
-       struct reg_channel_bounds reg;
-
-       bool has_2ghz;
-       bool has_5ghz;
-};
-
-int mt76x0_eeprom_init(struct mt76x0_dev *dev);
-
-static inline u32 s6_validate(u32 reg)
-{
-       WARN_ON(reg & ~GENMASK(5, 0));
-       return reg & GENMASK(5, 0);
-}
-
-static inline int s6_to_int(u32 reg)
-{
-       int s6;
-
-       s6 = s6_validate(reg);
-       if (s6 & BIT(5))
-               s6 -= BIT(6);
-
-       return s6;
-}
-
-static inline u32 int_to_s6(int val)
+static inline s8 s6_to_s8(u32 val)
 {
-       if (val < -0x20)
-               return 0x20;
-       if (val > 0x1f)
-               return 0x1f;
+       s8 ret = val & GENMASK(5, 0);
 
-       return val & 0x3f;
+       if (ret & BIT(5))
+               ret -= BIT(6);
+       return ret;
 }
 
 #endif
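
s6_to_s8() above sign-extends a 6-bit two's-complement field by subtracting BIT(6) whenever the sign bit BIT(5) is set. A few spot checks of the arithmetic, as a standalone sketch (the demo function mirrors the inline above):

#include <assert.h>
#include <stdint.h>

/* Same arithmetic as s6_to_s8(): keep the low 6 bits, then subtract
 * 64 when bit 5 (the sign bit of the 6-bit field) is set. */
static int8_t s6_to_s8_demo(uint32_t val)
{
        int8_t ret = val & 0x3f;

        if (ret & 0x20)
                ret -= 0x40;
        return ret;
}

int main(void)
{
        assert(s6_to_s8_demo(0x00) == 0);
        assert(s6_to_s8_demo(0x1f) == 31);   /* largest positive value */
        assert(s6_to_s8_demo(0x20) == -32);  /* smallest negative value */
        assert(s6_to_s8_demo(0x3f) == -1);
        return 0;
}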
index 7cdb3e740522b72001866d4cfe56d3140531a7c2..ee2b8e885608a6c8af56e3eb924d16fc7db98fc6 100644
 #include "eeprom.h"
 #include "trace.h"
 #include "mcu.h"
-#include "usb.h"
-
 #include "initvals.h"
 
-static void
-mt76x0_set_wlan_state(struct mt76x0_dev *dev, u32 val, bool enable)
+static void mt76x0_vht_cap_mask(struct ieee80211_supported_band *sband)
 {
+       struct ieee80211_sta_vht_cap *vht_cap = &sband->vht_cap;
+       u16 mcs_map = 0;
        int i;
 
+       vht_cap->cap &= ~IEEE80211_VHT_CAP_RXLDPC;
+       for (i = 0; i < 8; i++) {
+               if (!i)
+                       mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_7 << (i * 2));
+               else
+                       mcs_map |=
+                               (IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
+       }
+       vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
+       vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+}
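
The VHT MCS map packs one 2-bit support code per spatial stream into a u16; with IEEE80211_VHT_MCS_SUPPORT_0_7 = 0 and IEEE80211_VHT_MCS_NOT_SUPPORTED = 3, the loop in mt76x0_vht_cap_mask() advertises MCS 0-7 on stream 1 only, i.e. a host-order map of 0xfffc before the cpu_to_le16() conversion. A minimal check of that encoding:

#include <assert.h>
#include <stdint.h>

/* 2-bit per-stream codes from the VHT MCS map (see ieee80211.h). */
#define VHT_MCS_SUPPORT_0_7     0
#define VHT_MCS_NOT_SUPPORTED   3

int main(void)
{
        uint16_t mcs_map = 0;
        int i;

        for (i = 0; i < 8; i++)
                mcs_map |= (i ? VHT_MCS_NOT_SUPPORTED
                              : VHT_MCS_SUPPORT_0_7) << (i * 2);

        /* Stream 1 supports MCS 0-7, streams 2-8 are disabled. */
        assert(mcs_map == 0xfffc);
        return 0;
}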
+
+static void
+mt76x0_set_wlan_state(struct mt76x02_dev *dev, u32 val, bool enable)
+{
+       u32 mask = MT_CMB_CTRL_XTAL_RDY | MT_CMB_CTRL_PLL_LD;
+
        /* Note: we don't turn off WLAN_CLK because that makes the device
         *       not respond properly on the probe path.
         *       In case anyone (PSM?) wants to use this function we can
@@ -42,32 +58,18 @@ mt76x0_set_wlan_state(struct mt76x0_dev *dev, u32 val, bool enable)
        mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
        udelay(20);
 
-       if (!enable)
-               return;
-
-       for (i = 200; i; i--) {
-               val = mt76_rr(dev, MT_CMB_CTRL);
-
-               if (val & MT_CMB_CTRL_XTAL_RDY && val & MT_CMB_CTRL_PLL_LD)
-                       break;
-
-               udelay(20);
-       }
-
        /* Note: vendor driver tries to disable/enable wlan here and retry
         *       but the code which does it is so buggy it must have never
         *       triggered, so don't bother.
         */
-       if (!i)
-               dev_err(dev->mt76.dev, "Error: PLL and XTAL check failed!\n");
+       if (enable && !mt76_poll(dev, MT_CMB_CTRL, mask, mask, 2000))
+               dev_err(dev->mt76.dev, "PLL and XTAL check failed\n");
 }
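
The open-coded 200-iteration wait is replaced by mt76_poll(), which re-reads a register until (value & mask) == val or a timeout expires, and returns whether it succeeded. A self-contained sketch of that contract (the fake register and the timing model are our assumptions, not mt76's implementation):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Fake device register; stands in for mt76_rr(dev, MT_CMB_CTRL). */
static uint32_t fake_reg;

static uint32_t reg_read(void)
{
        /* Pretend the PLL/XTAL bits come up after a few reads. */
        static int reads;

        if (++reads == 3)
                fake_reg |= 0x3;        /* XTAL_RDY | PLL_LD stand-ins */
        return fake_reg;
}

/* Poll until (reg & mask) == val or the iteration budget runs out;
 * true on success, false on timeout -- the contract the new code
 * relies on for the XTAL_RDY | PLL_LD check above. */
static bool poll_reg(uint32_t mask, uint32_t val, int iterations)
{
        while (iterations-- > 0)
                if ((reg_read() & mask) == val)
                        return true;
        return false;
}

int main(void)
{
        assert(poll_reg(0x3, 0x3, 100));
        return 0;
}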
 
-void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset)
+void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset)
 {
        u32 val;
 
-       mutex_lock(&dev->hw_atomic_mutex);
-
        val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
 
        if (reset) {
@@ -89,54 +91,25 @@ void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset)
        udelay(20);
 
        mt76x0_set_wlan_state(dev, val, enable);
-
-       mutex_unlock(&dev->hw_atomic_mutex);
 }
+EXPORT_SYMBOL_GPL(mt76x0_chip_onoff);
 
-static void mt76x0_reset_csr_bbp(struct mt76x0_dev *dev)
+static void mt76x0_reset_csr_bbp(struct mt76x02_dev *dev)
 {
-       u32 val;
-
-       val = mt76_rr(dev, MT_PBF_SYS_CTRL);
-       val &= ~0x2000;
-       mt76_wr(dev, MT_PBF_SYS_CTRL, val);
-
-       mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR |
-                                        MT_MAC_SYS_CTRL_RESET_BBP);
-
+       mt76_wr(dev, MT_MAC_SYS_CTRL,
+               MT_MAC_SYS_CTRL_RESET_CSR |
+               MT_MAC_SYS_CTRL_RESET_BBP);
        msleep(200);
+       mt76_clear(dev, MT_MAC_SYS_CTRL,
+                  MT_MAC_SYS_CTRL_RESET_CSR |
+                  MT_MAC_SYS_CTRL_RESET_BBP);
 }
 
-static void mt76x0_init_usb_dma(struct mt76x0_dev *dev)
-{
-       u32 val;
-
-       val = mt76_rr(dev, MT_USB_DMA_CFG);
-
-       val |= FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) |
-              FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_LMT, MT_USB_AGGR_SIZE_LIMIT) |
-              MT_USB_DMA_CFG_RX_BULK_EN |
-              MT_USB_DMA_CFG_TX_BULK_EN;
-       if (dev->in_max_packet == 512)
-               val |= MT_USB_DMA_CFG_RX_BULK_AGG_EN;
-       mt76_wr(dev, MT_USB_DMA_CFG, val);
-
-       val = mt76_rr(dev, MT_COM_REG0);
-       if (val & 1)
-               dev_dbg(dev->mt76.dev, "MCU not ready\n");
-
-       val = mt76_rr(dev, MT_USB_DMA_CFG);
-
-       val |= MT_USB_DMA_CFG_RX_DROP_OR_PADDING;
-       mt76_wr(dev, MT_USB_DMA_CFG, val);
-       val &= ~MT_USB_DMA_CFG_RX_DROP_OR_PADDING;
-       mt76_wr(dev, MT_USB_DMA_CFG, val);
-}
-
-#define RANDOM_WRITE(dev, tab) \
-       mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, tab, ARRAY_SIZE(tab));
+#define RANDOM_WRITE(dev, tab)                 \
+       mt76_wr_rp(dev, MT_MCU_MEMMAP_WLAN,     \
+                  tab, ARRAY_SIZE(tab))
 
-static int mt76x0_init_bbp(struct mt76x0_dev *dev)
+static int mt76x0_init_bbp(struct mt76x02_dev *dev)
 {
        int ret, i;
 
@@ -159,30 +132,13 @@ static int mt76x0_init_bbp(struct mt76x0_dev *dev)
        return 0;
 }
 
-static void
-mt76_init_beacon_offsets(struct mt76x0_dev *dev)
-{
-       u16 base = MT_BEACON_BASE;
-       u32 regs[4] = {};
-       int i;
-
-       for (i = 0; i < 16; i++) {
-               u16 addr = dev->beacon_offsets[i];
-
-               regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4));
-       }
-
-       for (i = 0; i < 4; i++)
-               mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
-}
-
-static void mt76x0_init_mac_registers(struct mt76x0_dev *dev)
+static void mt76x0_init_mac_registers(struct mt76x02_dev *dev)
 {
        u32 reg;
 
        RANDOM_WRITE(dev, common_mac_reg_table);
 
-       mt76_init_beacon_offsets(dev);
+       mt76x02_set_beacon_offsets(&dev->mt76);
 
        /* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */
        RANDOM_WRITE(dev, mt76x0_mac_reg_table);
@@ -192,13 +148,6 @@ static void mt76x0_init_mac_registers(struct mt76x0_dev *dev)
        reg &= ~0x3;
        mt76_wr(dev, MT_MAC_SYS_CTRL, reg);
 
-       if (is_mt7610e(dev)) {
-               /* Disable COEX_EN */
-               reg = mt76_rr(dev, MT_COEXCFG0);
-               reg &= 0xFFFFFFFE;
-               mt76_wr(dev, MT_COEXCFG0, reg);
-       }
-
        /* Set 0x141C[15:12]=0xF */
        reg = mt76_rr(dev, MT_EXT_CCA_CFG);
        reg |= 0x0000F000;
@@ -216,115 +165,81 @@ static void mt76x0_init_mac_registers(struct mt76x0_dev *dev)
        reg &= ~0x000003FF;
        reg |= 0x00000201;
        mt76_wr(dev, MT_WMM_CTRL, reg);
-
-       /* TODO: Probably not needed */
-       mt76_wr(dev, 0x7028, 0);
-       mt76_wr(dev, 0x7010, 0);
-       mt76_wr(dev, 0x7024, 0);
-       msleep(10);
 }
 
-static int mt76x0_init_wcid_mem(struct mt76x0_dev *dev)
+static int mt76x0_init_wcid_mem(struct mt76x02_dev *dev)
 {
        u32 *vals;
-       int i, ret;
+       int i;
 
-       vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
+       vals = kmalloc(sizeof(*vals) * MT76_N_WCIDS * 2, GFP_KERNEL);
        if (!vals)
                return -ENOMEM;
 
-       for (i = 0; i < N_WCIDS; i++)  {
+       for (i = 0; i < MT76_N_WCIDS; i++)  {
                vals[i * 2] = 0xffffffff;
                vals[i * 2 + 1] = 0x00ffffff;
        }
 
-       ret = mt76x0_burst_write_regs(dev, MT_WCID_ADDR_BASE,
-                                     vals, N_WCIDS * 2);
+       mt76_wr_copy(dev, MT_WCID_ADDR_BASE, vals, MT76_N_WCIDS * 2);
        kfree(vals);
-
-       return ret;
+       return 0;
 }
 
-static int mt76x0_init_key_mem(struct mt76x0_dev *dev)
+static void mt76x0_init_key_mem(struct mt76x02_dev *dev)
 {
        u32 vals[4] = {};
 
-       return mt76x0_burst_write_regs(dev, MT_SKEY_MODE_BASE_0,
-                                       vals, ARRAY_SIZE(vals));
+       mt76_wr_copy(dev, MT_SKEY_MODE_BASE_0, vals, ARRAY_SIZE(vals));
 }
 
-static int mt76x0_init_wcid_attr_mem(struct mt76x0_dev *dev)
+static int mt76x0_init_wcid_attr_mem(struct mt76x02_dev *dev)
 {
        u32 *vals;
-       int i, ret;
+       int i;
 
-       vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
+       vals = kmalloc(sizeof(*vals) * MT76_N_WCIDS * 2, GFP_KERNEL);
        if (!vals)
                return -ENOMEM;
 
-       for (i = 0; i < N_WCIDS * 2; i++)
+       for (i = 0; i < MT76_N_WCIDS * 2; i++)
                vals[i] = 1;
 
-       ret = mt76x0_burst_write_regs(dev, MT_WCID_ATTR_BASE,
-                                     vals, N_WCIDS * 2);
+       mt76_wr_copy(dev, MT_WCID_ATTR_BASE, vals, MT76_N_WCIDS * 2);
        kfree(vals);
-
-       return ret;
+       return 0;
 }
 
-static void mt76x0_reset_counters(struct mt76x0_dev *dev)
+static void mt76x0_reset_counters(struct mt76x02_dev *dev)
 {
-       mt76_rr(dev, MT_RX_STA_CNT0);
-       mt76_rr(dev, MT_RX_STA_CNT1);
-       mt76_rr(dev, MT_RX_STA_CNT2);
-       mt76_rr(dev, MT_TX_STA_CNT0);
-       mt76_rr(dev, MT_TX_STA_CNT1);
-       mt76_rr(dev, MT_TX_STA_CNT2);
+       mt76_rr(dev, MT_RX_STAT_0);
+       mt76_rr(dev, MT_RX_STAT_1);
+       mt76_rr(dev, MT_RX_STAT_2);
+       mt76_rr(dev, MT_TX_STA_0);
+       mt76_rr(dev, MT_TX_STA_1);
+       mt76_rr(dev, MT_TX_STA_2);
 }
 
-int mt76x0_mac_start(struct mt76x0_dev *dev)
+int mt76x0_mac_start(struct mt76x02_dev *dev)
 {
        mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
 
-       if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
-                      MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 200000))
+       if (!mt76x02_wait_for_wpdma(&dev->mt76, 200000))
                return -ETIMEDOUT;
 
-       dev->rxfilter = MT_RX_FILTR_CFG_CRC_ERR |
-               MT_RX_FILTR_CFG_PHY_ERR | MT_RX_FILTR_CFG_PROMISC |
-               MT_RX_FILTR_CFG_VER_ERR | MT_RX_FILTR_CFG_DUP |
-               MT_RX_FILTR_CFG_CFACK | MT_RX_FILTR_CFG_CFEND |
-               MT_RX_FILTR_CFG_ACK | MT_RX_FILTR_CFG_CTS |
-               MT_RX_FILTR_CFG_RTS | MT_RX_FILTR_CFG_PSPOLL |
-               MT_RX_FILTR_CFG_BA | MT_RX_FILTR_CFG_CTRL_RSV;
-       mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
-
+       mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
        mt76_wr(dev, MT_MAC_SYS_CTRL,
-                  MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
-
-       if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
-                      MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 50))
-               return -ETIMEDOUT;
+               MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
 
-       return 0;
+       return !mt76x02_wait_for_wpdma(&dev->mt76, 50) ? -ETIMEDOUT : 0;
 }
+EXPORT_SYMBOL_GPL(mt76x0_mac_start);
 
-static void mt76x0_mac_stop_hw(struct mt76x0_dev *dev)
+void mt76x0_mac_stop(struct mt76x02_dev *dev)
 {
-       int i, ok;
-
-       if (test_bit(MT76_REMOVED, &dev->mt76.state))
-               return;
-
-       mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
-                  MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
-                  MT_BEACON_TIME_CFG_BEACON_TX);
-
-       if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
-               dev_warn(dev->mt76.dev, "Warning: TX DMA did not stop!\n");
+       int i = 200, ok = 0;
 
        /* Page count on TxQ */
-       i = 200;
        while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) ||
                       (mt76_rr(dev, 0x0a30) & 0x000000ff) ||
                       (mt76_rr(dev, 0x0a34) & 0x00ff00ff)))
@@ -337,9 +252,7 @@ static void mt76x0_mac_stop_hw(struct mt76x0_dev *dev)
                                         MT_MAC_SYS_CTRL_ENABLE_TX);
 
        /* Page count on RxQ */
-       ok = 0;
-       i = 200;
-       while (i--) {
+       for (i = 0; i < 200; i++) {
                if (!(mt76_rr(dev, MT_RXQ_STA) & 0x00ff0000) &&
                    !mt76_rr(dev, 0x0a30) &&
                    !mt76_rr(dev, 0x0a34)) {
@@ -352,91 +265,45 @@ static void mt76x0_mac_stop_hw(struct mt76x0_dev *dev)
 
        if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000))
                dev_warn(dev->mt76.dev, "Warning: MAC RX did not stop!\n");
-
-       if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000))
-               dev_warn(dev->mt76.dev, "Warning: RX DMA did not stop!\n");
-}
-
-void mt76x0_mac_stop(struct mt76x0_dev *dev)
-{
-       mt76x0_mac_stop_hw(dev);
-       flush_delayed_work(&dev->stat_work);
-       cancel_delayed_work_sync(&dev->stat_work);
-}
-
-static void mt76x0_stop_hardware(struct mt76x0_dev *dev)
-{
-       mt76x0_chip_onoff(dev, false, false);
 }
+EXPORT_SYMBOL_GPL(mt76x0_mac_stop);
 
-int mt76x0_init_hardware(struct mt76x0_dev *dev)
+int mt76x0_init_hardware(struct mt76x02_dev *dev)
 {
-       static const u16 beacon_offsets[16] = {
-               /* 512 byte per beacon */
-               0xc000, 0xc200, 0xc400, 0xc600,
-               0xc800, 0xca00, 0xcc00, 0xce00,
-               0xd000, 0xd200, 0xd400, 0xd600,
-               0xd800, 0xda00, 0xdc00, 0xde00
-       };
        int ret;
 
-       dev->beacon_offsets = beacon_offsets;
-
-       mt76x0_chip_onoff(dev, true, true);
-
-       ret = mt76x0_wait_asic_ready(dev);
-       if (ret)
-               goto err;
-       ret = mt76x0_mcu_init(dev);
-       if (ret)
-               goto err;
-
-       if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
-                           MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
-                           MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100)) {
-               ret = -EIO;
-               goto err;
-       }
+       if (!mt76x02_wait_for_wpdma(&dev->mt76, 1000))
+               return -EIO;
 
        /* Wait for ASIC ready after FW load. */
-       ret = mt76x0_wait_asic_ready(dev);
-       if (ret)
-               goto err;
+       if (!mt76x02_wait_for_mac(&dev->mt76))
+               return -ETIMEDOUT;
 
        mt76x0_reset_csr_bbp(dev);
-       mt76x0_init_usb_dma(dev);
-
-       mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0x0);
-       mt76_wr(dev, MT_TSO_CTRL, 0x0);
-
-       ret = mt76x0_mcu_cmd_init(dev);
+       ret = mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 1, false);
        if (ret)
-               goto err;
-       ret = mt76x0_dma_init(dev);
-       if (ret)
-               goto err_mcu;
+               return ret;
 
        mt76x0_init_mac_registers(dev);
 
-       if (!mt76_poll_msec(dev, MT_MAC_STATUS,
-                           MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 1000)) {
-               ret = -EIO;
-               goto err_rx;
-       }
+       if (!mt76x02_wait_for_txrx_idle(&dev->mt76))
+               return -EIO;
 
        ret = mt76x0_init_bbp(dev);
        if (ret)
-               goto err_rx;
+               return ret;
+
+       dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
 
        ret = mt76x0_init_wcid_mem(dev);
        if (ret)
-               goto err_rx;
-       ret = mt76x0_init_key_mem(dev);
-       if (ret)
-               goto err_rx;
+               return ret;
+
+       mt76x0_init_key_mem(dev);
+
        ret = mt76x0_init_wcid_attr_mem(dev);
        if (ret)
-               goto err_rx;
+               return ret;
 
        mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
                                             MT_BEACON_TIME_CFG_SYNC_MODE |
@@ -445,276 +312,82 @@ int mt76x0_init_hardware(struct mt76x0_dev *dev)
 
        mt76x0_reset_counters(dev);
 
-       mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
-
-       mt76_wr(dev, MT_TXOP_CTRL_CFG,
-                  FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
-                  FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
-
        ret = mt76x0_eeprom_init(dev);
        if (ret)
-               goto err_rx;
+               return ret;
 
        mt76x0_phy_init(dev);
-       return 0;
-
-err_rx:
-       mt76x0_dma_cleanup(dev);
-err_mcu:
-       mt76x0_mcu_cmd_deinit(dev);
-err:
-       mt76x0_chip_onoff(dev, false, false);
-       return ret;
-}
-
-void mt76x0_cleanup(struct mt76x0_dev *dev)
-{
-       if (!test_and_clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
-               return;
 
-       mt76x0_stop_hardware(dev);
-       mt76x0_dma_cleanup(dev);
-       mt76x0_mcu_cmd_deinit(dev);
+       return 0;
 }
+EXPORT_SYMBOL_GPL(mt76x0_init_hardware);
 
-struct mt76x0_dev *mt76x0_alloc_device(struct device *pdev)
+struct mt76x02_dev *
+mt76x0_alloc_device(struct device *pdev,
+                   const struct mt76_driver_ops *drv_ops,
+                   const struct ieee80211_ops *ops)
 {
-       struct ieee80211_hw *hw;
-       struct mt76x0_dev *dev;
+       struct mt76x02_dev *dev;
+       struct mt76_dev *mdev;
 
-       hw = ieee80211_alloc_hw(sizeof(*dev), &mt76x0_ops);
-       if (!hw)
+       mdev = mt76_alloc_device(sizeof(*dev), ops);
+       if (!mdev)
                return NULL;
 
-       dev = hw->priv;
-       dev->mt76.dev = pdev;
-       dev->mt76.hw = hw;
-       mutex_init(&dev->usb_ctrl_mtx);
-       mutex_init(&dev->reg_atomic_mutex);
-       mutex_init(&dev->hw_atomic_mutex);
-       mutex_init(&dev->mutex);
-       spin_lock_init(&dev->tx_lock);
-       spin_lock_init(&dev->rx_lock);
-       spin_lock_init(&dev->mt76.lock);
-       spin_lock_init(&dev->mac_lock);
-       spin_lock_init(&dev->con_mon_lock);
-       atomic_set(&dev->avg_ampdu_len, 1);
-       skb_queue_head_init(&dev->tx_skb_done);
+       mdev->dev = pdev;
+       mdev->drv = drv_ops;
 
-       dev->stat_wq = alloc_workqueue("mt76x0", WQ_UNBOUND, 0);
-       if (!dev->stat_wq) {
-               ieee80211_free_hw(hw);
-               return NULL;
-       }
+       dev = container_of(mdev, struct mt76x02_dev, mt76);
+       mutex_init(&dev->phy_mutex);
+       atomic_set(&dev->avg_ampdu_len, 1);
 
        return dev;
 }
+EXPORT_SYMBOL_GPL(mt76x0_alloc_device);
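
mt76_alloc_device() hands back a pointer to the embedded struct mt76_dev, and container_of() recovers the enclosing mt76x02_dev by subtracting the member's offset from the member pointer. The same idiom in freestanding form (the toy struct names are ours):

#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };

struct outer {
        int id;
        struct inner core;      /* like mt76x02_dev embedding mt76_dev */
};

int main(void)
{
        struct outer o = { .id = 42 };
        struct inner *ip = &o.core;

        /* Recover the enclosing structure from the member pointer. */
        struct outer *op = container_of(ip, struct outer, core);

        assert(op == &o && op->id == 42);
        return 0;
}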
 
-#define CHAN2G(_idx, _freq) {                  \
-       .band = NL80211_BAND_2GHZ,              \
-       .center_freq = (_freq),                 \
-       .hw_value = (_idx),                     \
-       .max_power = 30,                        \
-}
-
-static const struct ieee80211_channel mt76_channels_2ghz[] = {
-       CHAN2G(1, 2412),
-       CHAN2G(2, 2417),
-       CHAN2G(3, 2422),
-       CHAN2G(4, 2427),
-       CHAN2G(5, 2432),
-       CHAN2G(6, 2437),
-       CHAN2G(7, 2442),
-       CHAN2G(8, 2447),
-       CHAN2G(9, 2452),
-       CHAN2G(10, 2457),
-       CHAN2G(11, 2462),
-       CHAN2G(12, 2467),
-       CHAN2G(13, 2472),
-       CHAN2G(14, 2484),
-};
-
-#define CHAN5G(_idx, _freq) {                  \
-       .band = NL80211_BAND_5GHZ,              \
-       .center_freq = (_freq),                 \
-       .hw_value = (_idx),                     \
-       .max_power = 30,                        \
-}
-
-static const struct ieee80211_channel mt76_channels_5ghz[] = {
-       CHAN5G(36, 5180),
-       CHAN5G(40, 5200),
-       CHAN5G(44, 5220),
-       CHAN5G(46, 5230),
-       CHAN5G(48, 5240),
-       CHAN5G(52, 5260),
-       CHAN5G(56, 5280),
-       CHAN5G(60, 5300),
-       CHAN5G(64, 5320),
-
-       CHAN5G(100, 5500),
-       CHAN5G(104, 5520),
-       CHAN5G(108, 5540),
-       CHAN5G(112, 5560),
-       CHAN5G(116, 5580),
-       CHAN5G(120, 5600),
-       CHAN5G(124, 5620),
-       CHAN5G(128, 5640),
-       CHAN5G(132, 5660),
-       CHAN5G(136, 5680),
-       CHAN5G(140, 5700),
-};
-
-#define CCK_RATE(_idx, _rate) {                                        \
-       .bitrate = _rate,                                       \
-       .flags = IEEE80211_RATE_SHORT_PREAMBLE,                 \
-       .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx,              \
-       .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx),  \
-}
-
-#define OFDM_RATE(_idx, _rate) {                               \
-       .bitrate = _rate,                                       \
-       .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx,             \
-       .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx,       \
-}
-
-static struct ieee80211_rate mt76_rates[] = {
-       CCK_RATE(0, 10),
-       CCK_RATE(1, 20),
-       CCK_RATE(2, 55),
-       CCK_RATE(3, 110),
-       OFDM_RATE(0, 60),
-       OFDM_RATE(1, 90),
-       OFDM_RATE(2, 120),
-       OFDM_RATE(3, 180),
-       OFDM_RATE(4, 240),
-       OFDM_RATE(5, 360),
-       OFDM_RATE(6, 480),
-       OFDM_RATE(7, 540),
-};
-
-static int
-mt76_init_sband(struct mt76x0_dev *dev, struct ieee80211_supported_band *sband,
-               const struct ieee80211_channel *chan, int n_chan,
-               struct ieee80211_rate *rates, int n_rates)
-{
-       struct ieee80211_sta_ht_cap *ht_cap;
-       void *chanlist;
-       int size;
-
-       size = n_chan * sizeof(*chan);
-       chanlist = devm_kmemdup(dev->mt76.dev, chan, size, GFP_KERNEL);
-       if (!chanlist)
-               return -ENOMEM;
-
-       sband->channels = chanlist;
-       sband->n_channels = n_chan;
-       sband->bitrates = rates;
-       sband->n_bitrates = n_rates;
-
-       ht_cap = &sband->ht_cap;
-       ht_cap->ht_supported = true;
-       ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
-                     IEEE80211_HT_CAP_GRN_FLD |
-                     IEEE80211_HT_CAP_SGI_20 |
-                     IEEE80211_HT_CAP_SGI_40 |
-                     (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
-
-       ht_cap->mcs.rx_mask[0] = 0xff;
-       ht_cap->mcs.rx_mask[4] = 0x1;
-       ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
-       ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
-       ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_2;
-
-       return 0;
-}
-
-static int
-mt76_init_sband_2g(struct mt76x0_dev *dev)
-{
-       dev->mt76.hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->mt76.sband_2g.sband;
-
-       WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num >
-               ARRAY_SIZE(mt76_channels_2ghz));
-
-
-       return mt76_init_sband(dev, &dev->mt76.sband_2g.sband,
-                              mt76_channels_2ghz, ARRAY_SIZE(mt76_channels_2ghz),
-                              mt76_rates, ARRAY_SIZE(mt76_rates));
-}
-
-static int
-mt76_init_sband_5g(struct mt76x0_dev *dev)
-{
-       dev->mt76.hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->mt76.sband_5g.sband;
-
-       return mt76_init_sband(dev, &dev->mt76.sband_5g.sband,
-                              mt76_channels_5ghz, ARRAY_SIZE(mt76_channels_5ghz),
-                              mt76_rates + 4, ARRAY_SIZE(mt76_rates) - 4);
-}
-
-
-int mt76x0_register_device(struct mt76x0_dev *dev)
+int mt76x0_register_device(struct mt76x02_dev *dev)
 {
-       struct ieee80211_hw *hw = dev->mt76.hw;
+       struct mt76_dev *mdev = &dev->mt76;
+       struct ieee80211_hw *hw = mdev->hw;
        struct wiphy *wiphy = hw->wiphy;
        int ret;
 
        /* Reserve WCID 0 for mcast - thanks to this, the AP's WCID will go
         * to entry no. 1 like it does in the vendor driver.
         */
-       dev->wcid_mask[0] |= 1;
+       mdev->wcid_mask[0] |= 1;
 
        /* init fake wcid for monitor interfaces */
-       dev->mon_wcid = devm_kmalloc(dev->mt76.dev, sizeof(*dev->mon_wcid),
-                                    GFP_KERNEL);
-       if (!dev->mon_wcid)
-               return -ENOMEM;
-       dev->mon_wcid->idx = 0xff;
-       dev->mon_wcid->hw_key_idx = -1;
+       mdev->global_wcid.idx = 0xff;
+       mdev->global_wcid.hw_key_idx = -1;
 
-       SET_IEEE80211_DEV(hw, dev->mt76.dev);
+       /* init antenna configuration */
+       mdev->antenna_mask = 1;
 
        hw->queues = 4;
-       ieee80211_hw_set(hw, SIGNAL_DBM);
-       ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
-       ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
-       ieee80211_hw_set(hw, AMPDU_AGGREGATION);
-       ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
        hw->max_rates = 1;
        hw->max_report_rates = 7;
        hw->max_rate_tries = 1;
+       hw->extra_tx_headroom = sizeof(struct mt76x02_txwi) + 4 + 2;
 
-       hw->sta_data_size = sizeof(struct mt76_sta);
-       hw->vif_data_size = sizeof(struct mt76_vif);
-
-       SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+       hw->sta_data_size = sizeof(struct mt76x02_sta);
+       hw->vif_data_size = sizeof(struct mt76x02_vif);
 
-       wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
        wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
 
-       if (dev->ee->has_2ghz) {
-               ret = mt76_init_sband_2g(dev);
-               if (ret)
-                       return ret;
-       }
-
-       if (dev->ee->has_5ghz) {
-               ret = mt76_init_sband_5g(dev);
-               if (ret)
-                       return ret;
-       }
-
-       dev->mt76.chandef.chan = &dev->mt76.sband_2g.sband.channels[0];
-
        INIT_DELAYED_WORK(&dev->mac_work, mt76x0_mac_work);
-       INIT_DELAYED_WORK(&dev->stat_work, mt76x0_tx_stat);
 
-       ret = ieee80211_register_hw(hw);
+       ret = mt76_register_device(mdev, true, mt76x02_rates,
+                                  ARRAY_SIZE(mt76x02_rates));
        if (ret)
                return ret;
 
+       /* overwrite unsupported features */
+       if (mdev->cap.has_5ghz)
+               mt76x0_vht_cap_mask(&dev->mt76.sband_5g.sband);
+
        mt76x0_init_debugfs(dev);
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(mt76x0_register_device);
index 24afcfd94b4e63f934259463e410ff627adbdaea..236dce6860b465782d5504008b02915dca05fb4c 100644
@@ -2,6 +2,7 @@
  * (c) Copyright 2002-2010, Ralink Technology, Inc.
  * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
  * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2
 #include "phy.h"
 
 static const struct mt76_reg_pair common_mac_reg_table[] = {
-#if 1
-       {MT_BCN_OFFSET(0),                      0xf8f0e8e0}, /* 0x3800(e0), 0x3A00(e8), 0x3C00(f0), 0x3E00(f8), 512B for each beacon */
-       {MT_BCN_OFFSET(1),                      0x6f77d0c8}, /* 0x3200(c8), 0x3400(d0), 0x1DC0(77), 0x1BC0(6f), 512B for each beacon */
-#endif
-
-       {MT_LEGACY_BASIC_RATE,          0x0000013f}, /*  Basic rate set bitmap*/
-       {MT_HT_BASIC_RATE,              0x00008003}, /* Basic HT rate set , 20M, MCS=3, MM. Format is the same as in TXWI.*/
-       {MT_MAC_SYS_CTRL,               0x00}, /* 0x1004, , default Disable RX*/
-       {MT_RX_FILTR_CFG,               0x17f97}, /*0x1400  , RX filter control,  */
-       {MT_BKOFF_SLOT_CFG,     0x209}, /* default set short slot time, CC_DELAY_TIME should be 2        */
-       /*{TX_SW_CFG0,          0x40a06},  Gary,2006-08-23 */
-       {MT_TX_SW_CFG0,         0x0},           /* Gary,2008-05-21 for CWC test */
-       {MT_TX_SW_CFG1,         0x80606}, /* Gary,2006-08-23 */
-       {MT_TX_LINK_CFG,                0x1020},                /* Gary,2006-08-23 */
-       /*{TX_TIMEOUT_CFG,      0x00182090},     CCK has some problem. So increase timieout value. 2006-10-09 MArvek RT*/
-       {MT_TX_TIMEOUT_CFG,     0x000a2090},    /* CCK has some problem. So increase timieout value. 2006-10-09 MArvek RT , Modify for 2860E ,2007-08-01*/
-       {MT_MAX_LEN_CFG,                0xa0fff | 0x00001000},  /* 0x3018, MAX frame length. Max PSDU = 16kbytes.*/
-       {MT_LED_CFG,            0x7f031e46}, /* Gary, 2006-08-23*/
-
-       {MT_PBF_TX_MAX_PCNT,            0x1fbf1f1f /*0xbfbf3f1f*/},
-       {MT_PBF_RX_MAX_PCNT,            0x9f},
-
-       /*{TX_RTY_CFG,                  0x6bb80408},     Jan, 2006/11/16*/
-/* WMM_ACM_SUPPORT */
-/*     {TX_RTY_CFG,                    0x6bb80101},     sample*/
-       {MT_TX_RETRY_CFG,                       0x47d01f0f},    /* Jan, 2006/11/16, Set TxWI->ACK =0 in Probe Rsp Modify for 2860E ,2007-08-03*/
-
-       {MT_AUTO_RSP_CFG,                       0x00000013},    /* Initial Auto_Responder, because QA will turn off Auto-Responder*/
-       {MT_CCK_PROT_CFG,                       0x05740003 /*0x01740003*/},     /* Initial Auto_Responder, because QA will turn off Auto-Responder. And RTS threshold is enabled. */
-       {MT_OFDM_PROT_CFG,                      0x05740003 /*0x01740003*/},     /* Initial Auto_Responder, because QA will turn off Auto-Responder. And RTS threshold is enabled. */
-       {MT_PBF_CFG,                            0xf40006},              /* Only enable Queue 2*/
-       {MT_MM40_PROT_CFG,                      0x3F44084},             /* Initial Auto_Responder, because QA will turn off Auto-Responder*/
-       {MT_WPDMA_GLO_CFG,                      0x00000030},
-       {MT_GF20_PROT_CFG,                      0x01744004},    /* set 19:18 --> Short NAV for MIMO PS*/
-       {MT_GF40_PROT_CFG,                      0x03F44084},
-       {MT_MM20_PROT_CFG,                      0x01744004},
-       {MT_TXOP_CTRL_CFG,                      0x0000583f, /*0x0000243f*/ /*0x000024bf*/},     /*Extension channel backoff.*/
-       {MT_TX_RTS_CFG,                 0x00092b20},
-
-       {MT_EXP_ACK_TIME,                       0x002400ca},    /* default value */
-       {MT_TXOP_HLDR_ET,                       0x00000002},
-
-       /* Jerry comments 2008/01/16: we use SIFS = 10us in CCK defaultly, but it seems that 10us
-               is too small for INTEL 2200bg card, so in MBSS mode, the delta time between beacon0
-               and beacon1 is SIFS (10us), so if INTEL 2200bg card connects to BSS0, the ping
-               will always lost. So we change the SIFS of CCK from 10us to 16us. */
-       {MT_XIFS_TIME_CFG,                      0x33a41010},
-       {MT_PWR_PIN_CFG,                        0x00000000},
+       { MT_BCN_OFFSET(0),             0xf8f0e8e0 },
+       { MT_BCN_OFFSET(1),             0x6f77d0c8 },
+       { MT_LEGACY_BASIC_RATE,         0x0000013f },
+       { MT_HT_BASIC_RATE,             0x00008003 },
+       { MT_MAC_SYS_CTRL,              0x00000000 },
+       { MT_RX_FILTR_CFG,              0x00017f97 },
+       { MT_BKOFF_SLOT_CFG,            0x00000209 },
+       { MT_TX_SW_CFG0,                0x00000000 },
+       { MT_TX_SW_CFG1,                0x00080606 },
+       { MT_TX_LINK_CFG,               0x00001020 },
+       { MT_TX_TIMEOUT_CFG,            0x000a2090 },
+       { MT_MAX_LEN_CFG,               0xa0fff | 0x00001000 },
+       { MT_LED_CFG,                   0x7f031e46 },
+       { MT_PBF_TX_MAX_PCNT,           0x1fbf1f1f },
+       { MT_PBF_RX_MAX_PCNT,           0x0000fe9f },
+       { MT_TX_RETRY_CFG,              0x47d01f0f },
+       { MT_AUTO_RSP_CFG,              0x00000013 },
+       { MT_CCK_PROT_CFG,              0x05740003 },
+       { MT_OFDM_PROT_CFG,             0x05740003 },
+       { MT_PBF_CFG,                   0x00f40006 },
+       { MT_WPDMA_GLO_CFG,             0x00000030 },
+       { MT_GF20_PROT_CFG,             0x01744004 },
+       { MT_GF40_PROT_CFG,             0x03f44084 },
+       { MT_MM20_PROT_CFG,             0x01744004 },
+       { MT_MM40_PROT_CFG,             0x03f54084 },
+       { MT_TXOP_CTRL_CFG,             0x0000583f },
+       { MT_TX_RTS_CFG,                0x00092b20 },
+       { MT_EXP_ACK_TIME,              0x002400ca },
+       { MT_TXOP_HLDR_ET,              0x00000002 },
+       { MT_XIFS_TIME_CFG,             0x33a41010 },
+       { MT_PWR_PIN_CFG,               0x00000000 },
 };
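
These initvals are plain { register, value } pairs; RANDOM_WRITE above hands the whole table to mt76_wr_rp(), which conceptually just applies each pair in order. A minimal sketch of such a table walker (names and the printf harness are ours; this shows the idea, not mt76's exact dispatch -- the two example offsets 0x1004 and 0x1400 match the MT_MAC_SYS_CTRL and MT_RX_FILTR_CFG comments in the removed hunk above):

#include <stdint.h>
#include <stdio.h>

struct reg_pair {
        uint32_t reg;
        uint32_t value;
};

/* Stand-in for a bus register write such as mt76_wr(). */
static void reg_write(uint32_t reg, uint32_t value)
{
        printf("wr %#06x <- %#010x\n", reg, value);
}

/* Apply every { reg, value } pair in table order. */
static void write_reg_pairs(const struct reg_pair *rp, int n)
{
        int i;

        for (i = 0; i < n; i++)
                reg_write(rp[i].reg, rp[i].value);
}

int main(void)
{
        static const struct reg_pair table[] = {
                { 0x1004, 0x00000000 },  /* e.g. MT_MAC_SYS_CTRL */
                { 0x1400, 0x00017f97 },  /* e.g. MT_RX_FILTR_CFG */
        };

        write_reg_pairs(table, 2);
        return 0;
}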
 
 static const struct mt76_reg_pair mt76x0_mac_reg_table[] = {
-       /* {MT_IOCFG_6,         0xA0040080 }, */
-       {MT_PBF_SYS_CTRL,       0x00080c00 },
-       {MT_PBF_CFG,            0x77723c1f },
-       {MT_FCE_PSE_CTRL,       0x00000001 },
-
-       {MT_AMPDU_MAX_LEN_20M1S,        0xBAA99887 },
-
-       /* Delay bb_tx_pe for proper tx_mcs_pwr update */
-       {MT_TX_SW_CFG0,         0x00000601 },
-
-       /* Set rf_tx_pe deassert time to 1us by Chee's comment @MT7650_CR_setting_1018.xlsx */
-       {MT_TX_SW_CFG1,         0x00040000 },
-       {MT_TX_SW_CFG2,         0x00000000 },
-
-       /* disable Tx info report */
-       {0xa44,         0x0000000 },
-
-       {MT_HEADER_TRANS_CTRL_REG, 0x0},
-       {MT_TSO_CTRL,           0x0},
-
-       /* BB_PA_MODE_CFG0(0x1214) Keep default value @20120903 */
-       {MT_BB_PA_MODE_CFG1,    0x00500055},
-
-       /* RF_PA_MODE_CFG0(0x121C) Keep default value @20120903 */
-       {MT_RF_PA_MODE_CFG1,    0x00500055},
-
-       {MT_TX_ALC_CFG_0,       0x2F2F000C},
-       {MT_TX0_BB_GAIN_ATTEN,  0x00000000}, /* set BBP atten gain = 0 */
-
-       {MT_TX_PWR_CFG_0, 0x3A3A3A3A},
-       {MT_TX_PWR_CFG_1, 0x3A3A3A3A},
-       {MT_TX_PWR_CFG_2, 0x3A3A3A3A},
-       {MT_TX_PWR_CFG_3, 0x3A3A3A3A},
-       {MT_TX_PWR_CFG_4, 0x3A3A3A3A},
-       {MT_TX_PWR_CFG_7, 0x3A3A3A3A},
-       {MT_TX_PWR_CFG_8, 0x3A},
-       {MT_TX_PWR_CFG_9, 0x3A},
-       /* Enable Tx length > 4095 byte */
-       {0x150C,                0x00000002},
-
-       /* Disable bt_abort_tx_en(0x1238[21] = 0) which is not used at MT7650 */
-       {0x1238,                0x001700C8},
-       /* PMU_OCLEVEL<5:1> from default <5'b10010> to <5'b11011> for normal driver */
-       /* {MT_LDO_CTRL_0,              0x00A647B6}, */
-
-       /* Default LDO_DIG supply 1.26V, change to 1.2V */
-       {MT_LDO_CTRL_1,         0x6B006464 },
-/*
-       {MT_HT_BASIC_RATE,      0x00004003 },
-       {MT_HT_CTRL_CFG,        0x000001FF },
-*/
+       { MT_IOCFG_6,                   0xa0040080 },
+       { MT_PBF_SYS_CTRL,              0x00080c00 },
+       { MT_PBF_CFG,                   0x77723c1f },
+       { MT_FCE_PSE_CTRL,              0x00000001 },
+       { MT_AMPDU_MAX_LEN_20M1S,       0xAAA99887 },
+       { MT_TX_SW_CFG0,                0x00000601 },
+       { MT_TX_SW_CFG1,                0x00040000 },
+       { MT_TX_SW_CFG2,                0x00000000 },
+       { 0xa44,                        0x00000000 },
+       { MT_HEADER_TRANS_CTRL_REG,     0x00000000 },
+       { MT_TSO_CTRL,                  0x00000000 },
+       { MT_BB_PA_MODE_CFG1,           0x00500055 },
+       { MT_RF_PA_MODE_CFG1,           0x00500055 },
+       { MT_TX_ALC_CFG_0,              0x2F2F000C },
+       { MT_TX0_BB_GAIN_ATTEN,         0x00000000 },
+       { MT_TX_PWR_CFG_0,              0x3A3A3A3A },
+       { MT_TX_PWR_CFG_1,              0x3A3A3A3A },
+       { MT_TX_PWR_CFG_2,              0x3A3A3A3A },
+       { MT_TX_PWR_CFG_3,              0x3A3A3A3A },
+       { MT_TX_PWR_CFG_4,              0x3A3A3A3A },
+       { MT_TX_PWR_CFG_7,              0x3A3A3A3A },
+       { MT_TX_PWR_CFG_8,              0x0000003A },
+       { MT_TX_PWR_CFG_9,              0x0000003A },
+       { 0x150C,                       0x00000002 },
+       { 0x1238,                       0x001700C8 },
+       { MT_LDO_CTRL_0,                0x00A647B6 },
+       { MT_LDO_CTRL_1,                0x6B006464 },
+       { MT_HT_BASIC_RATE,             0x00004003 },
+       { MT_HT_CTRL_CFG,               0x000001FF },
+       { MT_TXOP_HLDR_ET,              0x00000000 },
+       { MT_PN_PAD_MODE,               0x00000003 },
 };
 
-
 static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = {
-       {MT_BBP(CORE, 1), 0x00000002},
-       {MT_BBP(CORE, 4), 0x00000000},
-       {MT_BBP(CORE, 24), 0x00000000},
-       {MT_BBP(CORE, 32), 0x4003000a},
-       {MT_BBP(CORE, 42), 0x00000000},
-       {MT_BBP(CORE, 44), 0x00000000},
-
-       {MT_BBP(IBI, 11), 0x00000080},
-
-       /*
-               0x2300[5] Default Antenna:
-               0 for WIFI main antenna
-               1 for WIFI aux  antenna
-
-       */
-       {MT_BBP(AGC, 0), 0x00021400},
-       {MT_BBP(AGC, 1), 0x00000003},
-       {MT_BBP(AGC, 2), 0x003A6464},
-       {MT_BBP(AGC, 15), 0x88A28CB8},
-       {MT_BBP(AGC, 22), 0x00001E21},
-       {MT_BBP(AGC, 23), 0x0000272C},
-       {MT_BBP(AGC, 24), 0x00002F3A},
-       {MT_BBP(AGC, 25), 0x8000005A},
-       {MT_BBP(AGC, 26), 0x007C2005},
-       {MT_BBP(AGC, 34), 0x000A0C0C},
-       {MT_BBP(AGC, 37), 0x2121262C},
-       {MT_BBP(AGC, 41), 0x38383E45},
-       {MT_BBP(AGC, 57), 0x00001010},
-       {MT_BBP(AGC, 59), 0xBAA20E96},
-       {MT_BBP(AGC, 63), 0x00000001},
-
-       {MT_BBP(TXC, 0), 0x00280403},
-       {MT_BBP(TXC, 1), 0x00000000},
-
-       {MT_BBP(RXC, 1), 0x00000012},
-       {MT_BBP(RXC, 2), 0x00000011},
-       {MT_BBP(RXC, 3), 0x00000005},
-       {MT_BBP(RXC, 4), 0x00000000},
-       {MT_BBP(RXC, 5), 0xF977C4EC},
-       {MT_BBP(RXC, 7), 0x00000090},
-
-       {MT_BBP(TXO, 8), 0x00000000},
-
-       {MT_BBP(TXBE, 0), 0x00000000},
-       {MT_BBP(TXBE, 4), 0x00000004},
-       {MT_BBP(TXBE, 6), 0x00000000},
-       {MT_BBP(TXBE, 8), 0x00000014},
-       {MT_BBP(TXBE, 9), 0x20000000},
-       {MT_BBP(TXBE, 10), 0x00000000},
-       {MT_BBP(TXBE, 12), 0x00000000},
-       {MT_BBP(TXBE, 13), 0x00000000},
-       {MT_BBP(TXBE, 14), 0x00000000},
-       {MT_BBP(TXBE, 15), 0x00000000},
-       {MT_BBP(TXBE, 16), 0x00000000},
-       {MT_BBP(TXBE, 17), 0x00000000},
-
-       {MT_BBP(RXFE, 1), 0x00008800}, /* Add for E3 */
-       {MT_BBP(RXFE, 3), 0x00000000},
-       {MT_BBP(RXFE, 4), 0x00000000},
-
-       {MT_BBP(RXO, 13), 0x00000092},
-       {MT_BBP(RXO, 14), 0x00060612},
-       {MT_BBP(RXO, 15), 0xC8321B18},
-       {MT_BBP(RXO, 16), 0x0000001E},
-       {MT_BBP(RXO, 17), 0x00000000},
-       {MT_BBP(RXO, 18), 0xCC00A993},
-       {MT_BBP(RXO, 19), 0xB9CB9CB9},
-       {MT_BBP(RXO, 20), 0x26c00057},
-       {MT_BBP(RXO, 21), 0x00000001},
-       {MT_BBP(RXO, 24), 0x00000006},
+       { MT_BBP(CORE, 1),      0x00000002 },
+       { MT_BBP(CORE, 4),      0x00000000 },
+       { MT_BBP(CORE, 24),     0x00000000 },
+       { MT_BBP(CORE, 32),     0x4003000a },
+       { MT_BBP(CORE, 42),     0x00000000 },
+       { MT_BBP(CORE, 44),     0x00000000 },
+       { MT_BBP(IBI, 11),      0x0FDE8081 },
+       { MT_BBP(AGC, 0),       0x00021400 },
+       { MT_BBP(AGC, 1),       0x00000003 },
+       { MT_BBP(AGC, 2),       0x003A6464 },
+       { MT_BBP(AGC, 15),      0x88A28CB8 },
+       { MT_BBP(AGC, 22),      0x00001E21 },
+       { MT_BBP(AGC, 23),      0x0000272C },
+       { MT_BBP(AGC, 24),      0x00002F3A },
+       { MT_BBP(AGC, 25),      0x8000005A },
+       { MT_BBP(AGC, 26),      0x007C2005 },
+       { MT_BBP(AGC, 33),      0x00003238 },
+       { MT_BBP(AGC, 34),      0x000A0C0C },
+       { MT_BBP(AGC, 37),      0x2121262C },
+       { MT_BBP(AGC, 41),      0x38383E45 },
+       { MT_BBP(AGC, 57),      0x00001010 },
+       { MT_BBP(AGC, 59),      0xBAA20E96 },
+       { MT_BBP(AGC, 63),      0x00000001 },
+       { MT_BBP(TXC, 0),       0x00280403 },
+       { MT_BBP(TXC, 1),       0x00000000 },
+       { MT_BBP(RXC, 1),       0x00000012 },
+       { MT_BBP(RXC, 2),       0x00000011 },
+       { MT_BBP(RXC, 3),       0x00000005 },
+       { MT_BBP(RXC, 4),       0x00000000 },
+       { MT_BBP(RXC, 5),       0xF977C4EC },
+       { MT_BBP(RXC, 7),       0x00000090 },
+       { MT_BBP(TXO, 8),       0x00000000 },
+       { MT_BBP(TXBE, 0),      0x00000000 },
+       { MT_BBP(TXBE, 4),      0x00000004 },
+       { MT_BBP(TXBE, 6),      0x00000000 },
+       { MT_BBP(TXBE, 8),      0x00000014 },
+       { MT_BBP(TXBE, 9),      0x20000000 },
+       { MT_BBP(TXBE, 10),     0x00000000 },
+       { MT_BBP(TXBE, 12),     0x00000000 },
+       { MT_BBP(TXBE, 13),     0x00000000 },
+       { MT_BBP(TXBE, 14),     0x00000000 },
+       { MT_BBP(TXBE, 15),     0x00000000 },
+       { MT_BBP(TXBE, 16),     0x00000000 },
+       { MT_BBP(TXBE, 17),     0x00000000 },
+       { MT_BBP(RXFE, 1),      0x00008800 },
+       { MT_BBP(RXFE, 3),      0x00000000 },
+       { MT_BBP(RXFE, 4),      0x00000000 },
+       { MT_BBP(RXO, 13),      0x00000192 },
+       { MT_BBP(RXO, 14),      0x00060612 },
+       { MT_BBP(RXO, 15),      0xC8321B18 },
+       { MT_BBP(RXO, 16),      0x0000001E },
+       { MT_BBP(RXO, 17),      0x00000000 },
+       { MT_BBP(RXO, 18),      0xCC00A993 },
+       { MT_BBP(RXO, 19),      0xB9CB9CB9 },
+       { MT_BBP(RXO, 20),      0x26c00057 },
+       { MT_BBP(RXO, 21),      0x00000001 },
+       { MT_BBP(RXO, 24),      0x00000006 },
+       { MT_BBP(RXO, 28),      0x0000003F },
 };
 
 static const struct mt76x0_bbp_switch_item mt76x0_bbp_switch_tab[] = {
-       {RF_G_BAND | RF_BW_20 | RF_BW_40,               {MT_BBP(AGC, 8), 0x0E344EF0}},
-       {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,    {MT_BBP(AGC, 8), 0x122C54F2}},
+       { RF_G_BAND | RF_BW_20 | RF_BW_40,              { MT_BBP(AGC, 4),       0x1FEDA049 } },
+       { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,   { MT_BBP(AGC, 4),       0x1FECA054 } },
+
+       { RF_G_BAND | RF_BW_20 | RF_BW_40,              { MT_BBP(AGC, 6),       0x00000045 } },
+       { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,   { MT_BBP(AGC, 6),       0x0000000A } },
 
-       {RF_G_BAND | RF_BW_20 | RF_BW_40,               {MT_BBP(AGC, 14), 0x310F2E39}},
-       {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,    {MT_BBP(AGC, 14), 0x310F2A3F}},
+       { RF_G_BAND | RF_BW_20 | RF_BW_40,              { MT_BBP(AGC, 8),       0x16344EF0 } },
+       { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,   { MT_BBP(AGC, 8),       0x122C54F2 } },
 
-       {RF_G_BAND | RF_BW_20 | RF_BW_40,               {MT_BBP(AGC, 32), 0x00003230}},
-       {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,    {MT_BBP(AGC, 32), 0x0000181C}},
+       { RF_G_BAND | RF_BW_20,                         { MT_BBP(AGC, 12),      0x05052879 } },
+       { RF_G_BAND | RF_BW_40,                         { MT_BBP(AGC, 12),      0x050528F9 } },
+       { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,   { MT_BBP(AGC, 12),      0x050528F9 } },
 
-       {RF_G_BAND | RF_BW_20 | RF_BW_40,               {MT_BBP(AGC, 33), 0x00003240}},
-       {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,    {MT_BBP(AGC, 33), 0x00003218}},
+       { RF_G_BAND | RF_BW_20 | RF_BW_40,              { MT_BBP(AGC, 13),      0x35050004 } },
+       { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,   { MT_BBP(AGC, 13),      0x2C3A0406 } },
 
-       {RF_G_BAND | RF_BW_20 | RF_BW_40,               {MT_BBP(AGC, 35), 0x11112016}},
-       {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,    {MT_BBP(AGC, 35), 0x11112016}},
+       { RF_G_BAND | RF_BW_20 | RF_BW_40,              { MT_BBP(AGC, 14),      0x310F2E3C } },
+       { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,   { MT_BBP(AGC, 14),      0x310F2A3F } },
 
-       {RF_G_BAND | RF_BW_20 | RF_BW_40,               {MT_BBP(RXO, 28), 0x0000008A}},
-       {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,    {MT_BBP(RXO, 28), 0x0000008A}},
+       { RF_G_BAND | RF_BW_20 | RF_BW_40,              { MT_BBP(AGC, 26),      0x007C2005 } },
+       { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,   { MT_BBP(AGC, 26),      0x007C2005 } },
 
-       {RF_G_BAND | RF_BW_20 | RF_BW_40,               {MT_BBP(AGC, 4), 0x1FEDA049}},
-       {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,    {MT_BBP(AGC, 4), 0x1FECA054}},
+       { RF_G_BAND | RF_BW_20 | RF_BW_40,              { MT_BBP(AGC, 27),      0x000000E1 } },
+       { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,   { MT_BBP(AGC, 27),      0x000000EC } },
 
-       {RF_G_BAND | RF_BW_20 | RF_BW_40,               {MT_BBP(AGC, 6), 0x00000045}},
-       {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,    {MT_BBP(AGC, 6), 0x0000000A}},
+       { RF_G_BAND | RF_BW_20,                         { MT_BBP(AGC, 28),      0x00060806 } },
+       { RF_G_BAND | RF_BW_40,                         { MT_BBP(AGC, 28),      0x00050806 } },
+       { RF_A_BAND | RF_BW_40,                         { MT_BBP(AGC, 28),      0x00060801 } },
+       { RF_A_BAND | RF_BW_20 | RF_BW_80,              { MT_BBP(AGC, 28),      0x00060806 } },
 
-       {RF_G_BAND | RF_BW_20,                          {MT_BBP(AGC, 12), 0x05052879}},
-       {RF_G_BAND | RF_BW_40,                          {MT_BBP(AGC, 12), 0x050528F9}},
-       {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,    {MT_BBP(AGC, 12), 0x050528F9}},
+       { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,   { MT_BBP(RXO, 28),      0x0000008A } },
 
-       {RF_G_BAND | RF_BW_20 | RF_BW_40,               {MT_BBP(AGC, 13), 0x35050004}},
-       {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,    {MT_BBP(AGC, 13), 0x2C3A0406}},
+       { RF_G_BAND | RF_BW_20 | RF_BW_40,              { MT_BBP(AGC, 31),      0x00000E23 } },
+       { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,   { MT_BBP(AGC, 31),      0x00000E13 } },
 
-       {RF_G_BAND | RF_BW_20 | RF_BW_40,               {MT_BBP(AGC, 27), 0x000000E1}},
-       {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,    {MT_BBP(AGC, 27), 0x000000EC}},
+       { RF_G_BAND | RF_BW_20 | RF_BW_40,              { MT_BBP(AGC, 32),      0x00003218 } },
+       { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,   { MT_BBP(AGC, 32),      0x0000181C } },
 
-       {RF_G_BAND | RF_BW_20,                          {MT_BBP(AGC, 28), 0x00060806}},
-       {RF_G_BAND | RF_BW_40,                          {MT_BBP(AGC, 28), 0x00050806}},
-       {RF_A_BAND | RF_BW_40,                          {MT_BBP(AGC, 28), 0x00060801}},
-       {RF_A_BAND | RF_BW_20 | RF_BW_80,               {MT_BBP(AGC, 28), 0x00060806}},
+       { RF_G_BAND | RF_BW_20 | RF_BW_40,              { MT_BBP(AGC, 33),      0x00003240 } },
+       { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,   { MT_BBP(AGC, 33),      0x00003218 } },
 
-       {RF_G_BAND | RF_BW_20 | RF_BW_40,               {MT_BBP(AGC, 31), 0x00000F23}},
-       {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,    {MT_BBP(AGC, 31), 0x00000F13}},
+       { RF_G_BAND | RF_BW_20,                         { MT_BBP(AGC, 35),      0x11111616 } },
+       { RF_G_BAND | RF_BW_40,                         { MT_BBP(AGC, 35),      0x11111516 } },
+       { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,   { MT_BBP(AGC, 35),      0x11111111 } },
 
-       {RF_G_BAND | RF_BW_20,                          {MT_BBP(AGC, 39), 0x2A2A3036}},
-       {RF_G_BAND | RF_BW_40,                          {MT_BBP(AGC, 39), 0x2A2A2C36}},
-       {RF_A_BAND | RF_BW_20 | RF_BW_40,               {MT_BBP(AGC, 39), 0x2A2A3036}},
-       {RF_A_BAND | RF_BW_80,                          {MT_BBP(AGC, 39), 0x2A2A2A36}},
+       { RF_G_BAND | RF_BW_20,                         { MT_BBP(AGC, 39),      0x2A2A3036 } },
+       { RF_G_BAND | RF_BW_40,                         { MT_BBP(AGC, 39),      0x2A2A2C36 } },
+       { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,   { MT_BBP(AGC, 39),      0x2A2A2A2A } },
 
-       {RF_G_BAND | RF_BW_20,                          {MT_BBP(AGC, 43), 0x27273438}},
-       {RF_G_BAND | RF_BW_40,                          {MT_BBP(AGC, 43), 0x27272D38}},
-       {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,    {MT_BBP(AGC, 43), 0x27272B30}},
+       { RF_G_BAND | RF_BW_20,                         { MT_BBP(AGC, 43),      0x27273438 } },
+       { RF_G_BAND | RF_BW_40,                         { MT_BBP(AGC, 43),      0x27272D38 } },
+       { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,   { MT_BBP(AGC, 43),      0x27271A1A } },
 
-       {RF_G_BAND | RF_BW_20 | RF_BW_40,               {MT_BBP(AGC, 51), 0x17171C1C}},
-       {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,    {MT_BBP(AGC, 51), 0xFFFFFFFF}},
+       { RF_G_BAND | RF_BW_20 | RF_BW_40,              { MT_BBP(AGC, 51),      0x17171C1C } },
+       { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,   { MT_BBP(AGC, 51),      0xFFFFFFFF } },
 
-       {RF_G_BAND | RF_BW_20,                          {MT_BBP(AGC, 53), 0x26262A2F}},
-       {RF_G_BAND | RF_BW_40,                          {MT_BBP(AGC, 53), 0x2626322F}},
-       {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,    {MT_BBP(AGC, 53), 0xFFFFFFFF}},
+       { RF_G_BAND | RF_BW_20,                         { MT_BBP(AGC, 53),      0x26262A2F } },
+       { RF_G_BAND | RF_BW_40,                         { MT_BBP(AGC, 53),      0x2626322F } },
+       { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,   { MT_BBP(AGC, 53),      0xFFFFFFFF } },
 
-       {RF_G_BAND | RF_BW_20,                          {MT_BBP(AGC, 55), 0x40404E58}},
-       {RF_G_BAND | RF_BW_40,                          {MT_BBP(AGC, 55), 0x40405858}},
-       {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,    {MT_BBP(AGC, 55), 0xFFFFFFFF}},
+       { RF_G_BAND | RF_BW_20 | RF_BW_40,              { MT_BBP(AGC, 55),      0x40404040 } },
+       { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,   { MT_BBP(AGC, 55),      0xFFFFFFFF } },
 
-       {RF_G_BAND | RF_BW_20 | RF_BW_40,               {MT_BBP(AGC, 58), 0x00001010}},
-       {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,    {MT_BBP(AGC, 58), 0x00000000}},
+       { RF_G_BAND | RF_BW_20 | RF_BW_40,              { MT_BBP(AGC, 58),      0x00001010 } },
+       { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,   { MT_BBP(AGC, 58),      0x00000000 } },
 
-       {RF_G_BAND | RF_BW_20 | RF_BW_40,               {MT_BBP(RXFE, 0), 0x3D5000E0}},
-       {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,    {MT_BBP(RXFE, 0), 0x895000E0}},
+       { RF_G_BAND | RF_BW_20 | RF_BW_40,              { MT_BBP(RXFE, 0),      0x3D5000E0 } },
+       { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,   { MT_BBP(RXFE, 0),      0x895000E0 } },
 };
 
 static const struct mt76_reg_pair mt76x0_dcoc_tab[] = {
-       {MT_BBP(CAL, 47), 0x000010F0 },
-       {MT_BBP(CAL, 48), 0x00008080 },
-       {MT_BBP(CAL, 49), 0x00000F07 },
-       {MT_BBP(CAL, 50), 0x00000040 },
-       {MT_BBP(CAL, 51), 0x00000404 },
-       {MT_BBP(CAL, 52), 0x00080803 },
-       {MT_BBP(CAL, 53), 0x00000704 },
-       {MT_BBP(CAL, 54), 0x00002828 },
-       {MT_BBP(CAL, 55), 0x00005050 },
+       { MT_BBP(CAL, 47), 0x000010F0 },
+       { MT_BBP(CAL, 48), 0x00008080 },
+       { MT_BBP(CAL, 49), 0x00000F07 },
+       { MT_BBP(CAL, 50), 0x00000040 },
+       { MT_BBP(CAL, 51), 0x00000404 },
+       { MT_BBP(CAL, 52), 0x00080803 },
+       { MT_BBP(CAL, 53), 0x00000704 },
+       { MT_BBP(CAL, 54), 0x00002828 },
+       { MT_BBP(CAL, 55), 0x00005050 },
 };
 
 #endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
index 91a84be36d3bf4d3262aafc983a1a91f6bd8b75d..7a422c5902113d46e5c02096790671ea49b5620a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
  * GNU General Public License for more details.
  */
 
-#include "mt76x0.h"
-#include "trace.h"
 #include <linux/etherdevice.h>
 
-static void
-mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
-                        enum nl80211_band band)
-{
-       u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
-
-       txrate->idx = 0;
-       txrate->flags = 0;
-       txrate->count = 1;
-
-       switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
-       case MT_PHY_TYPE_OFDM:
-               if (band == NL80211_BAND_2GHZ)
-                       idx += 4;
-
-               txrate->idx = idx;
-               return;
-       case MT_PHY_TYPE_CCK:
-               if (idx >= 8)
-                       idx -= 8;
-
-               txrate->idx = idx;
-               return;
-       case MT_PHY_TYPE_HT_GF:
-               txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
-               /* fall through */
-       case MT_PHY_TYPE_HT:
-               txrate->flags |= IEEE80211_TX_RC_MCS;
-               txrate->idx = idx;
-               break;
-       case MT_PHY_TYPE_VHT:
-               txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
-               txrate->idx = idx;
-               break;
-       default:
-               WARN_ON(1);
-               return;
-       }
-
-       switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
-       case MT_PHY_BW_20:
-               break;
-       case MT_PHY_BW_40:
-               txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
-               break;
-       case MT_PHY_BW_80:
-               txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
-               break;
-       default:
-               WARN_ON(1);
-               return;
-       }
-
-       if (rate & MT_RXWI_RATE_SGI)
-               txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
-}
-
-static void
-mt76_mac_fill_tx_status(struct mt76x0_dev *dev, struct ieee80211_tx_info *info,
-                       struct mt76_tx_status *st, int n_frames)
-{
-       struct ieee80211_tx_rate *rate = info->status.rates;
-       int cur_idx, last_rate;
-       int i;
-
-       if (!n_frames)
-               return;
-
-       last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
-       mt76_mac_process_tx_rate(&rate[last_rate], st->rate,
-                                dev->mt76.chandef.chan->band);
-       if (last_rate < IEEE80211_TX_MAX_RATES - 1)
-               rate[last_rate + 1].idx = -1;
-
-       cur_idx = rate[last_rate].idx + last_rate;
-       for (i = 0; i <= last_rate; i++) {
-               rate[i].flags = rate[last_rate].flags;
-               rate[i].idx = max_t(int, 0, cur_idx - i);
-               rate[i].count = 1;
-       }
-
-       rate[last_rate - 1].count = st->retry + 1 - last_rate;
-
-       info->status.ampdu_len = n_frames;
-       info->status.ampdu_ack_len = st->success ? n_frames : 0;
-
-       if (st->pktid & MT_TXWI_PKTID_PROBE)
-               info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
-
-       if (st->aggr)
-               info->flags |= IEEE80211_TX_CTL_AMPDU |
-                              IEEE80211_TX_STAT_AMPDU;
-
-       if (!st->ack_req)
-               info->flags |= IEEE80211_TX_CTL_NO_ACK;
-       else if (st->success)
-               info->flags |= IEEE80211_TX_STAT_ACK;
-}
-
-u16 mt76x0_mac_tx_rate_val(struct mt76x0_dev *dev,
-                        const struct ieee80211_tx_rate *rate, u8 *nss_val)
-{
-       u16 rateval;
-       u8 phy, rate_idx;
-       u8 nss = 1;
-       u8 bw = 0;
-
-       if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
-               rate_idx = rate->idx;
-               nss = 1 + (rate->idx >> 4);
-               phy = MT_PHY_TYPE_VHT;
-               if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
-                       bw = 2;
-               else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
-                       bw = 1;
-       } else if (rate->flags & IEEE80211_TX_RC_MCS) {
-               rate_idx = rate->idx;
-               nss = 1 + (rate->idx >> 3);
-               phy = MT_PHY_TYPE_HT;
-               if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
-                       phy = MT_PHY_TYPE_HT_GF;
-               if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
-                       bw = 1;
-       } else {
-               const struct ieee80211_rate *r;
-               int band = dev->mt76.chandef.chan->band;
-               u16 val;
-
-               r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
-               if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
-                       val = r->hw_value_short;
-               else
-                       val = r->hw_value;
-
-               phy = val >> 8;
-               rate_idx = val & 0xff;
-               bw = 0;
-       }
-
-       rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
-       rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
-       rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
-       if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
-               rateval |= MT_RXWI_RATE_SGI;
-
-       *nss_val = nss;
-       return cpu_to_le16(rateval);
-}
-
-void mt76x0_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid,
-                           const struct ieee80211_tx_rate *rate)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->mt76.lock, flags);
-       wcid->tx_rate = mt76x0_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
-       wcid->tx_rate_set = true;
-       spin_unlock_irqrestore(&dev->mt76.lock, flags);
-}
-
-struct mt76_tx_status mt76x0_mac_fetch_tx_status(struct mt76x0_dev *dev)
-{
-       struct mt76_tx_status stat = {};
-       u32 stat2, stat1;
-
-       stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
-       stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
-
-       stat.valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
-       stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
-       stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
-       stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
-       stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
-       stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
-
-       stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
-       stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
-
-       return stat;
-}
-
-void mt76x0_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update)
-{
-       struct ieee80211_tx_info info = {};
-       struct ieee80211_sta *sta = NULL;
-       struct mt76_wcid *wcid = NULL;
-       struct mt76_sta *msta = NULL;
-
-       rcu_read_lock();
-       if (stat->wcid < ARRAY_SIZE(dev->wcid))
-               wcid = rcu_dereference(dev->wcid[stat->wcid]);
-
-       if (wcid) {
-               void *priv;
-               priv = msta = container_of(wcid, struct mt76_sta, wcid);
-               sta = container_of(priv, struct ieee80211_sta, drv_priv);
-       }
-
-       if (msta && stat->aggr) {
-               u32 stat_val, stat_cache;
-
-               stat_val = stat->rate;
-               stat_val |= ((u32) stat->retry) << 16;
-               stat_cache = msta->status.rate;
-               stat_cache |= ((u32) msta->status.retry) << 16;
-
-               if (*update == 0 && stat_val == stat_cache &&
-                   stat->wcid == msta->status.wcid && msta->n_frames < 32) {
-                       msta->n_frames++;
-                       goto out;
-               }
-
-               mt76_mac_fill_tx_status(dev, &info, &msta->status,
-                                       msta->n_frames);
-               msta->status = *stat;
-               msta->n_frames = 1;
-               *update = 0;
-       } else {
-               mt76_mac_fill_tx_status(dev, &info, stat, 1);
-               *update = 1;
-       }
-
-       spin_lock_bh(&dev->mac_lock);
-       ieee80211_tx_status_noskb(dev->mt76.hw, sta, &info);
-       spin_unlock_bh(&dev->mac_lock);
-out:
-       rcu_read_unlock();
-}
+#include "mt76x0.h"
+#include "trace.h"
 
-void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
-                               int ht_mode)
+void mt76x0_mac_set_protection(struct mt76x02_dev *dev, bool legacy_prot,
+                              int ht_mode)
 {
        int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
        bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
@@ -305,7 +77,7 @@ void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
                mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
 }
 
-void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb)
+void mt76x0_mac_set_short_preamble(struct mt76x02_dev *dev, bool short_preamb)
 {
        if (short_preamb)
                mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
@@ -313,7 +85,7 @@ void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb)
                mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
 }
 
-void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval)
+void mt76x0_mac_config_tsf(struct mt76x02_dev *dev, bool enable, int interval)
 {
        u32 val = mt76_rr(dev, MT_BEACON_TIME_CFG);
 
@@ -333,7 +105,7 @@ void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval)
                MT_BEACON_TIME_CFG_TBTT_EN;
 }
 
-static void mt76x0_check_mac_err(struct mt76x0_dev *dev)
+static void mt76x0_check_mac_err(struct mt76x02_dev *dev)
 {
        u32 val = mt76_rr(dev, 0x10f4);
 
@@ -348,15 +120,15 @@ static void mt76x0_check_mac_err(struct mt76x0_dev *dev)
 }
 void mt76x0_mac_work(struct work_struct *work)
 {
-       struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
+       struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
                                               mac_work.work);
        struct {
                u32 addr_base;
                u32 span;
                u64 *stat_base;
        } spans[] = {
-               { MT_RX_STA_CNT0,       3,      dev->stats.rx_stat },
-               { MT_TX_STA_CNT0,       3,      dev->stats.tx_stat },
+               { MT_RX_STAT_0, 3,      dev->stats.rx_stat },
+               { MT_TX_STA_0,  3,      dev->stats.tx_stat },
                { MT_TX_AGG_STAT,       1,      dev->stats.aggr_stat },
                { MT_MPDU_DENSITY_CNT,  1,      dev->stats.zero_len_del },
                { MT_TX_AGG_CNT_BASE0,  8,      &dev->stats.aggr_n[0] },
@@ -399,24 +171,7 @@ void mt76x0_mac_work(struct work_struct *work)
        ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, 10 * HZ);
 }
 
-void
-mt76x0_mac_wcid_setup(struct mt76x0_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
-{
-       u8 zmac[ETH_ALEN] = {};
-       u32 attr;
-
-       attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
-              FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
-
-       mt76_wr(dev, MT_WCID_ATTR(idx), attr);
-
-       if (mac)
-               memcpy(zmac, mac, sizeof(zmac));
-
-       mt76x0_addr_wr(dev, MT_WCID_ADDR(idx), zmac);
-}
-
-void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev)
+void mt76x0_mac_set_ampdu_factor(struct mt76x02_dev *dev)
 {
        struct ieee80211_sta *sta;
        struct mt76_wcid *wcid;
@@ -425,12 +180,12 @@ void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev)
        int i;
 
        rcu_read_lock();
-       for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
-               wcid = rcu_dereference(dev->wcid[i]);
+       for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) {
+               wcid = rcu_dereference(dev->mt76.wcid[i]);
                if (!wcid)
                        continue;
 
-               msta = container_of(wcid, struct mt76_sta, wcid);
+               msta = container_of(wcid, struct mt76x02_sta, wcid);
                sta = container_of(msta, struct ieee80211_sta, drv_priv);
 
                min_factor = min(min_factor, sta->ht_cap.ampdu_factor);
@@ -440,219 +195,3 @@ void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev)
        mt76_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
                   FIELD_PREP(MT_MAX_LEN_CFG_AMPDU, min_factor));
 }
-
-static void
-mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
-{
-       u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
-
-       switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
-       case MT_PHY_TYPE_OFDM:
-               if (idx >= 8)
-                       idx = 0;
-
-               if (status->band == NL80211_BAND_2GHZ)
-                       idx += 4;
-
-               status->rate_idx = idx;
-               return;
-       case MT_PHY_TYPE_CCK:
-               if (idx >= 8) {
-                       idx -= 8;
-                       status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
-               }
-
-               if (idx >= 4)
-                       idx = 0;
-
-               status->rate_idx = idx;
-               return;
-       case MT_PHY_TYPE_HT_GF:
-               status->enc_flags |= RX_ENC_FLAG_HT_GF;
-               /* fall through */
-       case MT_PHY_TYPE_HT:
-               status->encoding = RX_ENC_HT;
-               status->rate_idx = idx;
-               break;
-       case MT_PHY_TYPE_VHT:
-               status->encoding = RX_ENC_VHT;
-               status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
-               status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
-               break;
-       default:
-               WARN_ON(1);
-               return;
-       }
-
-       if (rate & MT_RXWI_RATE_LDPC)
-               status->enc_flags |= RX_ENC_FLAG_LDPC;
-
-       if (rate & MT_RXWI_RATE_SGI)
-               status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
-
-       if (rate & MT_RXWI_RATE_STBC)
-               status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
-
-       switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
-       case MT_PHY_BW_20:
-               break;
-       case MT_PHY_BW_40:
-               status->bw = RATE_INFO_BW_40;
-               break;
-       case MT_PHY_BW_80:
-               status->bw = RATE_INFO_BW_80;
-               break;
-       default:
-               WARN_ON(1);
-               break;
-       }
-}
-
-static void
-mt76x0_rx_monitor_beacon(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi,
-                         u16 rate, int rssi)
-{
-       dev->bcn_phy_mode = FIELD_GET(MT_RXWI_RATE_PHY, rate);
-       dev->avg_rssi = ((dev->avg_rssi * 15) / 16 + (rssi << 8)) / 256;
-}
-
-static int
-mt76x0_rx_is_our_beacon(struct mt76x0_dev *dev, u8 *data)
-{
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
-
-       return ieee80211_is_beacon(hdr->frame_control) &&
-               ether_addr_equal(hdr->addr2, dev->ap_bssid);
-}
-
-u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb,
-                       u8 *data, void *rxi)
-{
-       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
-       struct mt76x0_rxwi *rxwi = rxi;
-       u32 len, ctl = le32_to_cpu(rxwi->ctl);
-       u16 rate = le16_to_cpu(rxwi->rate);
-       int rssi;
-
-       len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
-       if (WARN_ON(len < 10))
-               return 0;
-
-       if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) {
-               status->flag |= RX_FLAG_DECRYPTED;
-               status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
-       }
-
-       status->chains = BIT(0);
-       rssi = mt76x0_phy_get_rssi(dev, rxwi);
-       status->chain_signal[0] = status->signal = rssi;
-       status->freq = dev->mt76.chandef.chan->center_freq;
-       status->band = dev->mt76.chandef.chan->band;
-
-       mt76_mac_process_rate(status, rate);
-
-       spin_lock_bh(&dev->con_mon_lock);
-       if (mt76x0_rx_is_our_beacon(dev, data)) {
-               mt76x0_rx_monitor_beacon(dev, rxwi, rate, rssi);
-       } else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M)) {
-               if (dev->avg_rssi == 0)
-                       dev->avg_rssi = rssi;
-               else
-                       dev->avg_rssi = (dev->avg_rssi * 15) / 16 + rssi / 16;
-
-       }
-       spin_unlock_bh(&dev->con_mon_lock);
-
-       return len;
-}
-
-static enum mt76_cipher_type
-mt76_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
-{
-       memset(key_data, 0, 32);
-       if (!key)
-               return MT_CIPHER_NONE;
-
-       if (key->keylen > 32)
-               return MT_CIPHER_NONE;
-
-       memcpy(key_data, key->key, key->keylen);
-
-       switch (key->cipher) {
-       case WLAN_CIPHER_SUITE_WEP40:
-               return MT_CIPHER_WEP40;
-       case WLAN_CIPHER_SUITE_WEP104:
-               return MT_CIPHER_WEP104;
-       case WLAN_CIPHER_SUITE_TKIP:
-               return MT_CIPHER_TKIP;
-       case WLAN_CIPHER_SUITE_CCMP:
-               return MT_CIPHER_AES_CCMP;
-       default:
-               return MT_CIPHER_NONE;
-       }
-}
-
-int mt76x0_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx,
-                         struct ieee80211_key_conf *key)
-{
-       enum mt76_cipher_type cipher;
-       u8 key_data[32];
-       u8 iv_data[8];
-       u32 val;
-
-       cipher = mt76_mac_get_key_info(key, key_data);
-       if (cipher == MT_CIPHER_NONE && key)
-               return -EINVAL;
-
-       trace_mt76x0_set_key(&dev->mt76, idx);
-
-       mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
-
-       memset(iv_data, 0, sizeof(iv_data));
-       if (key) {
-               iv_data[3] = key->keyidx << 6;
-               if (cipher >= MT_CIPHER_TKIP) {
-                       /* Note: start with 1 to comply with spec,
-                        *       (see comment on common/cmm_wpa.c:4291).
-                        */
-                       iv_data[0] |= 1;
-                       iv_data[3] |= 0x20;
-               }
-       }
-       mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
-
-       val = mt76_rr(dev, MT_WCID_ATTR(idx));
-       val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT;
-       val |= FIELD_PREP(MT_WCID_ATTR_PKEY_MODE, cipher & 7) |
-              FIELD_PREP(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3);
-       val &= ~MT_WCID_ATTR_PAIRWISE;
-       val |= MT_WCID_ATTR_PAIRWISE *
-               !!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
-       mt76_wr(dev, MT_WCID_ATTR(idx), val);
-
-       return 0;
-}
-
-int mt76x0_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx,
-                             struct ieee80211_key_conf *key)
-{
-       enum mt76_cipher_type cipher;
-       u8 key_data[32];
-       u32 val;
-
-       cipher = mt76_mac_get_key_info(key, key_data);
-       if (cipher == MT_CIPHER_NONE && key)
-               return -EINVAL;
-
-       trace_mt76x0_set_shared_key(&dev->mt76, vif_idx, key_idx);
-
-       mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx),
-                       key_data, sizeof(key_data));
-
-       val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
-       val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
-       val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
-       mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
-
-       return 0;
-}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h
deleted file mode 100644
index bea067b..0000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MT76_MAC_H
-#define __MT76_MAC_H
-
-/* Note: values in original "RSSI" and "SNR" fields are not actually what they
- *      are called for MT76X0U, names used by this driver are educated guesses
- *      (see vendor mac/ral_omac.c).
- */
-struct mt76x0_rxwi {
-       __le32 rxinfo;
-
-       __le32 ctl;
-
-       __le16 tid_sn;
-       __le16 rate;
-
-       s8 rssi[4];
-
-       __le32 bbp_rxinfo[4];
-} __packed __aligned(4);
-
-#define MT_RXINFO_BA                   BIT(0)
-#define MT_RXINFO_DATA                 BIT(1)
-#define MT_RXINFO_NULL                 BIT(2)
-#define MT_RXINFO_FRAG                 BIT(3)
-#define MT_RXINFO_U2M                  BIT(4)
-#define MT_RXINFO_MULTICAST            BIT(5)
-#define MT_RXINFO_BROADCAST            BIT(6)
-#define MT_RXINFO_MYBSS                        BIT(7)
-#define MT_RXINFO_CRCERR               BIT(8)
-#define MT_RXINFO_ICVERR               BIT(9)
-#define MT_RXINFO_MICERR               BIT(10)
-#define MT_RXINFO_AMSDU                        BIT(11)
-#define MT_RXINFO_HTC                  BIT(12)
-#define MT_RXINFO_RSSI                 BIT(13)
-#define MT_RXINFO_L2PAD                        BIT(14)
-#define MT_RXINFO_AMPDU                        BIT(15)
-#define MT_RXINFO_DECRYPT              BIT(16)
-#define MT_RXINFO_BSSIDX3              BIT(17)
-#define MT_RXINFO_WAPI_KEY             BIT(18)
-#define MT_RXINFO_PN_LEN               GENMASK(21, 19)
-#define MT_RXINFO_SW_PKT_80211         BIT(22)
-#define MT_RXINFO_TCP_SUM_BYPASS       BIT(28)
-#define MT_RXINFO_IP_SUM_BYPASS                BIT(29)
-#define MT_RXINFO_TCP_SUM_ERR          BIT(30)
-#define MT_RXINFO_IP_SUM_ERR           BIT(31)
-
-#define MT_RXWI_CTL_WCID               GENMASK(7, 0)
-#define MT_RXWI_CTL_KEY_IDX            GENMASK(9, 8)
-#define MT_RXWI_CTL_BSS_IDX            GENMASK(12, 10)
-#define MT_RXWI_CTL_UDF                        GENMASK(15, 13)
-#define MT_RXWI_CTL_MPDU_LEN           GENMASK(27, 16)
-#define MT_RXWI_CTL_TID                        GENMASK(31, 28)
-
-#define MT_RXWI_FRAG                   GENMASK(3, 0)
-#define MT_RXWI_SN                     GENMASK(15, 4)
-
-#define MT_RXWI_RATE_INDEX             GENMASK(5, 0)
-#define MT_RXWI_RATE_LDPC              BIT(6)
-#define MT_RXWI_RATE_BW                        GENMASK(8, 7)
-#define MT_RXWI_RATE_SGI               BIT(9)
-#define MT_RXWI_RATE_STBC              BIT(10)
-#define MT_RXWI_RATE_LDPC_ETXBF                BIT(11)
-#define MT_RXWI_RATE_SND               BIT(12)
-#define MT_RXWI_RATE_PHY               GENMASK(15, 13)
-
-#define MT_RATE_INDEX_VHT_IDX          GENMASK(3, 0)
-#define MT_RATE_INDEX_VHT_NSS          GENMASK(5, 4)
-
-#define MT_RXWI_GAIN_RSSI_VAL          GENMASK(5, 0)
-#define MT_RXWI_GAIN_RSSI_LNA_ID       GENMASK(7, 6)
-#define MT_RXWI_ANT_AUX_LNA            BIT(7)
-
-#define MT_RXWI_EANT_ENC_ANT_ID                GENMASK(7, 0)
-
-enum mt76_phy_bandwidth {
-       MT_PHY_BW_20,
-       MT_PHY_BW_40,
-       MT_PHY_BW_80,
-};
-
-struct mt76_txwi {
-       __le16 flags;
-       __le16 rate_ctl;
-       u8 ack_ctl;
-       u8 wcid;
-       __le16 len_ctl;
-       __le32 iv;
-       __le32 eiv;
-       u8 aid;
-       u8 txstream;
-       u8 ctl2;
-       u8 pktid;
-} __packed __aligned(4);
-
-#define MT_TXWI_FLAGS_FRAG             BIT(0)
-#define MT_TXWI_FLAGS_MMPS             BIT(1)
-#define MT_TXWI_FLAGS_CFACK            BIT(2)
-#define MT_TXWI_FLAGS_TS               BIT(3)
-#define MT_TXWI_FLAGS_AMPDU            BIT(4)
-#define MT_TXWI_FLAGS_MPDU_DENSITY     GENMASK(7, 5)
-#define MT_TXWI_FLAGS_TXOP             GENMASK(9, 8)
-#define MT_TXWI_FLAGS_CWMIN            GENMASK(12, 10)
-#define MT_TXWI_FLAGS_NO_RATE_FALLBACK BIT(13)
-#define MT_TXWI_FLAGS_TX_RPT           BIT(14)
-#define MT_TXWI_FLAGS_TX_RATE_LUT      BIT(15)
-
-#define MT_TXWI_RATE_MCS               GENMASK(6, 0)
-#define MT_TXWI_RATE_BW                        BIT(7)
-#define MT_TXWI_RATE_SGI               BIT(8)
-#define MT_TXWI_RATE_STBC              GENMASK(10, 9)
-#define MT_TXWI_RATE_PHY_MODE          GENMASK(15, 14)
-
-#define MT_TXWI_ACK_CTL_REQ            BIT(0)
-#define MT_TXWI_ACK_CTL_NSEQ           BIT(1)
-#define MT_TXWI_ACK_CTL_BA_WINDOW      GENMASK(7, 2)
-
-#define MT_TXWI_LEN_BYTE_CNT           GENMASK(11, 0)
-
-#define MT_TXWI_CTL_TX_POWER_ADJ       GENMASK(3, 0)
-#define MT_TXWI_CTL_CHAN_CHECK_PKT     BIT(4)
-#define MT_TXWI_CTL_PIFS_REV           BIT(6)
-
-#define MT_TXWI_PKTID_PROBE             BIT(7)
-
-u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb,
-                       u8 *data, void *rxi);
-int mt76x0_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx,
-                         struct ieee80211_key_conf *key);
-void mt76x0_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid,
-                           const struct ieee80211_tx_rate *rate);
-
-int mt76x0_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx,
-                             struct ieee80211_key_conf *key);
-u16 mt76x0_mac_tx_rate_val(struct mt76x0_dev *dev,
-                        const struct ieee80211_tx_rate *rate, u8 *nss_val);
-struct mt76_tx_status
-mt76x0_mac_fetch_tx_status(struct mt76x0_dev *dev);
-void mt76x0_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update);
-
-#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index cf6ffb1ba4a290e1561374c68af9b947215af0ea..c9cd0254a97975be8b0587072a65a66c3e22a969 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
  * GNU General Public License for more details.
  */
 
-#include "mt76x0.h"
-#include "mac.h"
 #include <linux/etherdevice.h>
+#include "mt76x0.h"
 
-static int mt76x0_start(struct ieee80211_hw *hw)
-{
-       struct mt76x0_dev *dev = hw->priv;
-       int ret;
-
-       mutex_lock(&dev->mutex);
-
-       ret = mt76x0_mac_start(dev);
-       if (ret)
-               goto out;
-
-       ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
-                                    MT_CALIBRATE_INTERVAL);
-       ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
-                                    MT_CALIBRATE_INTERVAL);
-out:
-       mutex_unlock(&dev->mutex);
-       return ret;
-}
-
-static void mt76x0_stop(struct ieee80211_hw *hw)
-{
-       struct mt76x0_dev *dev = hw->priv;
-
-       mutex_lock(&dev->mutex);
-
-       cancel_delayed_work_sync(&dev->cal_work);
-       cancel_delayed_work_sync(&dev->mac_work);
-       mt76x0_mac_stop(dev);
-
-       mutex_unlock(&dev->mutex);
-}
-
-
-static int mt76x0_add_interface(struct ieee80211_hw *hw,
-                                struct ieee80211_vif *vif)
+int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
 {
-       struct mt76x0_dev *dev = hw->priv;
-       struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
-       unsigned int idx;
-
-       idx = ffs(~dev->vif_mask);
-       if (!idx || idx > 8)
-               return -ENOSPC;
-
-       idx--;
-       dev->vif_mask |= BIT(idx);
-
-       mvif->idx = idx;
-       mvif->group_wcid.idx = GROUP_WCID(idx);
-       mvif->group_wcid.hw_key_idx = -1;
-
-       return 0;
-}
+       struct mt76x02_dev *dev = hw->priv;
+       int ret = 0;
 
-static void mt76x0_remove_interface(struct ieee80211_hw *hw,
-                                    struct ieee80211_vif *vif)
-{
-       struct mt76x0_dev *dev = hw->priv;
-       struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
-       unsigned int wcid = mvif->group_wcid.idx;
+       mutex_lock(&dev->mt76.mutex);
 
-       dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG);
-}
+       if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+               ieee80211_stop_queues(hw);
+               ret = mt76x0_phy_set_channel(dev, &hw->conf.chandef);
+               ieee80211_wake_queues(hw);
+       }
 
-static int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
-{
-       struct mt76x0_dev *dev = hw->priv;
-       int ret = 0;
+       if (changed & IEEE80211_CONF_CHANGE_POWER) {
+               dev->mt76.txpower_conf = hw->conf.power_level * 2;
 
-       mutex_lock(&dev->mutex);
+               if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+                       mt76x0_phy_set_txpower(dev);
+       }
 
        if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
                if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
-                       dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+                       dev->mt76.rxfilter |= MT_RX_FILTR_CFG_PROMISC;
                else
-                       dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+                       dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
 
-               mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+               mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
        }
 
-       if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
-               ieee80211_stop_queues(hw);
-               ret = mt76x0_phy_set_channel(dev, &hw->conf.chandef);
-               ieee80211_wake_queues(hw);
-       }
-
-       mutex_unlock(&dev->mutex);
+       mutex_unlock(&dev->mt76.mutex);
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(mt76x0_config);
 
 static void
-mt76_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
-                     unsigned int *total_flags, u64 multicast)
+mt76x0_addr_wr(struct mt76x02_dev *dev, const u32 offset, const u8 *addr)
 {
-       struct mt76x0_dev *dev = hw->priv;
-       u32 flags = 0;
-
-#define MT76_FILTER(_flag, _hw) do { \
-               flags |= *total_flags & FIF_##_flag;                    \
-               dev->rxfilter &= ~(_hw);                                \
-               dev->rxfilter |= !(flags & FIF_##_flag) * (_hw);        \
-       } while (0)
-
-       mutex_lock(&dev->mutex);
-
-       dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
-
-       MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
-       MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
-       MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
-                            MT_RX_FILTR_CFG_CTS |
-                            MT_RX_FILTR_CFG_CFEND |
-                            MT_RX_FILTR_CFG_CFACK |
-                            MT_RX_FILTR_CFG_BA |
-                            MT_RX_FILTR_CFG_CTRL_RSV);
-       MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
-
-       *total_flags = flags;
-       mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
-
-       mutex_unlock(&dev->mutex);
+       mt76_wr(dev, offset, get_unaligned_le32(addr));
+       mt76_wr(dev, offset + 4, addr[4] | addr[5] << 8);
 }
 
-static void
-mt76x0_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                        struct ieee80211_bss_conf *info, u32 changed)
+void mt76x0_bss_info_changed(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif,
+                            struct ieee80211_bss_conf *info, u32 changed)
 {
-       struct mt76x0_dev *dev = hw->priv;
-
-       mutex_lock(&dev->mutex);
+       struct mt76x02_dev *dev = hw->priv;
 
-       if (changed & BSS_CHANGED_ASSOC)
-               mt76x0_phy_con_cal_onoff(dev, info);
+       mutex_lock(&dev->mt76.mutex);
 
        if (changed & BSS_CHANGED_BSSID) {
                mt76x0_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid);
@@ -166,8 +79,8 @@ mt76x0_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 
        if (changed & BSS_CHANGED_BASIC_RATES) {
                mt76_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates);
-               mt76_wr(dev, MT_HT_FBK_CFG0, 0x65432100);
-               mt76_wr(dev, MT_HT_FBK_CFG1, 0xedcba980);
+               mt76_wr(dev, MT_VHT_HT_FBK_CFG0, 0x65432100);
+               mt76_wr(dev, MT_VHT_HT_FBK_CFG1, 0xedcba980);
                mt76_wr(dev, MT_LG_FBK_CFG0, 0xedcba988);
                mt76_wr(dev, MT_LG_FBK_CFG1, 0x00002100);
        }
@@ -192,82 +105,25 @@ mt76x0_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        if (changed & BSS_CHANGED_ASSOC)
                mt76x0_phy_recalibrate_after_assoc(dev);
 
-       mutex_unlock(&dev->mutex);
-}
-
-static int
-mt76x0_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-               struct ieee80211_sta *sta)
-{
-       struct mt76x0_dev *dev = hw->priv;
-       struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
-       struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
-       int ret = 0;
-       int idx = 0;
-
-       mutex_lock(&dev->mutex);
-
-       idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
-       if (idx < 0) {
-               ret = -ENOSPC;
-               goto out;
-       }
-
-       msta->wcid.idx = idx;
-       msta->wcid.hw_key_idx = -1;
-       mt76x0_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
-       mt76_clear(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
-       rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
-       mt76x0_mac_set_ampdu_factor(dev);
-
-out:
-       mutex_unlock(&dev->mutex);
-
-       return ret;
-}
-
-static int
-mt76x0_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                  struct ieee80211_sta *sta)
-{
-       struct mt76x0_dev *dev = hw->priv;
-       struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
-       int idx = msta->wcid.idx;
-
-       mutex_lock(&dev->mutex);
-       rcu_assign_pointer(dev->wcid[idx], NULL);
-       mt76_set(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
-       dev->wcid_mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
-       mt76x0_mac_wcid_setup(dev, idx, 0, NULL);
-       mt76x0_mac_set_ampdu_factor(dev);
-       mutex_unlock(&dev->mutex);
-
-       return 0;
+       mutex_unlock(&dev->mt76.mutex);
 }
+EXPORT_SYMBOL_GPL(mt76x0_bss_info_changed);
 
-static void
-mt76x0_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                  enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
+void mt76x0_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                   const u8 *mac_addr)
 {
-}
-
-static void
-mt76x0_sw_scan(struct ieee80211_hw *hw,
-               struct ieee80211_vif *vif,
-               const u8 *mac_addr)
-{
-       struct mt76x0_dev *dev = hw->priv;
+       struct mt76x02_dev *dev = hw->priv;
 
        cancel_delayed_work_sync(&dev->cal_work);
        mt76x0_agc_save(dev);
        set_bit(MT76_SCANNING, &dev->mt76.state);
 }
+EXPORT_SYMBOL_GPL(mt76x0_sw_scan);
 
-static void
-mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
-                        struct ieee80211_vif *vif)
+void mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif)
 {
-       struct mt76x0_dev *dev = hw->priv;
+       struct mt76x02_dev *dev = hw->priv;
 
        mt76x0_agc_restore(dev);
        clear_bit(MT76_SCANNING, &dev->mt76.state);
@@ -275,129 +131,14 @@ mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
        ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
                                     MT_CALIBRATE_INTERVAL);
 }
+EXPORT_SYMBOL_GPL(mt76x0_sw_scan_complete);
 
-static int
-mt76x0_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
-               struct ieee80211_vif *vif, struct ieee80211_sta *sta,
-               struct ieee80211_key_conf *key)
+int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
 {
-       struct mt76x0_dev *dev = hw->priv;
-       struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
-       struct mt76_sta *msta = sta ? (struct mt76_sta *) sta->drv_priv : NULL;
-       struct mt76_wcid *wcid = msta ? &msta->wcid : &mvif->group_wcid;
-       int idx = key->keyidx;
-       int ret;
-
-       if (cmd == SET_KEY) {
-               key->hw_key_idx = wcid->idx;
-               wcid->hw_key_idx = idx;
-       } else {
-               if (idx == wcid->hw_key_idx)
-                       wcid->hw_key_idx = -1;
-
-               key = NULL;
-       }
-
-       if (!msta) {
-               if (key || wcid->hw_key_idx == idx) {
-                       ret = mt76x0_mac_wcid_set_key(dev, wcid->idx, key);
-                       if (ret)
-                               return ret;
-               }
-
-               return mt76x0_mac_shared_key_setup(dev, mvif->idx, idx, key);
-       }
-
-       return mt76x0_mac_wcid_set_key(dev, msta->wcid.idx, key);
-}
-
-static int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
-{
-       struct mt76x0_dev *dev = hw->priv;
+       struct mt76x02_dev *dev = hw->priv;
 
        mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value);
 
        return 0;
 }
-
-static int
-mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                 struct ieee80211_ampdu_params *params)
-{
-       struct mt76x0_dev *dev = hw->priv;
-       struct ieee80211_sta *sta = params->sta;
-       enum ieee80211_ampdu_mlme_action action = params->action;
-       u16 tid = params->tid;
-       u16 *ssn = &params->ssn;
-       struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
-
-       WARN_ON(msta->wcid.idx > N_WCIDS);
-
-       switch (action) {
-       case IEEE80211_AMPDU_RX_START:
-               mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
-               break;
-       case IEEE80211_AMPDU_RX_STOP:
-               mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
-               break;
-       case IEEE80211_AMPDU_TX_OPERATIONAL:
-               ieee80211_send_bar(vif, sta->addr, tid, msta->agg_ssn[tid]);
-               break;
-       case IEEE80211_AMPDU_TX_STOP_FLUSH:
-       case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
-               break;
-       case IEEE80211_AMPDU_TX_START:
-               msta->agg_ssn[tid] = *ssn << 4;
-               ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-               break;
-       case IEEE80211_AMPDU_TX_STOP_CONT:
-               ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-               break;
-       }
-
-       return 0;
-}
-
-static void
-mt76_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                        struct ieee80211_sta *sta)
-{
-       struct mt76x0_dev *dev = hw->priv;
-       struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
-       struct ieee80211_sta_rates *rates;
-       struct ieee80211_tx_rate rate = {};
-
-       rcu_read_lock();
-       rates = rcu_dereference(sta->rates);
-
-       if (!rates)
-               goto out;
-
-       rate.idx = rates->rate[0].idx;
-       rate.flags = rates->rate[0].flags;
-       mt76x0_mac_wcid_set_rate(dev, &msta->wcid, &rate);
-
-out:
-       rcu_read_unlock();
-}
-
-const struct ieee80211_ops mt76x0_ops = {
-       .tx = mt76x0_tx,
-       .start = mt76x0_start,
-       .stop = mt76x0_stop,
-       .add_interface = mt76x0_add_interface,
-       .remove_interface = mt76x0_remove_interface,
-       .config = mt76x0_config,
-       .configure_filter = mt76_configure_filter,
-       .bss_info_changed = mt76x0_bss_info_changed,
-       .sta_add = mt76x0_sta_add,
-       .sta_remove = mt76x0_sta_remove,
-       .sta_notify = mt76x0_sta_notify,
-       .set_key = mt76x0_set_key,
-       .conf_tx = mt76x0_conf_tx,
-       .sw_scan_start = mt76x0_sw_scan,
-       .sw_scan_complete = mt76x0_sw_scan_complete,
-       .ampdu_action = mt76_ampdu_action,
-       .sta_rate_tbl_update = mt76_sta_rate_tbl_update,
-       .set_rts_threshold = mt76x0_set_rts_threshold,
-};
+EXPORT_SYMBOL_GPL(mt76x0_set_rts_threshold);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c
deleted file mode 100644
index 8affacb..0000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c
+++ /dev/null
@@ -1,656 +0,0 @@
-/*
- * (c) Copyright 2002-2010, Ralink Technology, Inc.
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/firmware.h>
-#include <linux/delay.h>
-#include <linux/usb.h>
-#include <linux/skbuff.h>
-
-#include "mt76x0.h"
-#include "dma.h"
-#include "mcu.h"
-#include "usb.h"
-#include "trace.h"
-
-#define MCU_FW_URB_MAX_PAYLOAD         0x38f8
-#define MCU_FW_URB_SIZE                        (MCU_FW_URB_MAX_PAYLOAD + 12)
-#define MCU_RESP_URB_SIZE              1024
-
-static inline int firmware_running(struct mt76x0_dev *dev)
-{
-       return mt76_rr(dev, MT_MCU_COM_REG0) == 1;
-}
-
-static inline void skb_put_le32(struct sk_buff *skb, u32 val)
-{
-       put_unaligned_le32(val, skb_put(skb, 4));
-}
-
-static inline void mt76x0_dma_skb_wrap_cmd(struct sk_buff *skb,
-                                           u8 seq, enum mcu_cmd cmd)
-{
-       WARN_ON(mt76x0_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND,
-                                    FIELD_PREP(MT_TXD_CMD_SEQ, seq) |
-                                    FIELD_PREP(MT_TXD_CMD_TYPE, cmd)));
-}
-
-static inline void trace_mt76x0_mcu_msg_send_cs(struct mt76_dev *dev,
-                                           struct sk_buff *skb, bool need_resp)
-{
-       u32 i, csum = 0;
-
-       for (i = 0; i < skb->len / 4; i++)
-               csum ^= get_unaligned_le32(skb->data + i * 4);
-
-       trace_mt76x0_mcu_msg_send(dev, skb, csum, need_resp);
-}
-
-static struct sk_buff *
-mt76x0_mcu_msg_alloc(struct mt76x0_dev *dev, const void *data, int len)
-{
-       struct sk_buff *skb;
-
-       WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */
-
-       skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
-       if (skb) {
-               skb_reserve(skb, MT_DMA_HDR_LEN);
-               memcpy(skb_put(skb, len), data, len);
-       }
-       return skb;
-}
-
-static void mt76x0_read_resp_regs(struct mt76x0_dev *dev, int len)
-{
-       int i;
-       int n = dev->mcu.reg_pairs_len;
-       u8 *buf = dev->mcu.resp.buf;
-
-       buf += 4;
-       len -= 8;
-
-       if (dev->mcu.burst_read) {
-               u32 reg = dev->mcu.reg_pairs[0].reg - dev->mcu.reg_base;
-
-               WARN_ON_ONCE(len/4 != n);
-               for (i = 0; i < n; i++) {
-                       u32 val = get_unaligned_le32(buf + 4*i);
-
-                       dev->mcu.reg_pairs[i].reg = reg++;
-                       dev->mcu.reg_pairs[i].value = val;
-               }
-       } else {
-               WARN_ON_ONCE(len/8 != n);
-               for (i = 0; i < n; i++) {
-                       u32 reg = get_unaligned_le32(buf + 8*i) - dev->mcu.reg_base;
-                       u32 val = get_unaligned_le32(buf + 8*i + 4);
-
-                       WARN_ON_ONCE(dev->mcu.reg_pairs[i].reg != reg);
-                       dev->mcu.reg_pairs[i].value = val;
-               }
-       }
-}
-
-static int mt76x0_mcu_wait_resp(struct mt76x0_dev *dev, u8 seq)
-{
-       struct urb *urb = dev->mcu.resp.urb;
-       u32 rxfce;
-       int urb_status, ret, try = 5;
-
-       while (try--) {
-               if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl,
-                                                msecs_to_jiffies(300))) {
-                       dev_warn(dev->mt76.dev, "Warning: %s retrying\n", __func__);
-                       continue;
-               }
-
-               /* Make copies of important data before reusing the urb */
-               rxfce = get_unaligned_le32(dev->mcu.resp.buf);
-               urb_status = urb->status * mt76x0_urb_has_error(urb);
-
-               if (urb_status == 0 && dev->mcu.reg_pairs)
-                       mt76x0_read_resp_regs(dev, urb->actual_length);
-
-               ret = mt76x0_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
-                                            &dev->mcu.resp, GFP_KERNEL,
-                                            mt76x0_complete_urb,
-                                            &dev->mcu.resp_cmpl);
-               if (ret)
-                       return ret;
-
-               if (urb_status)
-                       dev_err(dev->mt76.dev, "Error: MCU resp urb failed:%d\n",
-                               urb_status);
-
-               if (FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
-                   FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
-                       return 0;
-
-               dev_err(dev->mt76.dev, "Error: MCU resp evt:%lx seq:%hhx-%lx!\n",
-                       FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
-                       seq, FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
-       }
-
-       dev_err(dev->mt76.dev, "Error: %s timed out\n", __func__);
-       return -ETIMEDOUT;
-}
-
-static int
-__mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb,
-                     enum mcu_cmd cmd, bool wait_resp)
-{
-       struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
-       unsigned cmd_pipe = usb_sndbulkpipe(usb_dev,
-                                           dev->out_ep[MT_EP_OUT_INBAND_CMD]);
-       int sent, ret;
-       u8 seq = 0;
-
-       if (wait_resp)
-               while (!seq)
-                       seq = ++dev->mcu.msg_seq & 0xf;
-
-       mt76x0_dma_skb_wrap_cmd(skb, seq, cmd);
-
-       if (dev->mcu.resp_cmpl.done)
-               dev_err(dev->mt76.dev, "Error: MCU response pre-completed!\n");
-
-       trace_mt76x0_mcu_msg_send_cs(&dev->mt76, skb, wait_resp);
-       trace_mt76x0_submit_urb_sync(&dev->mt76, cmd_pipe, skb->len);
-
-       ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500);
-       if (ret) {
-               dev_err(dev->mt76.dev, "Error: send MCU cmd failed:%d\n", ret);
-               goto out;
-       }
-       if (sent != skb->len)
-               dev_err(dev->mt76.dev, "Error: %s sent != skb->len\n", __func__);
-
-       if (wait_resp)
-               ret = mt76x0_mcu_wait_resp(dev, seq);
-
-out:
-       return ret;
-}
-
-static int
-mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb,
-                    enum mcu_cmd cmd, bool wait_resp)
-{
-       int ret;
-
-       if (test_bit(MT76_REMOVED, &dev->mt76.state))
-               return 0;
-
-       mutex_lock(&dev->mcu.mutex);
-       ret = __mt76x0_mcu_msg_send(dev, skb, cmd, wait_resp);
-       mutex_unlock(&dev->mcu.mutex);
-
-       consume_skb(skb);
-
-       return ret;
-}
-
-int mt76x0_mcu_function_select(struct mt76x0_dev *dev,
-                              enum mcu_function func, u32 val)
-{
-       struct sk_buff *skb;
-       struct {
-               __le32 id;
-               __le32 value;
-       } __packed __aligned(4) msg = {
-               .id = cpu_to_le32(func),
-               .value = cpu_to_le32(val),
-       };
-
-       skb = mt76x0_mcu_msg_alloc(dev, &msg, sizeof(msg));
-       if (!skb)
-               return -ENOMEM;
-       return mt76x0_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5);
-}
-
-int
-mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val)
-{
-       struct sk_buff *skb;
-       struct {
-               __le32 id;
-               __le32 value;
-       } __packed __aligned(4) msg = {
-               .id = cpu_to_le32(cal),
-               .value = cpu_to_le32(val),
-       };
-
-       skb = mt76x0_mcu_msg_alloc(dev, &msg, sizeof(msg));
-       if (!skb)
-               return -ENOMEM;
-       return mt76x0_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
-}
-
-int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base,
-                          const struct mt76_reg_pair *data, int n)
-{
-       const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
-       struct sk_buff *skb;
-       int cnt, i, ret;
-
-       if (!n)
-               return 0;
-
-       cnt = min(max_vals_per_cmd, n);
-
-       skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
-       if (!skb)
-               return -ENOMEM;
-       skb_reserve(skb, MT_DMA_HDR_LEN);
-
-       for (i = 0; i < cnt; i++) {
-               skb_put_le32(skb, base + data[i].reg);
-               skb_put_le32(skb, data[i].value);
-       }
-
-       ret = mt76x0_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n);
-       if (ret)
-               return ret;
-
-       return mt76x0_write_reg_pairs(dev, base, data + cnt, n - cnt);
-}
-
-int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base,
-                         struct mt76_reg_pair *data, int n)
-{
-       const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
-       struct sk_buff *skb;
-       int cnt, i, ret;
-
-       if (!n)
-               return 0;
-
-       cnt = min(max_vals_per_cmd, n);
-       if (cnt != n)
-               return -EINVAL;
-
-       skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
-       if (!skb)
-               return -ENOMEM;
-       skb_reserve(skb, MT_DMA_HDR_LEN);
-
-       for (i = 0; i < cnt; i++) {
-               skb_put_le32(skb, base + data[i].reg);
-               skb_put_le32(skb, data[i].value);
-       }
-
-       mutex_lock(&dev->mcu.mutex);
-
-       dev->mcu.reg_pairs = data;
-       dev->mcu.reg_pairs_len = n;
-       dev->mcu.reg_base = base;
-       dev->mcu.burst_read = false;
-
-       ret = __mt76x0_mcu_msg_send(dev, skb, CMD_RANDOM_READ, true);
-
-       dev->mcu.reg_pairs = NULL;
-
-       mutex_unlock(&dev->mcu.mutex);
-
-       consume_skb(skb);
-
-       return ret;
-
-}
-
-int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset,
-                            const u32 *data, int n)
-{
-       const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
-       struct sk_buff *skb;
-       int cnt, i, ret;
-
-       if (!n)
-               return 0;
-
-       cnt = min(max_regs_per_cmd, n);
-
-       skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
-       if (!skb)
-               return -ENOMEM;
-       skb_reserve(skb, MT_DMA_HDR_LEN);
-
-       skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset);
-       for (i = 0; i < cnt; i++)
-               skb_put_le32(skb, data[i]);
-
-       ret = mt76x0_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n);
-       if (ret)
-               return ret;
-
-       return mt76x0_burst_write_regs(dev, offset + cnt * 4,
-                                       data + cnt, n - cnt);
-}
-
-#if 0
-static int mt76x0_burst_read_regs(struct mt76x0_dev *dev, u32 base,
-                                 struct mt76_reg_pair *data, int n)
-{
-       const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
-       struct sk_buff *skb;
-       int cnt, ret;
-
-       if (!n)
-               return 0;
-
-       cnt = min(max_vals_per_cmd, n);
-       if (cnt != n)
-               return -EINVAL;
-
-       skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
-       if (!skb)
-               return -ENOMEM;
-       skb_reserve(skb, MT_DMA_HDR_LEN);
-
-       skb_put_le32(skb, base + data[0].reg);
-       skb_put_le32(skb, n);
-
-       mutex_lock(&dev->mcu.mutex);
-
-       dev->mcu.reg_pairs = data;
-       dev->mcu.reg_pairs_len = n;
-       dev->mcu.reg_base = base;
-       dev->mcu.burst_read = true;
-
-       ret = __mt76x0_mcu_msg_send(dev, skb, CMD_BURST_READ, true);
-
-       dev->mcu.reg_pairs = NULL;
-
-       mutex_unlock(&dev->mcu.mutex);
-
-       consume_skb(skb);
-
-       return ret;
-}
-#endif
-
-struct mt76_fw_header {
-       __le32 ilm_len;
-       __le32 dlm_len;
-       __le16 build_ver;
-       __le16 fw_ver;
-       u8 pad[4];
-       char build_time[16];
-};
-
-struct mt76_fw {
-       struct mt76_fw_header hdr;
-       u8 ivb[MT_MCU_IVB_SIZE];
-       u8 ilm[];
-};
-
-static int __mt76x0_dma_fw(struct mt76x0_dev *dev,
-                           const struct mt76x0_dma_buf *dma_buf,
-                           const void *data, u32 len, u32 dst_addr)
-{
-       DECLARE_COMPLETION_ONSTACK(cmpl);
-       struct mt76x0_dma_buf buf = *dma_buf; /* we need to fake length */
-       __le32 reg;
-       u32 val;
-       int ret;
-
-       reg = cpu_to_le32(FIELD_PREP(MT_TXD_INFO_TYPE, DMA_COMMAND) |
-                         FIELD_PREP(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
-                         FIELD_PREP(MT_TXD_INFO_LEN, len));
-       memcpy(buf.buf, &reg, sizeof(reg));
-       memcpy(buf.buf + sizeof(reg), data, len);
-       memset(buf.buf + sizeof(reg) + len, 0, 8);
-
-       ret = mt76x0_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
-                                      MT_FCE_DMA_ADDR, dst_addr);
-       if (ret)
-               return ret;
-       len = roundup(len, 4);
-       ret = mt76x0_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
-                                      MT_FCE_DMA_LEN, len << 16);
-       if (ret)
-               return ret;
-
-       buf.len = MT_DMA_HDR_LEN + len + 4;
-       ret = mt76x0_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD,
-                                    &buf, GFP_KERNEL,
-                                    mt76x0_complete_urb, &cmpl);
-       if (ret)
-               return ret;
-
-       if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) {
-               dev_err(dev->mt76.dev, "Error: firmware upload timed out\n");
-               usb_kill_urb(buf.urb);
-               return -ETIMEDOUT;
-       }
-       if (mt76x0_urb_has_error(buf.urb)) {
-               dev_err(dev->mt76.dev, "Error: firmware upload urb failed:%d\n",
-                       buf.urb->status);
-               return buf.urb->status;
-       }
-
-       val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
-       val++;
-       mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
-
-       msleep(5);
-
-       return 0;
-}
-
-static int
-mt76x0_dma_fw(struct mt76x0_dev *dev, struct mt76x0_dma_buf *dma_buf,
-              const void *data, int len, u32 dst_addr)
-{
-       int n, ret;
-
-       if (len == 0)
-               return 0;
-
-       n = min(MCU_FW_URB_MAX_PAYLOAD, len);
-       ret = __mt76x0_dma_fw(dev, dma_buf, data, n, dst_addr);
-       if (ret)
-               return ret;
-
-#if 0
-       if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500))
-               return -ETIMEDOUT;
-#endif
-
-       return mt76x0_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n);
-}
-
-static int
-mt76x0_upload_firmware(struct mt76x0_dev *dev, const struct mt76_fw *fw)
-{
-       struct mt76x0_dma_buf dma_buf;
-       void *ivb;
-       u32 ilm_len, dlm_len;
-       int i, ret;
-
-       ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL);
-       if (!ivb)
-               return -ENOMEM;
-       if (mt76x0_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) {
-               ret = -ENOMEM;
-               goto error;
-       }
-
-       ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb);
-       dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %zu\n",
-               ilm_len, sizeof(fw->ivb));
-       ret = mt76x0_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb));
-       if (ret)
-               goto error;
-
-       dlm_len = le32_to_cpu(fw->hdr.dlm_len);
-       dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len);
-       ret = mt76x0_dma_fw(dev, &dma_buf, fw->ilm + ilm_len,
-                            dlm_len, MT_MCU_DLM_OFFSET);
-       if (ret)
-               goto error;
-
-       ret = mt76x0_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
-                                    0x12, 0, ivb, sizeof(fw->ivb));
-       if (ret < 0)
-               goto error;
-       ret = 0;
-
-       for (i = 100; i && !firmware_running(dev); i--)
-               msleep(10);
-       if (!i) {
-               ret = -ETIMEDOUT;
-               goto error;
-       }
-
-       dev_dbg(dev->mt76.dev, "Firmware running!\n");
-error:
-       kfree(ivb);
-       mt76x0_usb_free_buf(dev, &dma_buf);
-
-       return ret;
-}
-
-static int mt76x0_load_firmware(struct mt76x0_dev *dev)
-{
-       const struct firmware *fw;
-       const struct mt76_fw_header *hdr;
-       int len, ret;
-       u32 val;
-
-       mt76_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
-                                        MT_USB_DMA_CFG_TX_BULK_EN));
-
-       if (firmware_running(dev))
-               return 0;
-
-       ret = request_firmware(&fw, MT7610_FIRMWARE, dev->mt76.dev);
-       if (ret)
-               return ret;
-
-       if (!fw || !fw->data || fw->size < sizeof(*hdr))
-               goto err_inv_fw;
-
-       hdr = (const struct mt76_fw_header *) fw->data;
-
-       if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
-               goto err_inv_fw;
-
-       len = sizeof(*hdr);
-       len += le32_to_cpu(hdr->ilm_len);
-       len += le32_to_cpu(hdr->dlm_len);
-
-       if (fw->size != len)
-               goto err_inv_fw;
-
-       val = le16_to_cpu(hdr->fw_ver);
-       dev_dbg(dev->mt76.dev,
-                "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
-                (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
-                le16_to_cpu(hdr->build_ver), hdr->build_time);
-
-       len = le32_to_cpu(hdr->ilm_len);
-
-       mt76_wr(dev, 0x1004, 0x2c);
-
-       mt76_set(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
-                                      MT_USB_DMA_CFG_TX_BULK_EN) |
-                                      FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20));
-       mt76x0_vendor_reset(dev);
-       msleep(5);
-/*
-       mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
-                                        MT_PBF_CFG_TX1Q_EN |
-                                        MT_PBF_CFG_TX2Q_EN |
-                                        MT_PBF_CFG_TX3Q_EN));
-*/
-
-       mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
-
-       /* FCE tx_fs_base_ptr */
-       mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
-       /* FCE tx_fs_max_cnt */
-       mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
-       /* FCE pdma enable */
-       mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
-       /* FCE skip_fs_en */
-       mt76_wr(dev, MT_FCE_SKIP_FS, 3);
-
-       val = mt76_rr(dev, MT_USB_DMA_CFG);
-       val |= MT_USB_DMA_CFG_TX_WL_DROP;
-       mt76_wr(dev, MT_USB_DMA_CFG, val);
-       val &= ~MT_USB_DMA_CFG_TX_WL_DROP;
-       mt76_wr(dev, MT_USB_DMA_CFG, val);
-
-       ret = mt76x0_upload_firmware(dev, (const struct mt76_fw *)fw->data);
-       release_firmware(fw);
-
-       mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
-
-       return ret;
-
-err_inv_fw:
-       dev_err(dev->mt76.dev, "Invalid firmware image\n");
-       release_firmware(fw);
-       return -ENOENT;
-}
-
-int mt76x0_mcu_init(struct mt76x0_dev *dev)
-{
-       int ret;
-
-       mutex_init(&dev->mcu.mutex);
-
-       ret = mt76x0_load_firmware(dev);
-       if (ret)
-               return ret;
-
-       set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
-
-       return 0;
-}
-
-int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev)
-{
-       int ret;
-
-       ret = mt76x0_mcu_function_select(dev, Q_SELECT, 1);
-       if (ret)
-               return ret;
-
-       init_completion(&dev->mcu.resp_cmpl);
-       if (mt76x0_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) {
-               mt76x0_usb_free_buf(dev, &dev->mcu.resp);
-               return -ENOMEM;
-       }
-
-       ret = mt76x0_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
-                                    &dev->mcu.resp, GFP_KERNEL,
-                                    mt76x0_complete_urb, &dev->mcu.resp_cmpl);
-       if (ret) {
-               mt76x0_usb_free_buf(dev, &dev->mcu.resp);
-               return ret;
-       }
-
-       return 0;
-}
-
-void mt76x0_mcu_cmd_deinit(struct mt76x0_dev *dev)
-{
-       usb_kill_urb(dev->mcu.resp.urb);
-       mt76x0_usb_free_buf(dev, &dev->mcu.resp);
-}
index 8c2f77f4c3f577daf989e1392571518554fa05fe..b66e70f6cd899762ad832af834d5932a53fd3c37 100644
 #ifndef __MT76X0U_MCU_H
 #define __MT76X0U_MCU_H
 
-struct mt76x0_dev;
+#include "../mt76x02_mcu.h"
 
-/* Register definitions */
-#define MT_MCU_RESET_CTL               0x070C
-#define MT_MCU_INT_LEVEL               0x0718
-#define MT_MCU_COM_REG0                        0x0730
-#define MT_MCU_COM_REG1                        0x0734
-#define MT_MCU_COM_REG2                        0x0738
-#define MT_MCU_COM_REG3                        0x073C
+struct mt76x02_dev;
 
 #define MT_MCU_IVB_SIZE                        0x40
 #define MT_MCU_DLM_OFFSET              0x80000
 
-#define MT_MCU_MEMMAP_WLAN             0x00410000
 /* We use the same space for BBP as for MAC regs
  * #define MT_MCU_MEMMAP_BBP           0x40000000
  */
 #define MT_MCU_MEMMAP_RF               0x80000000
 
-#define INBAND_PACKET_MAX_LEN          192
-
-enum mcu_cmd {
-       CMD_FUN_SET_OP = 1,
-       CMD_LOAD_CR = 2,
-       CMD_INIT_GAIN_OP = 3,
-       CMD_DYNC_VGA_OP = 6,
-       CMD_TDLS_CH_SW = 7,
-       CMD_BURST_WRITE = 8,
-       CMD_READ_MODIFY_WRITE = 9,
-       CMD_RANDOM_READ = 10,
-       CMD_BURST_READ = 11,
-       CMD_RANDOM_WRITE = 12,
-       CMD_LED_MODE_OP = 16,
-       CMD_POWER_SAVING_OP = 20,
-       CMD_WOW_CONFIG = 21,
-       CMD_WOW_QUERY = 22,
-       CMD_WOW_FEATURE = 24,
-       CMD_CARRIER_DETECT_OP = 28,
-       CMD_RADOR_DETECT_OP = 29,
-       CMD_SWITCH_CHANNEL_OP = 30,
-       CMD_CALIBRATION_OP = 31,
-       CMD_BEACON_OP = 32,
-       CMD_ANTENNA_OP = 33,
-};
-
-enum mcu_function {
-       Q_SELECT = 1,
-       BW_SETTING = 2,
-       ATOMIC_TSSI_SETTING = 5,
-};
-
-enum mcu_power_mode {
-       RADIO_OFF = 0x30,
-       RADIO_ON = 0x31,
-       RADIO_OFF_AUTO_WAKEUP = 0x32,
-       RADIO_OFF_ADVANCE = 0x33,
-       RADIO_ON_ADVANCE = 0x34,
-};
-
 enum mcu_calibrate {
        MCU_CAL_R = 1,
        MCU_CAL_RXDCOC,
@@ -88,14 +41,11 @@ enum mcu_calibrate {
        MCU_CAL_TX_GROUP_DELAY,
 };
 
-int mt76x0_mcu_init(struct mt76x0_dev *dev);
-int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev);
-void mt76x0_mcu_cmd_deinit(struct mt76x0_dev *dev);
-
-int
-mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val);
-
-int
-mt76x0_mcu_function_select(struct mt76x0_dev *dev, enum mcu_function func, u32 val);
+int mt76x0e_mcu_init(struct mt76x02_dev *dev);
+int mt76x0u_mcu_init(struct mt76x02_dev *dev);
+static inline int mt76x0_firmware_running(struct mt76x02_dev *dev)
+{
+       return mt76_rr(dev, MT_MCU_COM_REG0) == 1;
+}
 
 #endif
index fc9857f61771ccbc9d712c94402c455f498b8853..1bff2be45a13d9ea8af54b97f96dcbbe8a256482 100644
 #include <net/mac80211.h>
 #include <linux/debugfs.h>
 
-#include "../mt76.h"
-#include "regs.h"
+#include "../mt76x02.h"
+#include "eeprom.h"
 
 #define MT_CALIBRATE_INTERVAL          (4 * HZ)
 
-#define MT_FREQ_CAL_INIT_DELAY         (30 * HZ)
-#define MT_FREQ_CAL_CHECK_INTERVAL     (10 * HZ)
-#define MT_FREQ_CAL_ADJ_INTERVAL       (HZ / 2)
-
-#define MT_BBP_REG_VERSION             0x00
-
 #define MT_USB_AGGR_SIZE_LIMIT         21 /* * 1024B */
 #define MT_USB_AGGR_TIMEOUT            0x80 /* * 33ns */
-#define MT_RX_ORDER                    3
-#define MT_RX_URB_SIZE                 (PAGE_SIZE << MT_RX_ORDER)
-
-struct mt76x0_dma_buf {
-       struct urb *urb;
-       void *buf;
-       dma_addr_t dma;
-       size_t len;
-};
-
-struct mt76x0_mcu {
-       struct mutex mutex;
-
-       u8 msg_seq;
-
-       struct mt76x0_dma_buf resp;
-       struct completion resp_cmpl;
-
-       struct mt76_reg_pair *reg_pairs;
-       unsigned int reg_pairs_len;
-       u32 reg_base;
-       bool burst_read;
-};
-
-struct mac_stats {
-       u64 rx_stat[6];
-       u64 tx_stat[6];
-       u64 aggr_stat[2];
-       u64 aggr_n[32];
-       u64 zero_len_del[2];
-};
-
-#define N_RX_ENTRIES   16
-struct mt76x0_rx_queue {
-       struct mt76x0_dev *dev;
-
-       struct mt76x0_dma_buf_rx {
-               struct urb *urb;
-               struct page *p;
-       } e[N_RX_ENTRIES];
-
-       unsigned int start;
-       unsigned int end;
-       unsigned int entries;
-       unsigned int pending;
-};
-
-#define N_TX_ENTRIES   64
-
-struct mt76x0_tx_queue {
-       struct mt76x0_dev *dev;
-
-       struct mt76x0_dma_buf_tx {
-               struct urb *urb;
-               struct sk_buff *skb;
-       } e[N_TX_ENTRIES];
-
-       unsigned int start;
-       unsigned int end;
-       unsigned int entries;
-       unsigned int used;
-       unsigned int fifo_seq;
-};
-
-/* WCID allocation:
- *     0: mcast wcid
- *     1: bssid wcid
- *  1...: STAs
- * ...7e: group wcids
- *    7f: reserved
- */
-#define N_WCIDS                128
-#define GROUP_WCID(idx)        (254 - idx)
-
-struct mt76x0_eeprom_params;
-
-#define MT_EE_TEMPERATURE_SLOPE                39
-#define MT_FREQ_OFFSET_INVALID         -128
-
-/* addr req mask */
-#define MT_VEND_TYPE_EEPROM    BIT(31)
-#define MT_VEND_TYPE_CFG       BIT(30)
-#define MT_VEND_TYPE_MASK      (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)
-
-#define MT_VEND_ADDR(type, n)  (MT_VEND_TYPE_##type | (n))
-
-enum mt_bw {
-       MT_BW_20,
-       MT_BW_40,
-};
-
-/**
- * struct mt76x0_dev - adapter structure
- * @lock:              protects @wcid->tx_rate.
- * @mac_lock:          locks out mac80211's tx status and rx paths.
- * @tx_lock:           protects @tx_q and changes of MT76_STATE_*_STATS
- *                     flags in @state.
- * @rx_lock:           protects @rx_q.
- * @con_mon_lock:      protects @ap_bssid, @bcn_*, @avg_rssi.
- * @mutex:             ensures exclusive access from mac80211 callbacks.
- * @reg_atomic_mutex:  ensures atomicity of indirect register accesses
- *                     (accesses to RF and BBP).
- * @hw_atomic_mutex:   ensures exclusive access to HW during critical
- *                     operations (power management, channel switch).
- */
-struct mt76x0_dev {
-       struct mt76_dev mt76; /* must be first */
-
-       struct mutex mutex;
-
-       struct mutex usb_ctrl_mtx;
-       u8 data[32];
-
-       struct tasklet_struct rx_tasklet;
-       struct tasklet_struct tx_tasklet;
-
-       u8 out_ep[__MT_EP_OUT_MAX];
-       u16 out_max_packet;
-       u8 in_ep[__MT_EP_IN_MAX];
-       u16 in_max_packet;
-
-       unsigned long wcid_mask[DIV_ROUND_UP(N_WCIDS, BITS_PER_LONG)];
-       unsigned long vif_mask;
-
-       struct mt76x0_mcu mcu;
-
-       struct delayed_work cal_work;
-       struct delayed_work mac_work;
-
-       struct workqueue_struct *stat_wq;
-       struct delayed_work stat_work;
 
-       struct mt76_wcid *mon_wcid;
-       struct mt76_wcid __rcu *wcid[N_WCIDS];
-
-       spinlock_t mac_lock;
-
-       const u16 *beacon_offsets;
-
-       u8 macaddr[ETH_ALEN];
-       struct mt76x0_eeprom_params *ee;
-
-       struct mutex reg_atomic_mutex;
-       struct mutex hw_atomic_mutex;
-
-       u32 rxfilter;
-       u32 debugfs_reg;
-
-       /* TX */
-       spinlock_t tx_lock;
-       struct mt76x0_tx_queue *tx_q;
-       struct sk_buff_head tx_skb_done;
-
-       atomic_t avg_ampdu_len;
-
-       /* RX */
-       spinlock_t rx_lock;
-       struct mt76x0_rx_queue rx_q;
-
-       /* Connection monitoring things */
-       spinlock_t con_mon_lock;
-       u8 ap_bssid[ETH_ALEN];
-
-       s8 bcn_freq_off;
-       u8 bcn_phy_mode;
-
-       int avg_rssi; /* starts at 0 and converges */
-
-       u8 agc_save;
-       u16 chainmask;
-
-       struct mac_stats stats;
-};
-
-struct mt76x0_wcid {
-       u8 idx;
-       u8 hw_key_idx;
-
-       u16 tx_rate;
-       bool tx_rate_set;
-       u8 tx_rate_nss;
-};
-
-struct mt76_vif {
-       u8 idx;
-
-       struct mt76_wcid group_wcid;
-};
-
-struct mt76_tx_status {
-       u8 valid:1;
-       u8 success:1;
-       u8 aggr:1;
-       u8 ack_req:1;
-       u8 is_probe:1;
-       u8 wcid;
-       u8 pktid;
-       u8 retry;
-       u16 rate;
-} __packed __aligned(2);
-
-struct mt76_sta {
-       struct mt76_wcid wcid;
-       struct mt76_tx_status status;
-       int n_frames;
-       u16 agg_ssn[IEEE80211_NUM_TIDS];
-};
-
-struct mt76_reg_pair {
-       u32 reg;
-       u32 value;
-};
-
-struct mt76x0_rxwi;
-
-extern const struct ieee80211_ops mt76x0_ops;
-
-static inline bool is_mt7610e(struct mt76x0_dev *dev)
+static inline bool is_mt7610e(struct mt76x02_dev *dev)
 {
        /* TODO */
        return false;
 }
 
-void mt76x0_init_debugfs(struct mt76x0_dev *dev);
-
-int mt76x0_wait_asic_ready(struct mt76x0_dev *dev);
-
-/* Compatibility with mt76 */
-#define mt76_rmw_field(_dev, _reg, _field, _val)       \
-       mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
-
-int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base,
-                           const struct mt76_reg_pair *data, int len);
-int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base,
-                         struct mt76_reg_pair *data, int len);
-int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset,
-                            const u32 *data, int n);
-void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr);
+void mt76x0_init_debugfs(struct mt76x02_dev *dev);
 
 /* Init */
-struct mt76x0_dev *mt76x0_alloc_device(struct device *dev);
-int mt76x0_init_hardware(struct mt76x0_dev *dev);
-int mt76x0_register_device(struct mt76x0_dev *dev);
-void mt76x0_cleanup(struct mt76x0_dev *dev);
-void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset);
-
-int mt76x0_mac_start(struct mt76x0_dev *dev);
-void mt76x0_mac_stop(struct mt76x0_dev *dev);
+struct mt76x02_dev *
+mt76x0_alloc_device(struct device *pdev,
+                   const struct mt76_driver_ops *drv_ops,
+                   const struct ieee80211_ops *ops);
+int mt76x0_init_hardware(struct mt76x02_dev *dev);
+int mt76x0_register_device(struct mt76x02_dev *dev);
+void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset);
+
+int mt76x0_mac_start(struct mt76x02_dev *dev);
+void mt76x0_mac_stop(struct mt76x02_dev *dev);
+
+int mt76x0_config(struct ieee80211_hw *hw, u32 changed);
+void mt76x0_bss_info_changed(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif,
+                            struct ieee80211_bss_conf *info, u32 changed);
+void mt76x0_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                   const u8 *mac_addr);
+void mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif);
+int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
 
 /* PHY */
-void mt76x0_phy_init(struct mt76x0_dev *dev);
-int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev);
-void mt76x0_agc_save(struct mt76x0_dev *dev);
-void mt76x0_agc_restore(struct mt76x0_dev *dev);
-int mt76x0_phy_set_channel(struct mt76x0_dev *dev,
+void mt76x0_phy_init(struct mt76x02_dev *dev);
+int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev);
+void mt76x0_agc_save(struct mt76x02_dev *dev);
+void mt76x0_agc_restore(struct mt76x02_dev *dev);
+int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
                            struct cfg80211_chan_def *chandef);
-void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev);
-int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi);
-void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev,
-                              struct ieee80211_bss_conf *info);
+void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev);
+void mt76x0_phy_set_txpower(struct mt76x02_dev *dev);
 
 /* MAC */
 void mt76x0_mac_work(struct work_struct *work);
-void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
+void mt76x0_mac_set_protection(struct mt76x02_dev *dev, bool legacy_prot,
                                int ht_mode);
-void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb);
-void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval);
-void
-mt76x0_mac_wcid_setup(struct mt76x0_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
-void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev);
-
-/* TX */
-void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
-               struct sk_buff *skb);
-int mt76x0_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                   u16 queue, const struct ieee80211_tx_queue_params *params);
-void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb);
-void mt76x0_tx_stat(struct work_struct *work);
-
-/* util */
-void mt76x0_remove_hdr_pad(struct sk_buff *skb);
-int mt76x0_insert_hdr_pad(struct sk_buff *skb);
-
-int mt76x0_dma_init(struct mt76x0_dev *dev);
-void mt76x0_dma_cleanup(struct mt76x0_dev *dev);
-
-int mt76x0_dma_enqueue_tx(struct mt76x0_dev *dev, struct sk_buff *skb,
-                          struct mt76_wcid *wcid, int hw_q);
+void mt76x0_mac_set_short_preamble(struct mt76x02_dev *dev, bool short_preamb);
+void mt76x0_mac_config_tsf(struct mt76x02_dev *dev, bool enable, int interval);
+void mt76x0_mac_set_ampdu_factor(struct mt76x02_dev *dev);
 
 #endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
new file mode 100644
index 0000000..87997cd
--- /dev/null
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt76x0.h"
+#include "mcu.h"
+
+static int mt76x0e_start(struct ieee80211_hw *hw)
+{
+       struct mt76x02_dev *dev = hw->priv;
+
+       mutex_lock(&dev->mt76.mutex);
+
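+       /* start the MAC and kick off the periodic calibration and MAC work */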
+       mt76x02_mac_start(dev);
+       ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
+                                    MT_CALIBRATE_INTERVAL);
+       ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+                                    MT_CALIBRATE_INTERVAL);
+       set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+       mutex_unlock(&dev->mt76.mutex);
+
+       return 0;
+}
+
+static void mt76x0e_stop_hw(struct mt76x02_dev *dev)
+{
+       cancel_delayed_work_sync(&dev->cal_work);
+       cancel_delayed_work_sync(&dev->mac_work);
+
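+       /* wait for in-flight TX DMA to go idle before clearing its enable bit */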
+       if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY,
+                      0, 1000))
+               dev_warn(dev->mt76.dev, "TX DMA did not stop\n");
+       mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN);
+
+       mt76x0_mac_stop(dev);
+
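+       /* with the MAC stopped, drain RX DMA the same way */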
+       if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
+                      0, 1000))
+               dev_warn(dev->mt76.dev, "TX DMA did not stop\n");
+       mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_EN);
+}
+
+static void mt76x0e_stop(struct ieee80211_hw *hw)
+{
+       struct mt76x02_dev *dev = hw->priv;
+
+       mutex_lock(&dev->mt76.mutex);
+       clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+       mt76x0e_stop_hw(dev);
+       mutex_unlock(&dev->mt76.mutex);
+}
+
+static const struct ieee80211_ops mt76x0e_ops = {
+       .tx = mt76x02_tx,
+       .start = mt76x0e_start,
+       .stop = mt76x0e_stop,
+       .config = mt76x0_config,
+       .add_interface = mt76x02_add_interface,
+       .remove_interface = mt76x02_remove_interface,
+       .configure_filter = mt76x02_configure_filter,
+};
+
+static int mt76x0e_register_device(struct mt76x02_dev *dev)
+{
+       int err;
+
+       mt76x0_chip_onoff(dev, true, false);
+       if (!mt76x02_wait_for_mac(&dev->mt76))
+               return -ETIMEDOUT;
+
+       mt76x02_dma_disable(dev);
+       err = mt76x0e_mcu_init(dev);
+       if (err < 0)
+               return err;
+
+       err = mt76x02_dma_init(dev);
+       if (err < 0)
+               return err;
+
+       err = mt76x0_init_hardware(dev);
+       if (err < 0)
+               return err;
+
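+       /* extra tuning for the WLAN-only MT7610 */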
+       if (mt76_chip(&dev->mt76) == 0x7610) {
+               u16 val;
+
+               mt76_clear(dev, MT_COEXCFG0, BIT(0));
+               val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_0);
+               if (val & MT_EE_NIC_CONF_0_PA_IO_CURRENT) {
+                       u32 data;
+
+                       /* set external PA I/O
+                        * current to 16mA
+                        */
+                       data = mt76_rr(dev, 0x11c);
+                       data |= 0xc03;
+                       mt76_wr(dev, 0x11c, data);
+               }
+       }
+
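+       /* undocumented tweaks (raw register 0x110, MT_MAX_LEN_CFG bit 13),
+        * presumably carried over from the vendor driver
+        */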
+       mt76_clear(dev, 0x110, BIT(9));
+       mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));
+
+       return 0;
+}
+
+static int
+mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct mt76x02_dev *dev;
+       int ret;
+
+       ret = pcim_enable_device(pdev);
+       if (ret)
+               return ret;
+
+       ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+       if (ret)
+               return ret;
+
+       pci_set_master(pdev);
+
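+       /* restrict streaming DMA to 32-bit addresses */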
+       ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
+
+       dev = mt76x0_alloc_device(&pdev->dev, NULL, &mt76x0e_ops);
+       if (!dev)
+               return -ENOMEM;
+
+       mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
+
+       dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
+       dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
+
+       ret = mt76x0e_register_device(dev);
+       if (ret < 0)
+               goto error;
+
+       return 0;
+
+error:
+       ieee80211_free_hw(mt76_hw(dev));
+       return ret;
+}
+
+static void mt76x0e_cleanup(struct mt76x02_dev *dev)
+{
+       clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+       mt76x0_chip_onoff(dev, false, false);
+       mt76x0e_stop_hw(dev);
+       mt76x02_dma_cleanup(dev);
+       mt76x02_mcu_cleanup(&dev->mt76);
+}
+
+static void
+mt76x0e_remove(struct pci_dev *pdev)
+{
+       struct mt76_dev *mdev = pci_get_drvdata(pdev);
+       struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+
+       mt76_unregister_device(mdev);
+       mt76x0e_cleanup(dev);
+       ieee80211_free_hw(mdev->hw);
+}
+
+static const struct pci_device_id mt76x0e_device_table[] = {
+       { PCI_DEVICE(0x14c3, 0x7630) },
+       { PCI_DEVICE(0x14c3, 0x7650) },
+       { },
+};
+
+MODULE_DEVICE_TABLE(pci, mt76x0e_device_table);
+MODULE_LICENSE("Dual BSD/GPL");
+
+static struct pci_driver mt76x0e_driver = {
+       .name           = KBUILD_MODNAME,
+       .id_table       = mt76x0e_device_table,
+       .probe          = mt76x0e_probe,
+       .remove         = mt76x0e_remove,
+};
+
+module_pci_driver(mt76x0e_driver);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
new file mode 100644
index 0000000..6c66656
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+
+#include "mt76x0.h"
+#include "mcu.h"
+
+#define MT7610E_FIRMWARE       "mediatek/mt7610e.bin"
+#define MT7650E_FIRMWARE       "mediatek/mt7650e.bin"
+
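+/* the IVB occupies the last MT_MCU_IVB_SIZE bytes of the 0x54000-byte ILM window */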
+#define MT_MCU_IVB_ADDR                (MT_MCU_ILM_ADDR + 0x54000 - MT_MCU_IVB_SIZE)
+
+static int mt76x0e_load_firmware(struct mt76x02_dev *dev)
+{
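+       /* MT7630/MT7650 pair WLAN with Bluetooth; the MT7610 is WLAN-only */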
+       bool is_combo_chip = mt76_chip(&dev->mt76) != 0x7610;
+       u32 val, ilm_len, dlm_len, offset = 0;
+       const struct mt76x02_fw_header *hdr;
+       const struct firmware *fw;
+       const char *firmware;
+       const u8 *fw_payload;
+       int len, err;
+
+       if (is_combo_chip)
+               firmware = MT7650E_FIRMWARE;
+       else
+               firmware = MT7610E_FIRMWARE;
+
+       err = request_firmware(&fw, firmware, dev->mt76.dev);
+       if (err)
+               return err;
+
+       if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+               err = -EIO;
+               goto out;
+       }
+
+       hdr = (const struct mt76x02_fw_header *)fw->data;
+
+       len = sizeof(*hdr);
+       len += le32_to_cpu(hdr->ilm_len);
+       len += le32_to_cpu(hdr->dlm_len);
+
+       if (fw->size != len) {
+               err = -EIO;
+               goto out;
+       }
+
+       fw_payload = fw->data + sizeof(*hdr);
+
+       val = le16_to_cpu(hdr->fw_ver);
+       dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
+                (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
+
+       dev_dbg(dev->mt76.dev, "Build: %x Build time: %.16s\n",
+               le16_to_cpu(hdr->build_ver), hdr->build_time);
+
+       if (is_combo_chip && !mt76_poll(dev, MT_MCU_SEMAPHORE_00, 1, 1, 600)) {
+               dev_err(dev->mt76.dev,
+                       "Could not get hardware semaphore for loading fw\n");
+               err = -ETIMEDOUT;
+               goto out;
+       }
+
+       /* upload ILM. */
+       mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+       ilm_len = le32_to_cpu(hdr->ilm_len);
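+       /* on combo chips the IVB is uploaded separately below, so skip it here */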
+       if (is_combo_chip) {
+               ilm_len -= MT_MCU_IVB_SIZE;
+               offset = MT_MCU_IVB_SIZE;
+       }
+       dev_dbg(dev->mt76.dev, "loading FW - ILM %u\n", ilm_len);
+       mt76_wr_copy(dev, MT_MCU_ILM_ADDR + offset, fw_payload + offset,
+                    ilm_len);
+
+       /* upload IVB. */
+       if (is_combo_chip) {
+               dev_dbg(dev->mt76.dev, "loading FW - IVB %u\n",
+                       MT_MCU_IVB_SIZE);
+               mt76_wr_copy(dev, MT_MCU_IVB_ADDR, fw_payload, MT_MCU_IVB_SIZE);
+       }
+
+       /* upload DLM. */
+       mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET);
+       dlm_len = le32_to_cpu(hdr->dlm_len);
+       dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len);
+       mt76_wr_copy(dev, MT_MCU_ILM_ADDR,
+                    fw_payload + le32_to_cpu(hdr->ilm_len), dlm_len);
+
+       /* trigger firmware */
+       mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+       if (is_combo_chip)
+               mt76_wr(dev, MT_MCU_INT_LEVEL, 0x3);
+       else
+               mt76_wr(dev, MT_MCU_RESET_CTL, 0x300);
+
+       if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 1000)) {
+               dev_err(dev->mt76.dev, "Firmware failed to start\n");
+               err = -ETIMEDOUT;
+               goto out;
+       }
+
+       dev_dbg(dev->mt76.dev, "Firmware running!\n");
+
+out:
+       if (is_combo_chip)
+               mt76_wr(dev, MT_MCU_SEMAPHORE_00, 0x1);
+       release_firmware(fw);
+
+       return err;
+}
+
+int mt76x0e_mcu_init(struct mt76x02_dev *dev)
+{
+       static const struct mt76_mcu_ops mt76x0e_mcu_ops = {
+               .mcu_msg_alloc = mt76x02_mcu_msg_alloc,
+               .mcu_send_msg = mt76x02_mcu_msg_send,
+       };
+       int err;
+
+       dev->mt76.mcu_ops = &mt76x0e_mcu_ops;
+
+       err = mt76x0e_load_firmware(dev);
+       if (err < 0)
+               return err;
+
+       set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+
+       return 0;
+}
index 5da7bfbe907ff65c8ac3b871a468ee4e287bba5c..4850a2db18d7e3ebb89cd9edd83df958ecf6e49f 100644
 #include "phy.h"
 #include "initvals.h"
 #include "initvals_phy.h"
+#include "../mt76x02_phy.h"
 
 #include <linux/etherdevice.h>
 
 static int
-mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value)
+mt76x0_rf_csr_wr(struct mt76x02_dev *dev, u32 offset, u8 value)
 {
        int ret = 0;
        u8 bank, reg;
@@ -39,7 +40,7 @@ mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value)
        if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank > 8))
                return -EINVAL;
 
-       mutex_lock(&dev->reg_atomic_mutex);
+       mutex_lock(&dev->phy_mutex);
 
        if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) {
                ret = -ETIMEDOUT;
@@ -54,7 +55,7 @@ mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value)
                   MT_RF_CSR_CFG_KICK);
        trace_mt76x0_rf_write(&dev->mt76, bank, offset, value);
 out:
-       mutex_unlock(&dev->reg_atomic_mutex);
+       mutex_unlock(&dev->phy_mutex);
 
        if (ret < 0)
                dev_err(dev->mt76.dev, "Error: RF write %d:%d failed:%d!!\n",
@@ -63,8 +64,7 @@ out:
        return ret;
 }
 
-static int
-mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset)
+static int mt76x0_rf_csr_rr(struct mt76x02_dev *dev, u32 offset)
 {
        int ret = -ETIMEDOUT;
        u32 val;
@@ -79,7 +79,7 @@ mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset)
        if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank > 8))
                return -EINVAL;
 
-       mutex_lock(&dev->reg_atomic_mutex);
+       mutex_lock(&dev->phy_mutex);
 
        if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
                goto out;
@@ -99,7 +99,7 @@ mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset)
                trace_mt76x0_rf_read(&dev->mt76, bank, offset, ret);
        }
 out:
-       mutex_unlock(&dev->reg_atomic_mutex);
+       mutex_unlock(&dev->phy_mutex);
 
        if (ret < 0)
                dev_err(dev->mt76.dev, "Error: RF read %d:%d failed:%d!!\n",
@@ -109,7 +109,7 @@ out:
 }
 
 static int
-rf_wr(struct mt76x0_dev *dev, u32 offset, u8 val)
+rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val)
 {
        if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) {
                struct mt76_reg_pair pair = {
@@ -117,7 +117,7 @@ rf_wr(struct mt76x0_dev *dev, u32 offset, u8 val)
                        .value = val,
                };
 
-               return mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, &pair, 1);
+               return mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
        } else {
                WARN_ON_ONCE(1);
                return mt76x0_rf_csr_wr(dev, offset, val);
@@ -125,7 +125,7 @@ rf_wr(struct mt76x0_dev *dev, u32 offset, u8 val)
 }
 
 static int
-rf_rr(struct mt76x0_dev *dev, u32 offset)
+rf_rr(struct mt76x02_dev *dev, u32 offset)
 {
        int ret;
        u32 val;
@@ -135,7 +135,7 @@ rf_rr(struct mt76x0_dev *dev, u32 offset)
                        .reg = offset,
                };
 
-               ret = mt76x0_read_reg_pairs(dev, MT_MCU_MEMMAP_RF, &pair, 1);
+               ret = mt76_rd_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
                val = pair.value;
        } else {
                WARN_ON_ONCE(1);
@@ -146,7 +146,7 @@ rf_rr(struct mt76x0_dev *dev, u32 offset)
 }
 
 static int
-rf_rmw(struct mt76x0_dev *dev, u32 offset, u8 mask, u8 val)
+rf_rmw(struct mt76x02_dev *dev, u32 offset, u8 mask, u8 val)
 {
        int ret;
 
@@ -162,23 +162,24 @@ rf_rmw(struct mt76x0_dev *dev, u32 offset, u8 mask, u8 val)
 }
 
 static int
-rf_set(struct mt76x0_dev *dev, u32 offset, u8 val)
+rf_set(struct mt76x02_dev *dev, u32 offset, u8 val)
 {
        return rf_rmw(dev, offset, 0, val);
 }
 
 #if 0
 static int
-rf_clear(struct mt76x0_dev *dev, u32 offset, u8 mask)
+rf_clear(struct mt76x02_dev *dev, u32 offset, u8 mask)
 {
        return rf_rmw(dev, offset, mask, 0);
 }
 #endif
 
-#define RF_RANDOM_WRITE(dev, tab) \
-       mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab));
+#define RF_RANDOM_WRITE(dev, tab)              \
+       mt76_wr_rp(dev, MT_MCU_MEMMAP_RF,       \
+                  tab, ARRAY_SIZE(tab))
 
-int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev)
+int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev)
 {
        int i = 20;
        u32 val;
@@ -199,7 +200,7 @@ int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev)
 }
 
 static void
-mt76x0_bbp_set_ctrlch(struct mt76x0_dev *dev, enum nl80211_chan_width width,
+mt76x0_bbp_set_ctrlch(struct mt76x02_dev *dev, enum nl80211_chan_width width,
                      u8 ctrl)
 {
        int core_val, agc_val;
@@ -225,25 +226,7 @@ mt76x0_bbp_set_ctrlch(struct mt76x0_dev *dev, enum nl80211_chan_width width,
        mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
 }
 
-int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi)
-{
-       s8 lna_gain, rssi_offset;
-       int val;
-
-       if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ) {
-               lna_gain = dev->ee->lna_gain_2ghz;
-               rssi_offset = dev->ee->rssi_offset_2ghz[0];
-       } else {
-               lna_gain = dev->ee->lna_gain_5ghz[0];
-               rssi_offset = dev->ee->rssi_offset_5ghz[0];
-       }
-
-       val = rxwi->rssi[0] + rssi_offset - lna_gain;
-
-       return val;
-}
-
-static void mt76x0_vco_cal(struct mt76x0_dev *dev, u8 channel)
+static void mt76x0_vco_cal(struct mt76x02_dev *dev, u8 channel)
 {
        u8 val;
 
@@ -300,14 +283,14 @@ static void mt76x0_vco_cal(struct mt76x0_dev *dev, u8 channel)
 }
 
 static void
-mt76x0_mac_set_ctrlch(struct mt76x0_dev *dev, bool primary_upper)
+mt76x0_mac_set_ctrlch(struct mt76x02_dev *dev, bool primary_upper)
 {
        mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
                       primary_upper);
 }
 
 static void
-mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band)
+mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band)
 {
        switch (band) {
        case NL80211_BAND_2GHZ:
@@ -339,16 +322,12 @@ mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band)
        }
 }
 
-#define EXT_PA_2G_5G            0x0
-#define EXT_PA_5G_ONLY          0x1
-#define EXT_PA_2G_ONLY          0x2
-#define INT_PA_2G_5G            0x3
-
 static void
-mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band)
+mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_band)
 {
        u16 rf_band = rf_bw_band & 0xff00;
        u16 rf_bw = rf_bw_band & 0x00ff;
+       enum nl80211_band band;
        u32 mac_reg;
        u8 rf_val;
        int i;
@@ -495,11 +474,8 @@ mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band
        mac_reg &= ~0xC; /* Clear 0x518[3:2] */
        mt76_wr(dev, MT_RF_MISC, mac_reg);
 
-       if (dev->ee->pa_type == INT_PA_2G_5G ||
-           (dev->ee->pa_type == EXT_PA_5G_ONLY && (rf_band & RF_G_BAND)) ||
-           (dev->ee->pa_type == EXT_PA_2G_ONLY && (rf_band & RF_A_BAND))) {
-               ; /* Internal PA - nothing to do. */
-       } else {
+       band = (rf_band & RF_G_BAND) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+       if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
                /*
                        MT_RF_MISC (offset: 0x0518)
                        [2]1'b1: enable external A band PA, 1'b0: disable external A band PA
@@ -538,7 +514,7 @@ mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band
 }
 
 static void
-mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band)
+mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_band)
 {
        int i;
 
@@ -551,20 +527,10 @@ mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_ban
 
                if (pair->reg == MT_BBP(AGC, 8)) {
                        u32 val = pair->value;
-                       u8 gain = FIELD_GET(MT_BBP_AGC_GAIN, val);
-
-                       if (channel > 14) {
-                               if (channel < 100)
-                                       gain -= dev->ee->lna_gain_5ghz[0]*2;
-                               else if (channel < 137)
-                                       gain -= dev->ee->lna_gain_5ghz[1]*2;
-                               else
-                                       gain -= dev->ee->lna_gain_5ghz[2]*2;
-
-                       } else {
-                               gain -= dev->ee->lna_gain_2ghz*2;
-                       }
+                       u8 gain;
 
+                       gain = FIELD_GET(MT_BBP_AGC_GAIN, val);
+                       gain -= dev->cal.rx.lna_gain * 2;
                        val &= ~MT_BBP_AGC_GAIN;
                        val |= FIELD_PREP(MT_BBP_AGC_GAIN, gain);
                        mt76_wr(dev, pair->reg, val);
@@ -574,46 +540,27 @@ mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_ban
        }
 }
 
-#if 0
-static void
-mt76x0_extra_power_over_mac(struct mt76x0_dev *dev)
+static void mt76x0_ant_select(struct mt76x02_dev *dev)
 {
-       u32 val;
-
-       val = ((mt76_rr(dev, MT_TX_PWR_CFG_1) & 0x00003f00) >> 8);
-       val |= ((mt76_rr(dev, MT_TX_PWR_CFG_2) & 0x00003f00) << 8);
-       mt76_wr(dev, MT_TX_PWR_CFG_7, val);
-
-       /* TODO: fix VHT */
-       val = ((mt76_rr(dev, MT_TX_PWR_CFG_3) & 0x0000ff00) >> 8);
-       mt76_wr(dev, MT_TX_PWR_CFG_8, val);
+       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
 
-       val = ((mt76_rr(dev, MT_TX_PWR_CFG_4) & 0x0000ff00) >> 8);
-       mt76_wr(dev, MT_TX_PWR_CFG_9, val);
-}
-
-static void
-mt76x0_phy_set_tx_power(struct mt76x0_dev *dev, u8 channel, u8 rf_bw_band)
-{
-       u32 val;
-       int i;
-       int bw = (rf_bw_band & RF_BW_20) ? 0 : 1;
-
-       for (i = 0; i < 4; i++) {
-               if (channel <= 14)
-                       val = dev->ee->tx_pwr_cfg_2g[i][bw];
-               else
-                       val = dev->ee->tx_pwr_cfg_5g[i][bw];
-
-               mt76_wr(dev, MT_TX_PWR_CFG_0 + 4*i, val);
+       /* single antenna mode */
+       if (chan->band == NL80211_BAND_2GHZ) {
+               mt76_rmw(dev, MT_COEXCFG3,
+                        BIT(5) | BIT(4) | BIT(3) | BIT(2), BIT(1));
+               mt76_rmw(dev, MT_WLAN_FUN_CTRL, BIT(5), BIT(6));
+       } else {
+               mt76_rmw(dev, MT_COEXCFG3, BIT(5) | BIT(2),
+                        BIT(4) | BIT(3));
+               mt76_clear(dev, MT_WLAN_FUN_CTRL,
+                          BIT(6) | BIT(5));
        }
-
-       mt76x0_extra_power_over_mac(dev);
+       mt76_clear(dev, MT_CMB_CTRL, BIT(14) | BIT(12));
+       mt76_clear(dev, MT_COEXCFG0, BIT(2));
 }
-#endif
 
 static void
-mt76x0_bbp_set_bw(struct mt76x0_dev *dev, enum nl80211_chan_width width)
+mt76x0_bbp_set_bw(struct mt76x02_dev *dev, enum nl80211_chan_width width)
 {
        enum { BW_20 = 0, BW_40 = 1, BW_80 = 2, BW_10 = 4};
        int bw;
@@ -640,39 +587,27 @@ mt76x0_bbp_set_bw(struct mt76x0_dev *dev, enum nl80211_chan_width width)
                return;
        }
 
-       mt76x0_mcu_function_select(dev, BW_SETTING, bw);
+       mt76x02_mcu_function_select(&dev->mt76, BW_SETTING, bw, false);
 }
 
-static void
-mt76x0_phy_set_chan_pwr(struct mt76x0_dev *dev, u8 channel)
+void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
 {
-       static const int mt76x0_tx_pwr_ch_list[] = {
-               1,2,3,4,5,6,7,8,9,10,11,12,13,14,
-               36,38,40,44,46,48,52,54,56,60,62,64,
-               100,102,104,108,110,112,116,118,120,124,126,128,132,134,136,140,
-               149,151,153,157,159,161,165,167,169,171,173,
-               42,58,106,122,155
-       };
-       int i;
-       u32 val;
+       struct mt76_rate_power *t = &dev->mt76.rate_power;
+       u8 info[2];
 
-       for (i = 0; i < ARRAY_SIZE(mt76x0_tx_pwr_ch_list); i++)
-               if (mt76x0_tx_pwr_ch_list[i] == channel)
-                       break;
+       mt76x0_get_power_info(dev, info);
+       mt76x0_get_tx_power_per_rate(dev);
 
-       if (WARN_ON(i == ARRAY_SIZE(mt76x0_tx_pwr_ch_list)))
-               return;
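+       /* bias the rate table by the channel power offset, clamp it to the
+        * configured limit, then strip the bias before programming
+        */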
+       mt76x02_add_rate_power_offset(t, info[0]);
+       mt76x02_limit_rate_power(t, dev->mt76.txpower_conf);
+       dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t);
+       mt76x02_add_rate_power_offset(t, -info[0]);
 
-       val = mt76_rr(dev, MT_TX_ALC_CFG_0);
-       val &= ~0x3f3f;
-       val |= dev->ee->tx_pwr_per_chan[i];
-       val |= 0x2f2f << 16;
-       mt76_wr(dev, MT_TX_ALC_CFG_0, val);
+       mt76x02_phy_set_txpower(&dev->mt76, info[0], info[1]);
 }
 
-static int
-__mt76x0_phy_set_channel(struct mt76x0_dev *dev,
-                      struct cfg80211_chan_def *chandef)
+int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
+                          struct cfg80211_chan_def *chandef)
 {
        u32 ext_cca_chan[4] = {
                [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
@@ -706,6 +641,7 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev,
        freq1 = chandef->center_freq1;
        channel = chandef->chan->hw_value;
        rf_bw_band = (channel <= 14) ? RF_G_BAND : RF_A_BAND;
+       dev->mt76.chandef = *chandef;
 
        switch (chandef->width) {
        case NL80211_CHAN_WIDTH_40:
@@ -732,6 +668,7 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev,
        mt76x0_bbp_set_bw(dev, chandef->width);
        mt76x0_bbp_set_ctrlch(dev, chandef->width, ch_group_index);
        mt76x0_mac_set_ctrlch(dev, ch_group_index & 1);
+       mt76x0_ant_select(dev);
 
        mt76_rmw(dev, MT_EXT_CCA_CFG,
                 (MT_EXT_CCA_CFG_CCA0 |
@@ -743,6 +680,7 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev,
 
        mt76x0_phy_set_band(dev, chandef->chan->band);
        mt76x0_phy_set_chan_rf_params(dev, channel, rf_bw_band);
+       mt76x0_read_rx_gain(dev);
 
        /* set Japan Tx filter at channel 14 */
        val = mt76_rr(dev, MT_BBP(CORE, 1));
@@ -757,35 +695,22 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev,
        /* Vendor driver doesn't do it */
        /* mt76x0_phy_set_tx_power(dev, channel, rf_bw_band); */
 
+       mt76x0_vco_cal(dev, channel);
        if (scan)
-               mt76x0_vco_cal(dev, channel);
+               mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 1, false);
 
-       mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1);
-       mt76x0_phy_set_chan_pwr(dev, channel);
+       mt76x0_phy_set_txpower(dev);
 
-       dev->mt76.chandef = *chandef;
        return 0;
 }
 
-int mt76x0_phy_set_channel(struct mt76x0_dev *dev,
-                          struct cfg80211_chan_def *chandef)
-{
-       int ret;
-
-       mutex_lock(&dev->hw_atomic_mutex);
-       ret = __mt76x0_phy_set_channel(dev, chandef);
-       mutex_unlock(&dev->hw_atomic_mutex);
-
-       return ret;
-}
-
-void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev)
+void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev)
 {
        u32 tx_alc, reg_val;
        u8 channel = dev->mt76.chandef.chan->hw_value;
        int is_5ghz = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
 
-       mt76x0_mcu_calibrate(dev, MCU_CAL_R, 0);
+       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R, 0, false);
 
        mt76x0_vco_cal(dev, channel);
 
@@ -797,34 +722,36 @@ void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev)
        reg_val &= 0xffffff7e;
        mt76_wr(dev, 0x2124, reg_val);
 
-       mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0);
+       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 0, false);
 
-       mt76x0_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz);
-       mt76x0_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz);
-       mt76x0_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
-       mt76x0_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz);
-       mt76x0_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz);
-       mt76x0_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz);
+       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, is_5ghz, false);
+       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LOFT, is_5ghz, false);
+       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, false);
+       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_GROUP_DELAY,
+                             is_5ghz, false);
+       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQ, is_5ghz, false);
+       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RX_GROUP_DELAY,
+                             is_5ghz, false);
 
        mt76_wr(dev, 0x2124, reg_val);
        mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
        msleep(100);
 
-       mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1);
+       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 1, false);
 }
 
-void mt76x0_agc_save(struct mt76x0_dev *dev)
+void mt76x0_agc_save(struct mt76x02_dev *dev)
 {
        /* Only one RX path */
        dev->agc_save = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, 8)));
 }
 
-void mt76x0_agc_restore(struct mt76x0_dev *dev)
+void mt76x0_agc_restore(struct mt76x02_dev *dev)
 {
        mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, dev->agc_save);
 }
 
-static void mt76x0_temp_sensor(struct mt76x0_dev *dev)
+static void mt76x0_temp_sensor(struct mt76x02_dev *dev)
 {
        u8 rf_b7_73, rf_b0_66, rf_b0_67;
        int cycle, temp;
@@ -860,7 +787,7 @@ static void mt76x0_temp_sensor(struct mt76x0_dev *dev)
        else
                sval |= 0xffffff00; /* Negative */
 
-       temp = (35 * (sval - dev->ee->temp_off))/ 10 + 25;
+       temp = (35 * (sval - dev->cal.rx.temp_offset)) / 10 + 25;
 
 done:
        rf_wr(dev, MT_RF(7, 73), rf_b7_73);
@@ -868,14 +795,17 @@ done:
        rf_wr(dev, MT_RF(0, 73), rf_b0_67);
 }
 
-static void mt76x0_dynamic_vga_tuning(struct mt76x0_dev *dev)
+static void mt76x0_dynamic_vga_tuning(struct mt76x02_dev *dev)
 {
+       struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
        u32 val, init_vga;
+       int avg_rssi;
 
-       init_vga = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 0x54 : 0x4E;
-       if (dev->avg_rssi > -60)
+       init_vga = chandef->chan->band == NL80211_BAND_5GHZ ? 0x54 : 0x4E;
+       avg_rssi = mt76x02_phy_get_min_avg_rssi(&dev->mt76);
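+       /* back off the initial VGA gain when the received signal is strong */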
+       if (avg_rssi > -60)
                init_vga -= 0x20;
-       else if (dev->avg_rssi > -70)
+       else if (avg_rssi > -70)
                init_vga -= 0x10;
 
        val = mt76_rr(dev, MT_BBP(AGC, 8));
@@ -886,8 +816,8 @@ static void mt76x0_dynamic_vga_tuning(struct mt76x0_dev *dev)
 
 static void mt76x0_phy_calibrate(struct work_struct *work)
 {
-       struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
-                                           cal_work.work);
+       struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
+                                              cal_work.work);
 
        mt76x0_dynamic_vga_tuning(dev);
        mt76x0_temp_sensor(dev);
@@ -896,45 +826,7 @@ static void mt76x0_phy_calibrate(struct work_struct *work)
                                     MT_CALIBRATE_INTERVAL);
 }
 
-void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev,
-                              struct ieee80211_bss_conf *info)
-{
-       /* Start/stop collecting beacon data */
-       spin_lock_bh(&dev->con_mon_lock);
-       ether_addr_copy(dev->ap_bssid, info->bssid);
-       dev->avg_rssi = 0;
-       dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
-       spin_unlock_bh(&dev->con_mon_lock);
-}
-
-static void
-mt76x0_set_rx_chains(struct mt76x0_dev *dev)
-{
-       u32 val;
-
-       val = mt76_rr(dev, MT_BBP(AGC, 0));
-       val &= ~(BIT(3) | BIT(4));
-
-       if (dev->chainmask & BIT(1))
-               val |= BIT(3);
-
-       mt76_wr(dev, MT_BBP(AGC, 0), val);
-
-       mb();
-       val = mt76_rr(dev, MT_BBP(AGC, 0));
-}
-
-static void
-mt76x0_set_tx_dac(struct mt76x0_dev *dev)
-{
-       if (dev->chainmask & BIT(1))
-               mt76_set(dev, MT_BBP(TXBE, 5), 3);
-       else
-               mt76_clear(dev, MT_BBP(TXBE, 5), 3);
-}
-
-static void
-mt76x0_rf_init(struct mt76x0_dev *dev)
+static void mt76x0_rf_init(struct mt76x02_dev *dev)
 {
        int i;
        u8 val;
@@ -966,7 +858,8 @@ mt76x0_rf_init(struct mt76x0_dev *dev)
           E1: B0.R22<6:0>: xo_cxo<6:0>
           E2: B0.R21<0>: xo_cxo<0>, B0.R22<7:0>: xo_cxo<8:1>
         */
-       rf_wr(dev, MT_RF(0, 22), min_t(u8, dev->ee->rf_freq_off, 0xBF));
+       rf_wr(dev, MT_RF(0, 22),
+             min_t(u8, dev->cal.rx.freq_offset, 0xbf));
        val = rf_rr(dev, MT_RF(0, 22));
 
        /*
@@ -986,23 +879,11 @@ mt76x0_rf_init(struct mt76x0_dev *dev)
        rf_set(dev, MT_RF(0, 4), 0x80);
 }
 
-static void mt76x0_ant_select(struct mt76x0_dev *dev)
-{
-       /* Single antenna mode. */
-       mt76_rmw(dev, MT_WLAN_FUN_CTRL, BIT(5), BIT(6));
-       mt76_clear(dev, MT_CMB_CTRL, BIT(14) | BIT(12));
-       mt76_clear(dev, MT_COEXCFG0, BIT(2));
-       mt76_rmw(dev, MT_COEXCFG3, BIT(5) | BIT(4) | BIT(3) | BIT(2), BIT(1));
-}
-
-void mt76x0_phy_init(struct mt76x0_dev *dev)
+void mt76x0_phy_init(struct mt76x02_dev *dev)
 {
        INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibrate);
 
-       mt76x0_ant_select(dev);
-
        mt76x0_rf_init(dev);
-
-       mt76x0_set_rx_chains(dev);
-       mt76x0_set_tx_dac(dev);
+       mt76x02_phy_set_rxpath(&dev->mt76);
+       mt76x02_phy_set_txdac(&dev->mt76);
 }
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h b/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h
deleted file mode 100644
index 16bed4a..0000000
+++ /dev/null
@@ -1,651 +0,0 @@
-/*
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MT76_REGS_H
-#define __MT76_REGS_H
-
-#include <linux/bitops.h>
-
-#define MT_ASIC_VERSION                        0x0000
-
-#define MT76XX_REV_E3          0x22
-#define MT76XX_REV_E4          0x33
-
-#define MT_CMB_CTRL                    0x0020
-#define MT_CMB_CTRL_XTAL_RDY           BIT(22)
-#define MT_CMB_CTRL_PLL_LD             BIT(23)
-
-#define MT_EFUSE_CTRL                  0x0024
-#define MT_EFUSE_CTRL_AOUT             GENMASK(5, 0)
-#define MT_EFUSE_CTRL_MODE             GENMASK(7, 6)
-#define MT_EFUSE_CTRL_LDO_OFF_TIME     GENMASK(13, 8)
-#define MT_EFUSE_CTRL_LDO_ON_TIME      GENMASK(15, 14)
-#define MT_EFUSE_CTRL_AIN              GENMASK(25, 16)
-#define MT_EFUSE_CTRL_KICK             BIT(30)
-#define MT_EFUSE_CTRL_SEL              BIT(31)
-
-#define MT_EFUSE_DATA_BASE             0x0028
-#define MT_EFUSE_DATA(_n)              (MT_EFUSE_DATA_BASE + ((_n) << 2))
-
-#define MT_COEXCFG0                    0x0040
-#define MT_COEXCFG0_COEX_EN            BIT(0)
-
-#define MT_COEXCFG3                    0x004c
-
-#define MT_LDO_CTRL_0                  0x006c
-#define MT_LDO_CTRL_1                  0x0070
-
-#define MT_WLAN_FUN_CTRL               0x0080
-#define MT_WLAN_FUN_CTRL_WLAN_EN       BIT(0)
-#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN   BIT(1)
-#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2)
-
-#define MT_WLAN_FUN_CTRL_WLAN_RESET    BIT(3) /* MT76x0 */
-#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */
-
-#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ  BIT(4)
-#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL        BIT(5)
-#define MT_WLAN_FUN_CTRL_INV_ANT_SEL   BIT(6)
-#define MT_WLAN_FUN_CTRL_WAKE_HOST     BIT(7)
-
-#define MT_WLAN_FUN_CTRL_THERM_RST     BIT(8) /* MT76x2 */
-#define MT_WLAN_FUN_CTRL_THERM_CKEN    BIT(9) /* MT76x2 */
-
-#define MT_WLAN_FUN_CTRL_GPIO_IN       GENMASK(15, 8) /* MT76x0 */
-#define MT_WLAN_FUN_CTRL_GPIO_OUT      GENMASK(23, 16) /* MT76x0 */
-#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN   GENMASK(31, 24) /* MT76x0 */
-
-#define MT_XO_CTRL0                    0x0100
-#define MT_XO_CTRL1                    0x0104
-#define MT_XO_CTRL2                    0x0108
-#define MT_XO_CTRL3                    0x010c
-#define MT_XO_CTRL4                    0x0110
-
-#define MT_XO_CTRL5                    0x0114
-#define MT_XO_CTRL5_C2_VAL             GENMASK(14, 8)
-
-#define MT_XO_CTRL6                    0x0118
-#define MT_XO_CTRL6_C2_CTRL            GENMASK(14, 8)
-
-#define MT_XO_CTRL7                    0x011c
-
-#define MT_IOCFG_6                     0x0124
-#define MT_WLAN_MTC_CTRL               0x10148
-#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
-#define MT_WLAN_MTC_CTRL_PWR_ACK       BIT(12)
-#define MT_WLAN_MTC_CTRL_PWR_ACK_S     BIT(13)
-#define MT_WLAN_MTC_CTRL_BBP_MEM_PD    GENMASK(19, 16)
-#define MT_WLAN_MTC_CTRL_PBF_MEM_PD    BIT(20)
-#define MT_WLAN_MTC_CTRL_FCE_MEM_PD    BIT(21)
-#define MT_WLAN_MTC_CTRL_TSO_MEM_PD    BIT(22)
-#define MT_WLAN_MTC_CTRL_BBP_MEM_RB    BIT(24)
-#define MT_WLAN_MTC_CTRL_PBF_MEM_RB    BIT(25)
-#define MT_WLAN_MTC_CTRL_FCE_MEM_RB    BIT(26)
-#define MT_WLAN_MTC_CTRL_TSO_MEM_RB    BIT(27)
-#define MT_WLAN_MTC_CTRL_STATE_UP      BIT(28)
-
-#define MT_INT_SOURCE_CSR              0x0200
-#define MT_INT_MASK_CSR                        0x0204
-
-#define MT_INT_RX_DONE(_n)             BIT(_n)
-#define MT_INT_RX_DONE_ALL             GENMASK(1, 0)
-#define MT_INT_TX_DONE_ALL             GENMASK(13, 4)
-#define MT_INT_TX_DONE(_n)             BIT(_n + 4)
-#define MT_INT_RX_COHERENT             BIT(16)
-#define MT_INT_TX_COHERENT             BIT(17)
-#define MT_INT_ANY_COHERENT            BIT(18)
-#define MT_INT_MCU_CMD                 BIT(19)
-#define MT_INT_TBTT                    BIT(20)
-#define MT_INT_PRE_TBTT                        BIT(21)
-#define MT_INT_TX_STAT                 BIT(22)
-#define MT_INT_AUTO_WAKEUP             BIT(23)
-#define MT_INT_GPTIMER                 BIT(24)
-#define MT_INT_RXDELAYINT              BIT(26)
-#define MT_INT_TXDELAYINT              BIT(27)
-
-#define MT_WPDMA_GLO_CFG               0x0208
-#define MT_WPDMA_GLO_CFG_TX_DMA_EN     BIT(0)
-#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY   BIT(1)
-#define MT_WPDMA_GLO_CFG_RX_DMA_EN     BIT(2)
-#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY   BIT(3)
-#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE        GENMASK(5, 4)
-#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE     BIT(6)
-#define MT_WPDMA_GLO_CFG_BIG_ENDIAN    BIT(7)
-#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN   GENMASK(15, 8)
-#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS  BIT(30)
-#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET  BIT(31)
-
-#define MT_WPDMA_RST_IDX               0x020c
-
-#define MT_WPDMA_DELAY_INT_CFG         0x0210
-
-#define MT_WMM_AIFSN           0x0214
-#define MT_WMM_AIFSN_MASK              GENMASK(3, 0)
-#define MT_WMM_AIFSN_SHIFT(_n)         ((_n) * 4)
-
-#define MT_WMM_CWMIN           0x0218
-#define MT_WMM_CWMIN_MASK              GENMASK(3, 0)
-#define MT_WMM_CWMIN_SHIFT(_n)         ((_n) * 4)
-
-#define MT_WMM_CWMAX           0x021c
-#define MT_WMM_CWMAX_MASK              GENMASK(3, 0)
-#define MT_WMM_CWMAX_SHIFT(_n)         ((_n) * 4)
-
-#define MT_WMM_TXOP_BASE               0x0220
-#define MT_WMM_TXOP(_n)                        (MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
-#define MT_WMM_TXOP_SHIFT(_n)          ((_n & 1) * 16)
-#define MT_WMM_TXOP_MASK               GENMASK(15, 0)
-
-#define MT_WMM_CTRL                    0x0230 /* MT76x0 */
-
-#define MT_FCE_DMA_ADDR                        0x0230
-#define MT_FCE_DMA_LEN                 0x0234
-
-#define MT_USB_DMA_CFG                 0x238
-#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT        GENMASK(7, 0)
-#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8)
-#define MT_USB_DMA_CFG_TX_WL_DROP      BIT(16)
-#define MT_USB_DMA_CFG_WAKEUP_EN       BIT(17)
-#define MT_USB_DMA_CFG_RX_DROP_OR_PADDING      BIT(18)
-#define MT_USB_DMA_CFG_TX_CLR          BIT(19)
-#define MT_USB_DMA_CFG_WL_LPK_EN       BIT(20)
-#define MT_USB_DMA_CFG_RX_BULK_AGG_EN  BIT(21)
-#define MT_USB_DMA_CFG_RX_BULK_EN      BIT(22)
-#define MT_USB_DMA_CFG_TX_BULK_EN      BIT(23)
-#define MT_USB_DMA_CFG_EP_OUT_VALID    GENMASK(29, 24)
-#define MT_USB_DMA_CFG_RX_BUSY         BIT(30)
-#define MT_USB_DMA_CFG_TX_BUSY         BIT(31)
-#if 0
-#define MT_USB_DMA_CFG_TX_CLR          BIT(19)
-#define MT_USB_DMA_CFG_TXOP_HALT       BIT(20)
-#define MT_USB_DMA_CFG_RX_BULK_AGG_EN  BIT(21)
-#define MT_USB_DMA_CFG_RX_BULK_EN      BIT(22)
-#define MT_USB_DMA_CFG_TX_BULK_EN      BIT(23)
-#define MT_USB_DMA_CFG_UDMA_RX_WL_DROP BIT(25)
-#endif
-
-#define MT_TSO_CTRL                    0x0250
-#define MT_HEADER_TRANS_CTRL_REG       0x0260
-
-#define MT_US_CYC_CFG                  0x02a4
-#define MT_US_CYC_CNT                  GENMASK(7, 0)
-
-#define MT_TX_RING_BASE                        0x0300
-#define MT_RX_RING_BASE                        0x03c0
-#define MT_RING_SIZE                   0x10
-
-#define MT_TX_HW_QUEUE_MCU             8
-#define MT_TX_HW_QUEUE_MGMT            9
-
-#define MT_PBF_SYS_CTRL                        0x0400
-#define MT_PBF_SYS_CTRL_MCU_RESET      BIT(0)
-#define MT_PBF_SYS_CTRL_DMA_RESET      BIT(1)
-#define MT_PBF_SYS_CTRL_MAC_RESET      BIT(2)
-#define MT_PBF_SYS_CTRL_PBF_RESET      BIT(3)
-#define MT_PBF_SYS_CTRL_ASY_RESET      BIT(4)
-
-#define MT_PBF_CFG                     0x0404
-#define MT_PBF_CFG_TX0Q_EN             BIT(0)
-#define MT_PBF_CFG_TX1Q_EN             BIT(1)
-#define MT_PBF_CFG_TX2Q_EN             BIT(2)
-#define MT_PBF_CFG_TX3Q_EN             BIT(3)
-#define MT_PBF_CFG_RX0Q_EN             BIT(4)
-#define MT_PBF_CFG_RX_DROP_EN          BIT(8)
-
-#define MT_PBF_TX_MAX_PCNT             0x0408
-#define MT_PBF_RX_MAX_PCNT             0x040c
-
-#define MT_BCN_OFFSET_BASE             0x041c
-#define MT_BCN_OFFSET(_n)              (MT_BCN_OFFSET_BASE + ((_n) << 2))
-
-#define MT_RXQ_STA                     0x0430
-#define MT_TXQ_STA                     0x0434
-#define        MT_RF_CSR_CFG                   0x0500
-#define MT_RF_CSR_CFG_DATA             GENMASK(7, 0)
-#define MT_RF_CSR_CFG_REG_ID           GENMASK(13, 8)
-#define MT_RF_CSR_CFG_REG_BANK         GENMASK(17, 14)
-#define MT_RF_CSR_CFG_WR               BIT(30)
-#define MT_RF_CSR_CFG_KICK             BIT(31)
-
-#define MT_RF_BYPASS_0                 0x0504
-#define MT_RF_BYPASS_1                 0x0508
-#define MT_RF_SETTING_0                        0x050c
-
-#define MT_RF_MISC                     0x0518
-#define MT_RF_DATA_WRITE               0x0524
-
-#define MT_RF_CTRL                     0x0528
-#define MT_RF_CTRL_ADDR                        GENMASK(11, 0)
-#define MT_RF_CTRL_WRITE               BIT(12)
-#define MT_RF_CTRL_BUSY                        BIT(13)
-#define MT_RF_CTRL_IDX                 BIT(16)
-
-#define MT_RF_DATA_READ                        0x052c
-
-#define MT_COM_REG0                    0x0730
-#define MT_COM_REG1                    0x0734
-#define MT_COM_REG2                    0x0738
-#define MT_COM_REG3                    0x073C
-
-#define MT_FCE_PSE_CTRL                        0x0800
-#define MT_FCE_PARAMETERS              0x0804
-#define MT_FCE_CSO                     0x0808
-
-#define MT_FCE_L2_STUFF                        0x080c
-#define MT_FCE_L2_STUFF_HT_L2_EN       BIT(0)
-#define MT_FCE_L2_STUFF_QOS_L2_EN      BIT(1)
-#define MT_FCE_L2_STUFF_RX_STUFF_EN    BIT(2)
-#define MT_FCE_L2_STUFF_TX_STUFF_EN    BIT(3)
-#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4)
-#define MT_FCE_L2_STUFF_MVINV_BSWAP    BIT(5)
-#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8)
-#define MT_FCE_L2_STUFF_TS_LEN_EN      GENMASK(23, 16)
-#define MT_FCE_L2_STUFF_OTHER_PORT     GENMASK(25, 24)
-
-#define MT_FCE_WLAN_FLOW_CONTROL1      0x0824
-
-#define MT_TX_CPU_FROM_FCE_BASE_PTR    0x09a0
-#define MT_TX_CPU_FROM_FCE_MAX_COUNT   0x09a4
-#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX        0x09a8
-
-#define MT_FCE_PDMA_GLOBAL_CONF                0x09c4
-
-#define MT_PAUSE_ENABLE_CONTROL1       0x0a38
-
-#define MT_FCE_SKIP_FS                 0x0a6c
-
-#define MT_MAC_CSR0                    0x1000
-#define MT_MAC_SYS_CTRL                        0x1004
-#define MT_MAC_SYS_CTRL_RESET_CSR      BIT(0)
-#define MT_MAC_SYS_CTRL_RESET_BBP      BIT(1)
-#define MT_MAC_SYS_CTRL_ENABLE_TX      BIT(2)
-#define MT_MAC_SYS_CTRL_ENABLE_RX      BIT(3)
-
-#define MT_MAC_ADDR_DW0                        0x1008
-#define MT_MAC_ADDR_DW1                        0x100c
-#define MT_MAC_ADDR_DW1_U2ME_MASK      GENMASK(23, 16)
-
-#define MT_MAC_BSSID_DW0               0x1010
-#define MT_MAC_BSSID_DW1               0x1014
-#define MT_MAC_BSSID_DW1_ADDR          GENMASK(15, 0)
-#define MT_MAC_BSSID_DW1_MBSS_MODE     GENMASK(17, 16)
-#define MT_MAC_BSSID_DW1_MBEACON_N     GENMASK(20, 18)
-#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT        BIT(21)
-#define MT_MAC_BSSID_DW1_MBSS_MODE_B2  BIT(22)
-#define MT_MAC_BSSID_DW1_MBEACON_N_B3  BIT(23)
-#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24)
-
-#define MT_MAX_LEN_CFG                 0x1018
-#define MT_MAX_LEN_CFG_AMPDU           GENMASK(13, 12)
-
-#define MT_LED_CFG                     0x102c
-
-#define MT_AMPDU_MAX_LEN_20M1S         0x1030
-#define MT_AMPDU_MAX_LEN_20M2S         0x1034
-#define MT_AMPDU_MAX_LEN_40M1S         0x1038
-#define MT_AMPDU_MAX_LEN_40M2S         0x103c
-#define MT_AMPDU_MAX_LEN               0x1040
-
-#define MT_WCID_DROP_BASE              0x106c
-#define MT_WCID_DROP(_n)               (MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
-#define MT_WCID_DROP_MASK(_n)          BIT((_n) % 32)
-
-#define MT_BCN_BYPASS_MASK             0x108c
-
-#define MT_MAC_APC_BSSID_BASE          0x1090
-#define MT_MAC_APC_BSSID_L(_n)         (MT_MAC_APC_BSSID_BASE + ((_n) * 8))
-#define MT_MAC_APC_BSSID_H(_n)         (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
-#define MT_MAC_APC_BSSID_H_ADDR                GENMASK(15, 0)
-#define MT_MAC_APC_BSSID0_H_EN         BIT(16)
-
-#define MT_XIFS_TIME_CFG               0x1100
-#define MT_XIFS_TIME_CFG_CCK_SIFS      GENMASK(7, 0)
-#define MT_XIFS_TIME_CFG_OFDM_SIFS     GENMASK(15, 8)
-#define MT_XIFS_TIME_CFG_OFDM_XIFS     GENMASK(19, 16)
-#define MT_XIFS_TIME_CFG_EIFS          GENMASK(28, 20)
-#define MT_XIFS_TIME_CFG_BB_RXEND_EN   BIT(29)
-
-#define MT_BKOFF_SLOT_CFG              0x1104
-#define MT_BKOFF_SLOT_CFG_SLOTTIME     GENMASK(7, 0)
-#define MT_BKOFF_SLOT_CFG_CC_DELAY     GENMASK(11, 8)
-
-#define MT_BEACON_TIME_CFG             0x1114
-#define MT_BEACON_TIME_CFG_INTVAL      GENMASK(15, 0)
-#define MT_BEACON_TIME_CFG_TIMER_EN    BIT(16)
-#define MT_BEACON_TIME_CFG_SYNC_MODE   GENMASK(18, 17)
-#define MT_BEACON_TIME_CFG_TBTT_EN     BIT(19)
-#define MT_BEACON_TIME_CFG_BEACON_TX   BIT(20)
-#define MT_BEACON_TIME_CFG_TSF_COMP    GENMASK(31, 24)
-
-#define MT_TBTT_SYNC_CFG               0x1118
-#define MT_TBTT_TIMER_CFG              0x1124
-
-#define MT_INT_TIMER_CFG               0x1128
-#define MT_INT_TIMER_CFG_PRE_TBTT      GENMASK(15, 0)
-#define MT_INT_TIMER_CFG_GP_TIMER      GENMASK(31, 16)
-
-#define MT_INT_TIMER_EN                        0x112c
-#define MT_INT_TIMER_EN_PRE_TBTT_EN    BIT(0)
-#define MT_INT_TIMER_EN_GP_TIMER_EN    BIT(1)
-
-#define MT_MAC_STATUS                  0x1200
-#define MT_MAC_STATUS_TX               BIT(0)
-#define MT_MAC_STATUS_RX               BIT(1)
-
-#define MT_PWR_PIN_CFG                 0x1204
-#define MT_AUX_CLK_CFG                 0x120c
-
-#define MT_BB_PA_MODE_CFG0             0x1214
-#define MT_BB_PA_MODE_CFG1             0x1218
-#define MT_RF_PA_MODE_CFG0             0x121c
-#define MT_RF_PA_MODE_CFG1             0x1220
-
-#define MT_RF_PA_MODE_ADJ0             0x1228
-#define MT_RF_PA_MODE_ADJ1             0x122c
-
-#define MT_DACCLK_EN_DLY_CFG           0x1264
-
-#define MT_EDCA_CFG_BASE               0x1300
-#define MT_EDCA_CFG_AC(_n)             (MT_EDCA_CFG_BASE + ((_n) << 2))
-#define MT_EDCA_CFG_TXOP               GENMASK(7, 0)
-#define MT_EDCA_CFG_AIFSN              GENMASK(11, 8)
-#define MT_EDCA_CFG_CWMIN              GENMASK(15, 12)
-#define MT_EDCA_CFG_CWMAX              GENMASK(19, 16)
-
-#define MT_TX_PWR_CFG_0                        0x1314
-#define MT_TX_PWR_CFG_1                        0x1318
-#define MT_TX_PWR_CFG_2                        0x131c
-#define MT_TX_PWR_CFG_3                        0x1320
-#define MT_TX_PWR_CFG_4                        0x1324
-
-#define MT_TX_BAND_CFG                 0x132c
-#define MT_TX_BAND_CFG_UPPER_40M       BIT(0)
-#define MT_TX_BAND_CFG_5G              BIT(1)
-#define MT_TX_BAND_CFG_2G              BIT(2)
-
-#define MT_HT_FBK_TO_LEGACY            0x1384
-#define MT_TX_MPDU_ADJ_INT             0x1388
-
-#define MT_TX_PWR_CFG_7                        0x13d4
-#define MT_TX_PWR_CFG_8                        0x13d8
-#define MT_TX_PWR_CFG_9                        0x13dc
-
-#define MT_TX_SW_CFG0                  0x1330
-#define MT_TX_SW_CFG1                  0x1334
-#define MT_TX_SW_CFG2                  0x1338
-
-#define MT_TXOP_CTRL_CFG               0x1340
-#define MT_TXOP_TRUN_EN                        GENMASK(5, 0)
-#define MT_TXOP_EXT_CCA_DLY            GENMASK(15, 8)
-#define MT_TXOP_CTRL
-
-#define MT_TX_RTS_CFG                  0x1344
-#define MT_TX_RTS_CFG_RETRY_LIMIT      GENMASK(7, 0)
-#define MT_TX_RTS_CFG_THRESH           GENMASK(23, 8)
-#define MT_TX_RTS_FALLBACK             BIT(24)
-
-#define MT_TX_TIMEOUT_CFG              0x1348
-#define MT_TX_RETRY_CFG                        0x134c
-#define MT_TX_LINK_CFG                 0x1350
-#define MT_HT_FBK_CFG0                 0x1354
-#define MT_HT_FBK_CFG1                 0x1358
-#define MT_LG_FBK_CFG0                 0x135c
-#define MT_LG_FBK_CFG1                 0x1360
-
-#define MT_CCK_PROT_CFG                        0x1364
-#define MT_OFDM_PROT_CFG               0x1368
-#define MT_MM20_PROT_CFG               0x136c
-#define MT_MM40_PROT_CFG               0x1370
-#define MT_GF20_PROT_CFG               0x1374
-#define MT_GF40_PROT_CFG               0x1378
-
-#define MT_PROT_RATE                   GENMASK(15, 0)
-#define MT_PROT_CTRL_RTS_CTS           BIT(16)
-#define MT_PROT_CTRL_CTS2SELF          BIT(17)
-#define MT_PROT_NAV_SHORT              BIT(18)
-#define MT_PROT_NAV_LONG               BIT(19)
-#define MT_PROT_TXOP_ALLOW_CCK         BIT(20)
-#define MT_PROT_TXOP_ALLOW_OFDM                BIT(21)
-#define MT_PROT_TXOP_ALLOW_MM20                BIT(22)
-#define MT_PROT_TXOP_ALLOW_MM40                BIT(23)
-#define MT_PROT_TXOP_ALLOW_GF20                BIT(24)
-#define MT_PROT_TXOP_ALLOW_GF40                BIT(25)
-#define MT_PROT_RTS_THR_EN             BIT(26)
-#define MT_PROT_RATE_CCK_11            0x0003
-#define MT_PROT_RATE_OFDM_6            0x4000
-#define MT_PROT_RATE_OFDM_24           0x4004
-#define MT_PROT_RATE_DUP_OFDM_24       0x4084
-#define MT_PROT_TXOP_ALLOW_ALL         GENMASK(25, 20)
-#define MT_PROT_TXOP_ALLOW_BW20                (MT_PROT_TXOP_ALLOW_ALL &       \
-                                        ~MT_PROT_TXOP_ALLOW_MM40 &     \
-                                        ~MT_PROT_TXOP_ALLOW_GF40)
-
-#define MT_EXP_ACK_TIME                        0x1380
-
-#define MT_TX_PWR_CFG_0_EXT            0x1390
-#define MT_TX_PWR_CFG_1_EXT            0x1394
-
-#define MT_TX_FBK_LIMIT                        0x1398
-#define MT_TX_FBK_LIMIT_MPDU_FBK       GENMASK(7, 0)
-#define MT_TX_FBK_LIMIT_AMPDU_FBK      GENMASK(15, 8)
-#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR  BIT(16)
-#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17)
-#define MT_TX_FBK_LIMIT_RATE_LUT       BIT(18)
-
-#define MT_TX0_RF_GAIN_CORR            0x13a0
-#define MT_TX1_RF_GAIN_CORR            0x13a4
-#define MT_TX0_RF_GAIN_ATTEN           0x13a8
-
-#define MT_TX_ALC_CFG_0                        0x13b0
-#define MT_TX_ALC_CFG_0_CH_INIT_0      GENMASK(5, 0)
-#define MT_TX_ALC_CFG_0_CH_INIT_1      GENMASK(13, 8)
-#define MT_TX_ALC_CFG_0_LIMIT_0                GENMASK(21, 16)
-#define MT_TX_ALC_CFG_0_LIMIT_1                GENMASK(29, 24)
-
-#define MT_TX_ALC_CFG_1                        0x13b4
-#define MT_TX_ALC_CFG_1_TEMP_COMP      GENMASK(5, 0)
-
-#define MT_TX_ALC_CFG_2                        0x13a8
-#define MT_TX_ALC_CFG_2_TEMP_COMP      GENMASK(5, 0)
-
-#define MT_TX0_BB_GAIN_ATTEN           0x13c0
-
-#define MT_TX_ALC_VGA3                 0x13c8
-
-#define MT_TX_PROT_CFG6                        0x13e0
-#define MT_TX_PROT_CFG7                        0x13e4
-#define MT_TX_PROT_CFG8                        0x13e8
-
-#define MT_PIFS_TX_CFG                 0x13ec
-
-#define MT_RX_FILTR_CFG                        0x1400
-
-#define MT_RX_FILTR_CFG_CRC_ERR                BIT(0)
-#define MT_RX_FILTR_CFG_PHY_ERR                BIT(1)
-#define MT_RX_FILTR_CFG_PROMISC                BIT(2)
-#define MT_RX_FILTR_CFG_OTHER_BSS      BIT(3)
-#define MT_RX_FILTR_CFG_VER_ERR                BIT(4)
-#define MT_RX_FILTR_CFG_MCAST          BIT(5)
-#define MT_RX_FILTR_CFG_BCAST          BIT(6)
-#define MT_RX_FILTR_CFG_DUP            BIT(7)
-#define MT_RX_FILTR_CFG_CFACK          BIT(8)
-#define MT_RX_FILTR_CFG_CFEND          BIT(9)
-#define MT_RX_FILTR_CFG_ACK            BIT(10)
-#define MT_RX_FILTR_CFG_CTS            BIT(11)
-#define MT_RX_FILTR_CFG_RTS            BIT(12)
-#define MT_RX_FILTR_CFG_PSPOLL         BIT(13)
-#define MT_RX_FILTR_CFG_BA             BIT(14)
-#define MT_RX_FILTR_CFG_BAR            BIT(15)
-#define MT_RX_FILTR_CFG_CTRL_RSV       BIT(16)
-
-#define MT_AUTO_RSP_CFG                        0x1404
-
-#define MT_AUTO_RSP_PREAMB_SHORT       BIT(4)
-
-#define MT_LEGACY_BASIC_RATE           0x1408
-#define MT_HT_BASIC_RATE               0x140c
-#define MT_HT_CTRL_CFG                 0x1410
-#define MT_RX_PARSER_CFG               0x1418
-#define MT_RX_PARSER_RX_SET_NAV_ALL    BIT(0)
-
-#define MT_EXT_CCA_CFG                 0x141c
-#define MT_EXT_CCA_CFG_CCA0            GENMASK(1, 0)
-#define MT_EXT_CCA_CFG_CCA1            GENMASK(3, 2)
-#define MT_EXT_CCA_CFG_CCA2            GENMASK(5, 4)
-#define MT_EXT_CCA_CFG_CCA3            GENMASK(7, 6)
-#define MT_EXT_CCA_CFG_CCA_MASK                GENMASK(11, 8)
-#define MT_EXT_CCA_CFG_ED_CCA_MASK     GENMASK(15, 12)
-
-#define MT_TX_SW_CFG3                  0x1478
-
-#define MT_PN_PAD_MODE                 0x150c
-
-#define MT_TXOP_HLDR_ET                        0x1608
-
-#define MT_PROT_AUTO_TX_CFG            0x1648
-
-#define MT_RX_STA_CNT0                 0x1700
-#define MT_RX_STA_CNT1                 0x1704
-#define MT_RX_STA_CNT2                 0x1708
-#define MT_TX_STA_CNT0                 0x170c
-#define MT_TX_STA_CNT1                 0x1710
-#define MT_TX_STA_CNT2                 0x1714
-
-/* The vendor driver defines the content of the second word of STAT_FIFO
- * as follows:
- *     MT_TX_STAT_FIFO_RATE            GENMASK(26, 16)
- *     MT_TX_STAT_FIFO_ETXBF           BIT(27)
- *     MT_TX_STAT_FIFO_SND             BIT(28)
- *     MT_TX_STAT_FIFO_ITXBF           BIT(29)
- * However, tests show that bits 16-31 have the same layout as the TXWI
- * rate_ctl field, with the rate set to the rate at which the frame was
- * acked.
- */
-#define MT_TX_STAT_FIFO                        0x1718
-#define MT_TX_STAT_FIFO_VALID          BIT(0)
-#define MT_TX_STAT_FIFO_SUCCESS                BIT(5)
-#define MT_TX_STAT_FIFO_AGGR           BIT(6)
-#define MT_TX_STAT_FIFO_ACKREQ         BIT(7)
-#define MT_TX_STAT_FIFO_WCID           GENMASK(15, 8)
-#define MT_TX_STAT_FIFO_RATE           GENMASK(31, 16)
-
-#define MT_TX_AGG_STAT                 0x171c
-
-#define MT_TX_AGG_CNT_BASE0            0x1720
-
-#define MT_MPDU_DENSITY_CNT            0x1740
-
-#define MT_TX_AGG_CNT_BASE1            0x174c
-
-#define MT_TX_AGG_CNT(_id)             ((_id) < 8 ?                    \
-                                        MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
-                                        MT_TX_AGG_CNT_BASE1 + ((_id - 8) << 2))
-
-#define MT_TX_STAT_FIFO_EXT            0x1798
-#define MT_TX_STAT_FIFO_EXT_RETRY      GENMASK(7, 0)
-#define MT_TX_STAT_FIFO_EXT_PKTID      GENMASK(15, 8)
-
-#define MT_BBP_CORE_BASE               0x2000
-#define MT_BBP_IBI_BASE                        0x2100
-#define MT_BBP_AGC_BASE                        0x2300
-#define MT_BBP_TXC_BASE                        0x2400
-#define MT_BBP_RXC_BASE                        0x2500
-#define MT_BBP_TXO_BASE                        0x2600
-#define MT_BBP_TXBE_BASE               0x2700
-#define MT_BBP_RXFE_BASE               0x2800
-#define MT_BBP_RXO_BASE                        0x2900
-#define MT_BBP_DFS_BASE                        0x2a00
-#define MT_BBP_TR_BASE                 0x2b00
-#define MT_BBP_CAL_BASE                        0x2c00
-#define MT_BBP_DSC_BASE                        0x2e00
-#define MT_BBP_PFMU_BASE               0x2f00
-
-#define MT_BBP(_type, _n)              (MT_BBP_##_type##_BASE + ((_n) << 2))
-
-#define MT_BBP_CORE_R1_BW              GENMASK(4, 3)
-
-#define MT_BBP_AGC_R0_CTRL_CHAN                GENMASK(9, 8)
-#define MT_BBP_AGC_R0_BW               GENMASK(14, 12)
-
-/* AGC, R4/R5 */
-#define MT_BBP_AGC_LNA_GAIN            GENMASK(21, 16)
-
-/* AGC, R8/R9 */
-#define MT_BBP_AGC_GAIN                        GENMASK(14, 8)
-
-#define MT_BBP_AGC20_RSSI0             GENMASK(7, 0)
-#define MT_BBP_AGC20_RSSI1             GENMASK(15, 8)
-
-#define MT_BBP_TXBE_R0_CTRL_CHAN       GENMASK(1, 0)
-
-#define MT_WCID_ADDR_BASE              0x1800
-#define MT_WCID_ADDR(_n)               (MT_WCID_ADDR_BASE + (_n) * 8)
-
-#define MT_SRAM_BASE                   0x4000
-
-#define MT_WCID_KEY_BASE               0x8000
-#define MT_WCID_KEY(_n)                        (MT_WCID_KEY_BASE + (_n) * 32)
-
-#define MT_WCID_IV_BASE                        0xa000
-#define MT_WCID_IV(_n)                 (MT_WCID_IV_BASE + (_n) * 8)
-
-#define MT_WCID_ATTR_BASE              0xa800
-#define MT_WCID_ATTR(_n)               (MT_WCID_ATTR_BASE + (_n) * 4)
-
-#define MT_WCID_ATTR_PAIRWISE          BIT(0)
-#define MT_WCID_ATTR_PKEY_MODE         GENMASK(3, 1)
-#define MT_WCID_ATTR_BSS_IDX           GENMASK(6, 4)
-#define MT_WCID_ATTR_RXWI_UDF          GENMASK(9, 7)
-#define MT_WCID_ATTR_PKEY_MODE_EXT     BIT(10)
-#define MT_WCID_ATTR_BSS_IDX_EXT       BIT(11)
-#define MT_WCID_ATTR_WAPI_MCBC         BIT(15)
-#define MT_WCID_ATTR_WAPI_KEYID                GENMASK(31, 24)
-
-#define MT_SKEY_BASE_0                 0xac00
-#define MT_SKEY_BASE_1                 0xb400
-#define MT_SKEY_0(_bss, _idx)          \
-       (MT_SKEY_BASE_0 + (4 * (_bss) + _idx) * 32)
-#define MT_SKEY_1(_bss, _idx)          \
-       (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + _idx) * 32)
-#define MT_SKEY(_bss, _idx)            \
-       ((_bss & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
-
-#define MT_SKEY_MODE_BASE_0            0xb000
-#define MT_SKEY_MODE_BASE_1            0xb3f0
-#define MT_SKEY_MODE_0(_bss)           \
-       (MT_SKEY_MODE_BASE_0 + ((_bss / 2) << 2))
-#define MT_SKEY_MODE_1(_bss)           \
-       (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
-#define MT_SKEY_MODE(_bss)             \
-       ((_bss & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
-#define MT_SKEY_MODE_MASK              GENMASK(3, 0)
-#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * (_bss & 1)))
-
-#define MT_BEACON_BASE                 0xc000
-
-#define MT_TEMP_SENSOR                 0x1d000
-#define MT_TEMP_SENSOR_VAL             GENMASK(6, 0)
-
-enum mt76_cipher_type {
-       MT_CIPHER_NONE,
-       MT_CIPHER_WEP40,
-       MT_CIPHER_WEP104,
-       MT_CIPHER_TKIP,
-       MT_CIPHER_AES_CCMP,
-       MT_CIPHER_CKIP40,
-       MT_CIPHER_CKIP104,
-       MT_CIPHER_CKIP128,
-       MT_CIPHER_WAPI,
-};
-
-#endif
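
Note on the register map removed above: it disappears from mt76x0 as part of consolidating the mt76x0/mt76x2 register definitions into shared mt76x02 headers (visible elsewhere in this diff as the new "../mt76x02_usb.h" include and mt76x02_* struct names), not because the registers went away. Fields declared with BIT()/GENMASK() are packed and unpacked with FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>. A minimal sketch of programming one EDCA queue register with these names, illustrative only and assuming the mt76_wr() accessor and struct mt76x02_dev used elsewhere in this series:

#include <linux/bitfield.h>

/* Sketch: compose all four EDCA fields for one hardware queue and
 * write them with a single register access, mirroring what
 * mt76x0_conf_tx() below does with MT_EDCA_CFG_AC(). */
static void example_set_edca(struct mt76x02_dev *dev, u8 hw_q, u8 txop,
                             u8 aifsn, u8 cw_min, u8 cw_max)
{
        u32 val = FIELD_PREP(MT_EDCA_CFG_TXOP, txop) |
                  FIELD_PREP(MT_EDCA_CFG_AIFSN, aifsn) |
                  FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
                  FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);

        mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);
}
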
index 8a752a09f2dc164d8ce241dde753858e1b6933d0..75d1d6738c342bcb44884b918352f6741caa3cd9 100644 (file)
@@ -17,7 +17,6 @@
 
 #include <linux/tracepoint.h>
 #include "mt76x0.h"
-#include "mac.h"
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM mt76x0
@@ -178,11 +177,11 @@ DECLARE_EVENT_CLASS(dev_simple_evt,
 );
 
 TRACE_EVENT(mt76x0_rx,
-       TP_PROTO(struct mt76_dev *dev, struct mt76x0_rxwi *rxwi, u32 f),
+       TP_PROTO(struct mt76_dev *dev, struct mt76x02_rxwi *rxwi, u32 f),
        TP_ARGS(dev, rxwi, f),
        TP_STRUCT__entry(
                DEV_ENTRY
-               __field_struct(struct mt76x0_rxwi, rxwi)
+               __field_struct(struct mt76x02_rxwi, rxwi)
                __field(u32, fce_info)
        ),
        TP_fast_assign(
@@ -197,13 +196,13 @@ TRACE_EVENT(mt76x0_rx,
 
 TRACE_EVENT(mt76x0_tx,
        TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb,
-                struct mt76_sta *sta, struct mt76_txwi *h),
+                struct mt76x02_sta *sta, struct mt76x02_txwi *h),
        TP_ARGS(dev, skb, sta, h),
        TP_STRUCT__entry(
                DEV_ENTRY
-               __field_struct(struct mt76_txwi, h)
+               __field_struct(struct mt76x02_txwi, h)
                __field(struct sk_buff *, skb)
-               __field(struct mt76_sta *, sta)
+               __field(struct mt76x02_sta *, sta)
        ),
        TP_fast_assign(
                DEV_ASSIGN;
@@ -211,11 +210,11 @@ TRACE_EVENT(mt76x0_tx,
                __entry->skb = skb;
                __entry->sta = sta;
        ),
-       TP_printk(DEV_PR_FMT "skb:%p sta:%p  flg:%04hx rate_ctl:%04hx "
+       TP_printk(DEV_PR_FMT "skb:%p sta:%p  flg:%04hx rate:%04hx "
                  "ack:%02hhx wcid:%02hhx len_ctl:%05hx", DEV_PR_ARG,
                  __entry->skb, __entry->sta,
                  le16_to_cpu(__entry->h.flags),
-                 le16_to_cpu(__entry->h.rate_ctl),
+                 le16_to_cpu(__entry->h.rate),
                  __entry->h.ack_ctl, __entry->h.wcid,
                  le16_to_cpu(__entry->h.len_ctl))
 );
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c
deleted file mode 100644 (file)
index 751b49c..0000000
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include "mt76x0.h"
-#include "trace.h"
-
-/* Take mac80211 Q id from the skb and translate it to hardware Q id */
-static u8 skb2q(struct sk_buff *skb)
-{
-       int qid = skb_get_queue_mapping(skb);
-
-       if (WARN_ON(qid >= MT_TXQ_PSD)) {
-               qid = MT_TXQ_BE;
-               skb_set_queue_mapping(skb, qid);
-       }
-
-       return q2hwq(qid);
-}
-
-static void mt76x0_tx_skb_remove_dma_overhead(struct sk_buff *skb,
-                                              struct ieee80211_tx_info *info)
-{
-       int pkt_len = (unsigned long)info->status.status_driver_data[0];
-
-       skb_pull(skb, sizeof(struct mt76_txwi) + 4);
-       if (ieee80211_get_hdrlen_from_skb(skb) % 4)
-               mt76x0_remove_hdr_pad(skb);
-
-       skb_trim(skb, pkt_len);
-}
-
-void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb)
-{
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
-       mt76x0_tx_skb_remove_dma_overhead(skb, info);
-
-       ieee80211_tx_info_clear_status(info);
-       info->status.rates[0].idx = -1;
-       info->flags |= IEEE80211_TX_STAT_ACK;
-
-       spin_lock(&dev->mac_lock);
-       ieee80211_tx_status(dev->mt76.hw, skb);
-       spin_unlock(&dev->mac_lock);
-}
-
-static int mt76x0_skb_rooms(struct mt76x0_dev *dev, struct sk_buff *skb)
-{
-       int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
-       u32 need_head;
-
-       need_head = sizeof(struct mt76_txwi) + 4;
-       if (hdr_len % 4)
-               need_head += 2;
-
-       return skb_cow(skb, need_head);
-}
-
-static struct mt76_txwi *
-mt76x0_push_txwi(struct mt76x0_dev *dev, struct sk_buff *skb,
-                 struct ieee80211_sta *sta, struct mt76_wcid *wcid,
-                 int pkt_len)
-{
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_tx_rate *rate = &info->control.rates[0];
-       struct mt76_txwi *txwi;
-       unsigned long flags;
-       u16 txwi_flags = 0;
-       u32 pkt_id;
-       u16 rate_ctl;
-       u8 nss;
-
-       txwi = (struct mt76_txwi *)skb_push(skb, sizeof(struct mt76_txwi));
-       memset(txwi, 0, sizeof(*txwi));
-
-       if (!wcid->tx_rate_set)
-               ieee80211_get_tx_rates(info->control.vif, sta, skb,
-                                      info->control.rates, 1);
-
-       spin_lock_irqsave(&dev->mt76.lock, flags);
-       if (rate->idx < 0 || !rate->count) {
-               rate_ctl = wcid->tx_rate;
-               nss = wcid->tx_rate_nss;
-       } else {
-               rate_ctl = mt76x0_mac_tx_rate_val(dev, rate, &nss);
-       }
-       spin_unlock_irqrestore(&dev->mt76.lock, flags);
-
-       txwi->rate_ctl = cpu_to_le16(rate_ctl);
-
-       if (info->flags & IEEE80211_TX_CTL_LDPC)
-               txwi->rate_ctl |= cpu_to_le16(MT_RXWI_RATE_LDPC);
-       if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
-               txwi->rate_ctl |= cpu_to_le16(MT_RXWI_RATE_STBC);
-       if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
-               txwi_flags |= MT_TXWI_FLAGS_MMPS;
-
-       if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
-               txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
-               pkt_id = 1;
-       } else {
-               pkt_id = 0;
-       }
-
-       if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
-               pkt_id |= MT_TXWI_PKTID_PROBE;
-
-       if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
-               txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
-
-       if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
-               u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
-
-               ba_size <<= sta->ht_cap.ampdu_factor;
-               ba_size = min_t(int, 7, ba_size - 1);
-               if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
-                       ba_size = 0;
-               } else {
-                       txwi_flags |= MT_TXWI_FLAGS_AMPDU;
-                       txwi_flags |= FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
-                                                sta->ht_cap.ampdu_density);
-               }
-               txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
-       }
-
-       txwi->wcid = wcid->idx;
-       txwi->flags |= cpu_to_le16(txwi_flags);
-       txwi->len_ctl = cpu_to_le16(pkt_len);
-       txwi->pktid = pkt_id;
-
-       return txwi;
-}
-
-void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
-               struct sk_buff *skb)
-{
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct mt76x0_dev *dev = hw->priv;
-       struct ieee80211_vif *vif = info->control.vif;
-       struct ieee80211_sta *sta = control->sta;
-       struct mt76_sta *msta = NULL;
-       struct mt76_wcid *wcid = dev->mon_wcid;
-       struct mt76_txwi *txwi;
-       int pkt_len = skb->len;
-       int hw_q = skb2q(skb);
-
-       BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
-       info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len;
-
-       if (mt76x0_skb_rooms(dev, skb) || mt76x0_insert_hdr_pad(skb)) {
-               ieee80211_free_txskb(dev->mt76.hw, skb);
-               return;
-       }
-
-       if (sta) {
-               msta = (struct mt76_sta *) sta->drv_priv;
-               wcid = &msta->wcid;
-       } else if (vif && (!info->control.hw_key && wcid->hw_key_idx != -1)) {
-               struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
-
-               wcid = &mvif->group_wcid;
-       }
-
-       txwi = mt76x0_push_txwi(dev, skb, sta, wcid, pkt_len);
-
-       if (mt76x0_dma_enqueue_tx(dev, skb, wcid, hw_q))
-               return;
-
-       trace_mt76x0_tx(&dev->mt76, skb, msta, txwi);
-}
-
-void mt76x0_tx_stat(struct work_struct *work)
-{
-       struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
-                                              stat_work.work);
-       struct mt76_tx_status stat;
-       unsigned long flags;
-       int cleaned = 0;
-       u8 update = 1;
-
-       while (!test_bit(MT76_REMOVED, &dev->mt76.state)) {
-               stat = mt76x0_mac_fetch_tx_status(dev);
-               if (!stat.valid)
-                       break;
-
-               mt76x0_send_tx_status(dev, &stat, &update);
-
-               cleaned++;
-       }
-       trace_mt76x0_tx_status_cleaned(&dev->mt76, cleaned);
-
-       spin_lock_irqsave(&dev->tx_lock, flags);
-       if (cleaned)
-               queue_delayed_work(dev->stat_wq, &dev->stat_work,
-                                  msecs_to_jiffies(10));
-       else if (test_and_clear_bit(MT76_MORE_STATS, &dev->mt76.state))
-               queue_delayed_work(dev->stat_wq, &dev->stat_work,
-                                  msecs_to_jiffies(20));
-       else
-               clear_bit(MT76_READING_STATS, &dev->mt76.state);
-       spin_unlock_irqrestore(&dev->tx_lock, flags);
-}
-
-int mt76x0_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                   u16 queue, const struct ieee80211_tx_queue_params *params)
-{
-       struct mt76x0_dev *dev = hw->priv;
-       u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue);
-       u32 val;
-
-       /* TODO: should we do funny things with the parameters?
-        *       See what mt76x0_set_default_edca() used to do in init.c.
-        */
-
-       if (params->cw_min)
-               cw_min = fls(params->cw_min);
-       if (params->cw_max)
-               cw_max = fls(params->cw_max);
-
-       WARN_ON(params->txop > 0xff);
-       WARN_ON(params->aifs > 0xf);
-       WARN_ON(cw_min > 0xf);
-       WARN_ON(cw_max > 0xf);
-
-       val = FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
-             FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
-             FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
-       /* TODO: based on the user-controlled EnableTxBurst variable the
-        *       vendor driver sets a really long TXOP on AC0 (see
-        *       connect.c:2009), but apparently only when connected; when
-        *       not connected it should be 0.
-        */
-       if (!hw_q)
-               val |= 0x60;
-       else
-               val |= FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop);
-       mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);
-
-       val = mt76_rr(dev, MT_WMM_TXOP(hw_q));
-       val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q));
-       val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q);
-       mt76_wr(dev, MT_WMM_TXOP(hw_q), val);
-
-       val = mt76_rr(dev, MT_WMM_AIFSN);
-       val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q));
-       val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q);
-       mt76_wr(dev, MT_WMM_AIFSN, val);
-
-       val = mt76_rr(dev, MT_WMM_CWMIN);
-       val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q));
-       val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q);
-       mt76_wr(dev, MT_WMM_CWMIN, val);
-
-       val = mt76_rr(dev, MT_WMM_CWMAX);
-       val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q));
-       val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q);
-       mt76_wr(dev, MT_WMM_CWMAX, val);
-
-       return 0;
-}
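
The deleted mt76x0_conf_tx() above repeats one pattern for MT_WMM_AIFSN, MT_WMM_CWMIN and MT_WMM_CWMAX: read the shared register, clear the 4-bit field belonging to this queue, then OR in the new value. A hedged sketch of that pattern factored into a helper (the helper name is hypothetical, not part of the driver):

/* Hypothetical helper: read-modify-write one per-queue field inside a
 * register shared by all queues. Unlike the open-coded version above,
 * the new value is masked before being OR-ed in. */
static void wmm_rmw_field(struct mt76x02_dev *dev, u32 reg, u32 mask,
                          unsigned int shift, u32 new_val)
{
        u32 val = mt76_rr(dev, reg);

        val &= ~(mask << shift);
        val |= (new_val & mask) << shift;
        mt76_wr(dev, reg, val);
}

With this, the AIFSN update above would collapse to wmm_rmw_field(dev, MT_WMM_AIFSN, MT_WMM_AIFSN_MASK, MT_WMM_AIFSN_SHIFT(hw_q), params->aifs), and likewise for CWmin/CWmax.
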
index 54ae1f113be23dd51b1ab7fafb79bd3d991af893..a7fd36c2f63301bcb1cd810036c0f96d6cccbe79 100644 (file)
@@ -16,8 +16,9 @@
 #include <linux/usb.h>
 
 #include "mt76x0.h"
-#include "usb.h"
+#include "mcu.h"
 #include "trace.h"
+#include "../mt76x02_usb.h"
 
 static struct usb_device_id mt76x0_device_table[] = {
        { USB_DEVICE(0x148F, 0x7610) }, /* MT7610U */
@@ -40,256 +41,215 @@ static struct usb_device_id mt76x0_device_table[] = {
        { USB_DEVICE(0x20f4, 0x806b) }, /* TRENDnet TEW-806UBH  */
        { USB_DEVICE(0x7392, 0xc711) }, /* Devolo Wifi ac Stick */
        { USB_DEVICE(0x0df6, 0x0079) }, /* Sitecom Europe B.V. ac  Stick */
-       { USB_DEVICE(0x2357, 0x0105) }, /* TP-LINK Archer T1U */
+       { USB_DEVICE(0x2357, 0x0105),
+         .driver_info = 1,          }, /* TP-LINK Archer T1U */
        { USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7630, 0xff, 0x2, 0xff)}, /* MT7630U */
        { USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7650, 0xff, 0x2, 0xff)}, /* MT7650U */
        { 0, }
 };
 
-bool mt76x0_usb_alloc_buf(struct mt76x0_dev *dev, size_t len,
-                          struct mt76x0_dma_buf *buf)
+static void mt76x0_init_usb_dma(struct mt76x02_dev *dev)
 {
-       struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+       u32 val;
 
-       buf->len = len;
-       buf->urb = usb_alloc_urb(0, GFP_KERNEL);
-       buf->buf = usb_alloc_coherent(usb_dev, buf->len, GFP_KERNEL, &buf->dma);
+       val = mt76_rr(dev, MT_USB_DMA_CFG);
 
-       return !buf->urb || !buf->buf;
-}
-
-void mt76x0_usb_free_buf(struct mt76x0_dev *dev, struct mt76x0_dma_buf *buf)
-{
-       struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
-
-       usb_free_coherent(usb_dev, buf->len, buf->buf, buf->dma);
-       usb_free_urb(buf->urb);
-}
-
-int mt76x0_usb_submit_buf(struct mt76x0_dev *dev, int dir, int ep_idx,
-                          struct mt76x0_dma_buf *buf, gfp_t gfp,
-                          usb_complete_t complete_fn, void *context)
-{
-       struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
-       unsigned pipe;
-       int ret;
-
-       if (dir == USB_DIR_IN)
-               pipe = usb_rcvbulkpipe(usb_dev, dev->in_ep[ep_idx]);
-       else
-               pipe = usb_sndbulkpipe(usb_dev, dev->out_ep[ep_idx]);
-
-       usb_fill_bulk_urb(buf->urb, usb_dev, pipe, buf->buf, buf->len,
-                         complete_fn, context);
-       buf->urb->transfer_dma = buf->dma;
-       buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
-
-       trace_mt76x0_submit_urb(&dev->mt76, buf->urb);
-       ret = usb_submit_urb(buf->urb, gfp);
-       if (ret)
-               dev_err(dev->mt76.dev, "Error: submit URB dir:%d ep:%d failed:%d\n",
-                       dir, ep_idx, ret);
-       return ret;
-}
-
-void mt76x0_complete_urb(struct urb *urb)
-{
-       struct completion *cmpl = urb->context;
+       val |= MT_USB_DMA_CFG_RX_BULK_EN |
+              MT_USB_DMA_CFG_TX_BULK_EN;
 
-       complete(cmpl);
-}
+       /* disable AGGR_BULK_RX in order to receive one
+        * frame in each rx urb and avoid copies
+        */
+       val &= ~MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+       mt76_wr(dev, MT_USB_DMA_CFG, val);
 
-int mt76x0_vendor_request(struct mt76x0_dev *dev, const u8 req,
-                          const u8 direction, const u16 val, const u16 offset,
-                          void *buf, const size_t buflen)
-{
-       int i, ret;
-       struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
-       const u8 req_type = direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
-       const unsigned int pipe = (direction == USB_DIR_IN) ?
-               usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);
-
-       for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
-               ret = usb_control_msg(usb_dev, pipe, req, req_type,
-                                     val, offset, buf, buflen,
-                                     MT_VEND_REQ_TOUT_MS);
-               trace_mt76x0_vend_req(&dev->mt76, pipe, req, req_type, val, offset,
-                                 buf, buflen, ret);
-
-               if (ret == -ENODEV)
-                       set_bit(MT76_REMOVED, &dev->mt76.state);
-               if (ret >= 0 || ret == -ENODEV)
-                       return ret;
-
-               msleep(5);
-       }
+       val = mt76_rr(dev, MT_COM_REG0);
+       if (val & 1)
+               dev_dbg(dev->mt76.dev, "MCU not ready\n");
 
-       dev_err(dev->mt76.dev, "Vendor request req:%02x off:%04x failed:%d\n",
-               req, offset, ret);
+       val = mt76_rr(dev, MT_USB_DMA_CFG);
 
-       return ret;
+       val |= MT_USB_DMA_CFG_RX_DROP_OR_PAD;
+       mt76_wr(dev, MT_USB_DMA_CFG, val);
+       val &= ~MT_USB_DMA_CFG_RX_DROP_OR_PAD;
+       mt76_wr(dev, MT_USB_DMA_CFG, val);
 }
 
-void mt76x0_vendor_reset(struct mt76x0_dev *dev)
+static void mt76x0u_cleanup(struct mt76x02_dev *dev)
 {
-       mt76x0_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
-                             MT_VEND_DEV_MODE_RESET, 0, NULL, 0);
+       clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+       mt76x0_chip_onoff(dev, false, false);
+       mt76u_queues_deinit(&dev->mt76);
+       mt76u_mcu_deinit(&dev->mt76);
 }
 
-static u32 mt76x0_rr(struct mt76_dev *dev, u32 offset)
+static void mt76x0u_mac_stop(struct mt76x02_dev *dev)
 {
-       struct mt76x0_dev *mdev = (struct mt76x0_dev *) dev;
-       int ret;
-       u32 val = ~0;
+       clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+       cancel_delayed_work_sync(&dev->cal_work);
+       cancel_delayed_work_sync(&dev->mac_work);
+       mt76u_stop_stat_wk(&dev->mt76);
 
-       WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset);
+       if (test_bit(MT76_REMOVED, &dev->mt76.state))
+               return;
 
-       mutex_lock(&mdev->usb_ctrl_mtx);
+       mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
+                  MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
+                  MT_BEACON_TIME_CFG_BEACON_TX);
 
-       ret = mt76x0_vendor_request((struct mt76x0_dev *)dev, MT_VEND_MULTI_READ, USB_DIR_IN,
-                                   0, offset, mdev->data, MT_VEND_BUF);
-       if (ret == MT_VEND_BUF)
-               val = get_unaligned_le32(mdev->data);
-       else if (ret > 0)
-               dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n",
-                       ret, offset);
+       if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
+               dev_warn(dev->mt76.dev, "TX DMA did not stop\n");
 
-       mutex_unlock(&mdev->usb_ctrl_mtx);
+       mt76x0_mac_stop(dev);
 
-       trace_mt76x0_reg_read(dev, offset, val);
-       return val;
+       if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000))
+               dev_warn(dev->mt76.dev, "RX DMA did not stop\n");
 }
 
-int mt76x0_vendor_single_wr(struct mt76x0_dev *dev, const u8 req,
-                            const u16 offset, const u32 val)
+static int mt76x0u_start(struct ieee80211_hw *hw)
 {
-       struct mt76x0_dev *mdev = dev;
+       struct mt76x02_dev *dev = hw->priv;
        int ret;
 
-       mutex_lock(&mdev->usb_ctrl_mtx);
+       mutex_lock(&dev->mt76.mutex);
 
-       ret = mt76x0_vendor_request(dev, req, USB_DIR_OUT,
-                                   val & 0xffff, offset, NULL, 0);
-       if (!ret)
-               ret = mt76x0_vendor_request(dev, req, USB_DIR_OUT,
-                                           val >> 16, offset + 2, NULL, 0);
+       ret = mt76x0_mac_start(dev);
+       if (ret)
+               goto out;
 
-       mutex_unlock(&mdev->usb_ctrl_mtx);
+       ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
+                                    MT_CALIBRATE_INTERVAL);
+       ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+                                    MT_CALIBRATE_INTERVAL);
+       set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
 
+out:
+       mutex_unlock(&dev->mt76.mutex);
        return ret;
 }
 
-static void mt76x0_wr(struct mt76_dev *dev, u32 offset, u32 val)
+static void mt76x0u_stop(struct ieee80211_hw *hw)
 {
-       struct mt76x0_dev *mdev = (struct mt76x0_dev *) dev;
-       int ret;
+       struct mt76x02_dev *dev = hw->priv;
 
-       WARN_ONCE(offset > USHRT_MAX, "write high off:%08x", offset);
-
-       mutex_lock(&mdev->usb_ctrl_mtx);
-
-       put_unaligned_le32(val, mdev->data);
-       ret = mt76x0_vendor_request(mdev, MT_VEND_MULTI_WRITE, USB_DIR_OUT,
-                                   0, offset, mdev->data, MT_VEND_BUF);
-       trace_mt76x0_reg_write(dev, offset, val);
-
-       mutex_unlock(&mdev->usb_ctrl_mtx);
+       mutex_lock(&dev->mt76.mutex);
+       mt76x0u_mac_stop(dev);
+       mutex_unlock(&dev->mt76.mutex);
 }
 
-static u32 mt76x0_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
-{
-       val |= mt76x0_rr(dev, offset) & ~mask;
-       mt76x0_wr(dev, offset, val);
-       return val;
-}
+static const struct ieee80211_ops mt76x0u_ops = {
+       .tx = mt76x02_tx,
+       .start = mt76x0u_start,
+       .stop = mt76x0u_stop,
+       .add_interface = mt76x02_add_interface,
+       .remove_interface = mt76x02_remove_interface,
+       .config = mt76x0_config,
+       .configure_filter = mt76x02_configure_filter,
+       .bss_info_changed = mt76x0_bss_info_changed,
+       .sta_add = mt76x02_sta_add,
+       .sta_remove = mt76x02_sta_remove,
+       .set_key = mt76x02_set_key,
+       .conf_tx = mt76x02_conf_tx,
+       .sw_scan_start = mt76x0_sw_scan,
+       .sw_scan_complete = mt76x0_sw_scan_complete,
+       .ampdu_action = mt76x02_ampdu_action,
+       .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
+       .set_rts_threshold = mt76x0_set_rts_threshold,
+       .wake_tx_queue = mt76_wake_tx_queue,
+};
 
-static void mt76x0_wr_copy(struct mt76_dev *dev, u32 offset,
-                          const void *data, int len)
+static int mt76x0u_register_device(struct mt76x02_dev *dev)
 {
-       WARN_ONCE(offset & 3, "unaligned write copy off:%08x", offset);
-       WARN_ONCE(len & 3, "short write copy off:%08x", offset);
+       struct ieee80211_hw *hw = dev->mt76.hw;
+       int err;
 
-       mt76x0_burst_write_regs((struct mt76x0_dev *) dev, offset, data, len / 4);
-}
+       err = mt76u_alloc_queues(&dev->mt76);
+       if (err < 0)
+               goto out_err;
 
-void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr)
-{
-       mt76_wr(dev, offset, get_unaligned_le32(addr));
-       mt76_wr(dev, offset + 4, addr[4] | addr[5] << 8);
-}
+       err = mt76u_mcu_init_rx(&dev->mt76);
+       if (err < 0)
+               goto out_err;
 
-static int mt76x0_assign_pipes(struct usb_interface *usb_intf,
-                               struct mt76x0_dev *dev)
-{
-       struct usb_endpoint_descriptor *ep_desc;
-       struct usb_host_interface *intf_desc = usb_intf->cur_altsetting;
-       unsigned i, ep_i = 0, ep_o = 0;
-
-       BUILD_BUG_ON(sizeof(dev->in_ep) < __MT_EP_IN_MAX);
-       BUILD_BUG_ON(sizeof(dev->out_ep) < __MT_EP_OUT_MAX);
-
-       for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
-               ep_desc = &intf_desc->endpoint[i].desc;
-
-               if (usb_endpoint_is_bulk_in(ep_desc) &&
-                   ep_i++ < __MT_EP_IN_MAX) {
-                       dev->in_ep[ep_i - 1] = usb_endpoint_num(ep_desc);
-                       dev->in_max_packet = usb_endpoint_maxp(ep_desc);
-                       /* Note: this is ignored by usb sub-system but vendor
-                        *       code does it. We can drop this at some point.
-                        */
-                       dev->in_ep[ep_i - 1] |= USB_DIR_IN;
-               } else if (usb_endpoint_is_bulk_out(ep_desc) &&
-                          ep_o++ < __MT_EP_OUT_MAX) {
-                       dev->out_ep[ep_o - 1] = usb_endpoint_num(ep_desc);
-                       dev->out_max_packet = usb_endpoint_maxp(ep_desc);
-               }
+       mt76x0_chip_onoff(dev, true, true);
+       if (!mt76x02_wait_for_mac(&dev->mt76)) {
+               err = -ETIMEDOUT;
+               goto out_err;
        }
 
-       if (ep_i != __MT_EP_IN_MAX || ep_o != __MT_EP_OUT_MAX) {
-               dev_err(dev->mt76.dev, "Error: wrong pipe number in:%d out:%d\n",
-                       ep_i, ep_o);
-               return -EINVAL;
-       }
+       err = mt76x0u_mcu_init(dev);
+       if (err < 0)
+               goto out_err;
+
+       mt76x0_init_usb_dma(dev);
+       err = mt76x0_init_hardware(dev);
+       if (err < 0)
+               goto out_err;
+
+       mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+       mt76_wr(dev, MT_TXOP_CTRL_CFG,
+               FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
+               FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
+
+       err = mt76x0_register_device(dev);
+       if (err < 0)
+               goto out_err;
+
+       /* check hw sg support in order to enable AMSDU */
+       if (mt76u_check_sg(&dev->mt76))
+               hw->max_tx_fragments = MT_SG_MAX_SIZE;
+       else
+               hw->max_tx_fragments = 1;
+
+       set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
 
        return 0;
+
+out_err:
+       mt76x0u_cleanup(dev);
+       return err;
 }
 
-static int mt76x0_probe(struct usb_interface *usb_intf,
+static int mt76x0u_probe(struct usb_interface *usb_intf,
                         const struct usb_device_id *id)
 {
+       static const struct mt76_driver_ops drv_ops = {
+               .tx_prepare_skb = mt76x02u_tx_prepare_skb,
+               .tx_complete_skb = mt76x02u_tx_complete_skb,
+               .tx_status_data = mt76x02_tx_status_data,
+               .rx_skb = mt76x02_queue_rx_skb,
+       };
        struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
-       struct mt76x0_dev *dev;
+       struct mt76x02_dev *dev;
        u32 asic_rev, mac_rev;
        int ret;
-       static const struct mt76_bus_ops usb_ops = {
-               .rr = mt76x0_rr,
-               .wr = mt76x0_wr,
-               .rmw = mt76x0_rmw,
-               .copy = mt76x0_wr_copy,
-       };
 
-       dev = mt76x0_alloc_device(&usb_intf->dev);
+       dev = mt76x0_alloc_device(&usb_intf->dev, &drv_ops,
+                                 &mt76x0u_ops);
        if (!dev)
                return -ENOMEM;
 
+       /* Quirk for Archer T1U */
+       if (id->driver_info)
+               dev->no_2ghz = true;
+
        usb_dev = usb_get_dev(usb_dev);
        usb_reset_device(usb_dev);
 
        usb_set_intfdata(usb_intf, dev);
 
-       dev->mt76.bus = &usb_ops;
-
-       ret = mt76x0_assign_pipes(usb_intf, dev);
+       mt76x02u_init_mcu(&dev->mt76);
+       ret = mt76u_init(&dev->mt76, usb_intf);
        if (ret)
                goto err;
 
        /* Disable the HW, otherwise the MCU fails to initialize on hot reboot */
        mt76x0_chip_onoff(dev, false, false);
 
-       ret = mt76x0_wait_asic_ready(dev);
-       if (ret)
+       if (!mt76x02_wait_for_mac(&dev->mt76)) {
+               ret = -ETIMEDOUT;
                goto err;
+       }
 
        asic_rev = mt76_rr(dev, MT_ASIC_VERSION);
        mac_rev = mt76_rr(dev, MT_MAC_CSR0);
@@ -300,77 +260,89 @@ static int mt76x0_probe(struct usb_interface *usb_intf,
        if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
                dev_warn(dev->mt76.dev, "Warning: eFUSE not present\n");
 
-       ret = mt76x0_init_hardware(dev);
-       if (ret)
+       ret = mt76x0u_register_device(dev);
+       if (ret < 0)
                goto err;
 
-       ret = mt76x0_register_device(dev);
-       if (ret)
-               goto err_hw;
-
-       set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
-
        return 0;
-err_hw:
-       mt76x0_cleanup(dev);
+
 err:
        usb_set_intfdata(usb_intf, NULL);
        usb_put_dev(interface_to_usbdev(usb_intf));
 
-       destroy_workqueue(dev->stat_wq);
        ieee80211_free_hw(dev->mt76.hw);
        return ret;
 }
 
 static void mt76x0_disconnect(struct usb_interface *usb_intf)
 {
-       struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+       struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
        bool initalized = test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
 
        if (!initalized)
                return;
 
        ieee80211_unregister_hw(dev->mt76.hw);
-       mt76x0_cleanup(dev);
+       mt76x0u_cleanup(dev);
 
        usb_set_intfdata(usb_intf, NULL);
        usb_put_dev(interface_to_usbdev(usb_intf));
 
-       destroy_workqueue(dev->stat_wq);
        ieee80211_free_hw(dev->mt76.hw);
 }
 
-static int mt76x0_suspend(struct usb_interface *usb_intf, pm_message_t state)
+static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
+                                        pm_message_t state)
 {
-       struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+       struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
+       struct mt76_usb *usb = &dev->mt76.usb;
 
-       mt76x0_cleanup(dev);
+       mt76u_stop_queues(&dev->mt76);
+       mt76x0u_mac_stop(dev);
+       usb_kill_urb(usb->mcu.res.urb);
 
        return 0;
 }
 
-static int mt76x0_resume(struct usb_interface *usb_intf)
+static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
 {
-       struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+       struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
+       struct mt76_usb *usb = &dev->mt76.usb;
        int ret;
 
+       reinit_completion(&usb->mcu.cmpl);
+       ret = mt76u_submit_buf(&dev->mt76, USB_DIR_IN,
+                              MT_EP_IN_CMD_RESP,
+                              &usb->mcu.res, GFP_KERNEL,
+                              mt76u_mcu_complete_urb,
+                              &usb->mcu.cmpl);
+       if (ret < 0)
+               goto err;
+
+       ret = mt76u_submit_rx_buffers(&dev->mt76);
+       if (ret < 0)
+               goto err;
+
+       tasklet_enable(&usb->rx_tasklet);
+       tasklet_enable(&usb->tx_tasklet);
+
        ret = mt76x0_init_hardware(dev);
        if (ret)
-               return ret;
-
-       set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+               goto err;
 
        return 0;
+err:
+       mt76x0u_cleanup(dev);
+       return ret;
 }
 
 MODULE_DEVICE_TABLE(usb, mt76x0_device_table);
-MODULE_FIRMWARE(MT7610_FIRMWARE);
 MODULE_LICENSE("GPL");
 
 static struct usb_driver mt76x0_driver = {
        .name           = KBUILD_MODNAME,
        .id_table       = mt76x0_device_table,
-       .probe          = mt76x0_probe,
+       .probe          = mt76x0u_probe,
        .disconnect     = mt76x0_disconnect,
        .suspend        = mt76x0_suspend,
        .resume         = mt76x0_resume,
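
An easy-to-miss detail in the hunks above: the TP-LINK Archer T1U entry in the device table gains .driver_info = 1, and mt76x0u_probe() reads that flag back through its const struct usb_device_id *id argument to set dev->no_2ghz. A minimal sketch of the mechanism (illustrative only; the table and probe names are hypothetical):

#include <linux/usb.h>

/* Per-device quirk flags travel from the match table to probe() via
 * usb_device_id.driver_info. */
static const struct usb_device_id example_table[] = {
        { USB_DEVICE(0x2357, 0x0105),
          .driver_info = 1 },           /* entry requesting the quirk */
        { },
};

static int example_probe(struct usb_interface *intf,
                         const struct usb_device_id *id)
{
        bool no_2ghz = id->driver_info; /* non-zero for quirked devices */

        /* a real probe would record the flag in its device state */
        (void)no_2ghz;
        return 0;
}
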
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h
deleted file mode 100644 (file)
index 492e431..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MT76X0U_USB_H
-#define __MT76X0U_USB_H
-
-#include "mt76x0.h"
-
-#define MT7610_FIRMWARE        "mediatek/mt7610u.bin"
-
-#define MT_VEND_REQ_MAX_RETRY  10
-#define MT_VEND_REQ_TOUT_MS    300
-
-#define MT_VEND_DEV_MODE_RESET 1
-
-#define MT_VEND_BUF            sizeof(__le32)
-
-static inline struct usb_device *mt76x0_to_usb_dev(struct mt76x0_dev *mt76x0)
-{
-       return interface_to_usbdev(to_usb_interface(mt76x0->mt76.dev));
-}
-
-static inline struct usb_device *mt76_to_usb_dev(struct mt76_dev *mt76)
-{
-       return interface_to_usbdev(to_usb_interface(mt76->dev));
-}
-
-static inline bool mt76x0_urb_has_error(struct urb *urb)
-{
-       return urb->status &&
-               urb->status != -ENOENT &&
-               urb->status != -ECONNRESET &&
-               urb->status != -ESHUTDOWN;
-}
-
-bool mt76x0_usb_alloc_buf(struct mt76x0_dev *dev, size_t len,
-                          struct mt76x0_dma_buf *buf);
-void mt76x0_usb_free_buf(struct mt76x0_dev *dev, struct mt76x0_dma_buf *buf);
-int mt76x0_usb_submit_buf(struct mt76x0_dev *dev, int dir, int ep_idx,
-                          struct mt76x0_dma_buf *buf, gfp_t gfp,
-                          usb_complete_t complete_fn, void *context);
-void mt76x0_complete_urb(struct urb *urb);
-
-int mt76x0_vendor_request(struct mt76x0_dev *dev, const u8 req,
-                          const u8 direction, const u16 val, const u16 offset,
-                          void *buf, const size_t buflen);
-void mt76x0_vendor_reset(struct mt76x0_dev *dev);
-int mt76x0_vendor_single_wr(struct mt76x0_dev *dev, const u8 req,
-                            const u16 offset, const u32 val);
-
-#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
new file mode 100644 (file)
index 0000000..fb6fa1f
--- /dev/null
@@ -0,0 +1,176 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+
+#include "mt76x0.h"
+#include "mcu.h"
+#include "../mt76x02_usb.h"
+
+#define MCU_FW_URB_MAX_PAYLOAD         0x38f8
+#define MCU_FW_URB_SIZE                        (MCU_FW_URB_MAX_PAYLOAD + 12)
+#define MT7610U_FIRMWARE               "mediatek/mt7610u.bin"
+
+static int
+mt76x0u_upload_firmware(struct mt76x02_dev *dev,
+                       const struct mt76x02_fw_header *hdr)
+{
+       u8 *fw_payload = (u8 *)(hdr + 1);
+       u32 ilm_len, dlm_len;
+       void *ivb;
+       int err;
+
+       ivb = kmemdup(fw_payload, MT_MCU_IVB_SIZE, GFP_KERNEL);
+       if (!ivb)
+               return -ENOMEM;
+
+       ilm_len = le32_to_cpu(hdr->ilm_len) - MT_MCU_IVB_SIZE;
+       dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %u\n",
+               ilm_len, MT_MCU_IVB_SIZE);
+       err = mt76x02u_mcu_fw_send_data(&dev->mt76,
+                                       fw_payload + MT_MCU_IVB_SIZE,
+                                       ilm_len, MCU_FW_URB_MAX_PAYLOAD,
+                                       MT_MCU_IVB_SIZE);
+       if (err)
+               goto out;
+
+       dlm_len = le32_to_cpu(hdr->dlm_len);
+       dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len);
+       err = mt76x02u_mcu_fw_send_data(&dev->mt76,
+                                       fw_payload + le32_to_cpu(hdr->ilm_len),
+                                       dlm_len, MCU_FW_URB_MAX_PAYLOAD,
+                                       MT_MCU_DLM_OFFSET);
+       if (err)
+               goto out;
+
+       err = mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+                                  USB_DIR_OUT | USB_TYPE_VENDOR,
+                                  0x12, 0, ivb, MT_MCU_IVB_SIZE);
+       if (err < 0)
+               goto out;
+
+       if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 1000)) {
+               dev_err(dev->mt76.dev, "Firmware failed to start\n");
+               err = -ETIMEDOUT;
+               goto out;
+       }
+
+       dev_dbg(dev->mt76.dev, "Firmware running!\n");
+
+out:
+       kfree(ivb);
+
+       return err;
+}
+
+static int mt76x0u_load_firmware(struct mt76x02_dev *dev)
+{
+       const struct firmware *fw;
+       const struct mt76x02_fw_header *hdr;
+       int len, ret;
+       u32 val;
+
+       mt76_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+                                     MT_USB_DMA_CFG_TX_BULK_EN));
+
+       if (mt76x0_firmware_running(dev))
+               return 0;
+
+       ret = request_firmware(&fw, MT7610U_FIRMWARE, dev->mt76.dev);
+       if (ret)
+               return ret;
+
+       if (!fw || !fw->data || fw->size < sizeof(*hdr))
+               goto err_inv_fw;
+
+       hdr = (const struct mt76x02_fw_header *)fw->data;
+
+       if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
+               goto err_inv_fw;
+
+       len = sizeof(*hdr);
+       len += le32_to_cpu(hdr->ilm_len);
+       len += le32_to_cpu(hdr->dlm_len);
+
+       if (fw->size != len)
+               goto err_inv_fw;
+
+       val = le16_to_cpu(hdr->fw_ver);
+       dev_dbg(dev->mt76.dev,
+               "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
+               (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
+               le16_to_cpu(hdr->build_ver), hdr->build_time);
+
+       len = le32_to_cpu(hdr->ilm_len);
+
+       mt76_wr(dev, 0x1004, 0x2c);
+
+       mt76_set(dev, MT_USB_DMA_CFG,
+                (MT_USB_DMA_CFG_RX_BULK_EN | MT_USB_DMA_CFG_TX_BULK_EN) |
+                FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20));
+       mt76x02u_mcu_fw_reset(&dev->mt76);
+       usleep_range(5000, 6000);
+/*
+       mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
+                                        MT_PBF_CFG_TX1Q_EN |
+                                        MT_PBF_CFG_TX2Q_EN |
+                                        MT_PBF_CFG_TX3Q_EN));
+*/
+
+       mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+       /* FCE tx_fs_base_ptr */
+       mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+       /* FCE tx_fs_max_cnt */
+       mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
+       /* FCE pdma enable */
+       mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+       /* FCE skip_fs_en */
+       mt76_wr(dev, MT_FCE_SKIP_FS, 3);
+
+       val = mt76_rr(dev, MT_USB_DMA_CFG);
+       val |= MT_USB_DMA_CFG_UDMA_TX_WL_DROP;
+       mt76_wr(dev, MT_USB_DMA_CFG, val);
+       val &= ~MT_USB_DMA_CFG_UDMA_TX_WL_DROP;
+       mt76_wr(dev, MT_USB_DMA_CFG, val);
+
+       ret = mt76x0u_upload_firmware(dev, hdr);
+       release_firmware(fw);
+
+       mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+       return ret;
+
+err_inv_fw:
+       dev_err(dev->mt76.dev, "Invalid firmware image\n");
+       release_firmware(fw);
+       return -ENOENT;
+}
+
+int mt76x0u_mcu_init(struct mt76x02_dev *dev)
+{
+       int ret;
+
+       ret = mt76x0u_load_firmware(dev);
+       if (ret < 0)
+               return ret;
+
+       set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+
+       return 0;
+}
+
+MODULE_FIRMWARE(MT7610U_FIRMWARE);
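
For reference, the invalid-image check in mt76x0u_load_firmware() above boils
down to recomputing the expected file size from the little-endian header
fields. A minimal standalone sketch (fw_size_ok is a hypothetical helper name,
not part of this patch):

/* The MT7610U image is header + ILM + DLM laid out back to back, so a
 * well-formed file's size equals the header size plus both segment lengths.
 */
static bool fw_size_ok(const struct firmware *fw)
{
	const struct mt76x02_fw_header *hdr;
	size_t expect;

	if (!fw || !fw->data || fw->size < sizeof(*hdr))
		return false;

	hdr = (const void *)fw->data;
	expect = sizeof(*hdr) + le32_to_cpu(hdr->ilm_len) +
		 le32_to_cpu(hdr->dlm_len);
	return fw->size == expect;
}
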
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/util.c b/drivers/net/wireless/mediatek/mt76/mt76x0/util.c
deleted file mode 100644
index 7856dd7..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include "mt76x0.h"
-
-void mt76x0_remove_hdr_pad(struct sk_buff *skb)
-{
-       int len = ieee80211_get_hdrlen_from_skb(skb);
-
-       memmove(skb->data + 2, skb->data, len);
-       skb_pull(skb, 2);
-}
-
-int mt76x0_insert_hdr_pad(struct sk_buff *skb)
-{
-       int len = ieee80211_get_hdrlen_from_skb(skb);
-       int ret;
-
-       if (len % 4 == 0)
-               return 0;
-
-       ret = skb_cow(skb, 2);
-       if (ret)
-               return ret;
-
-       skb_push(skb, 2);
-       memmove(skb->data, skb->data + 2, len);
-
-       skb->data[len] = 0;
-       skb->data[len + 1] = 0;
-       return 0;
-}
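
The two helpers removed here survive as the mt76x02_insert_hdr_pad() and
mt76x02_remove_hdr_pad() declarations in mt76x02.h below; as the code shows,
the 2-byte pad is inserted only when the 802.11 header length is not already a
multiple of 4. A sketch of that alignment test (needs_hdr_pad is a
hypothetical name, not part of this patch):

/* Illustration only: a frame needs the 2-byte pad exactly when its
 * 802.11 header length is not a multiple of 4.
 */
static inline bool needs_hdr_pad(const struct sk_buff *skb)
{
	return ieee80211_get_hdrlen_from_skb(skb) % 4 != 0;
}
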
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
new file mode 100644
index 0000000..6517481
--- /dev/null
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76X02_UTIL_H
+#define __MT76X02_UTIL_H
+
+#include <linux/kfifo.h>
+
+#include "mt76.h"
+#include "mt76x02_regs.h"
+#include "mt76x02_mac.h"
+#include "mt76x02_dfs.h"
+#include "mt76x02_dma.h"
+
+struct mt76x02_mac_stats {
+       u64 rx_stat[6];
+       u64 tx_stat[6];
+       u64 aggr_stat[2];
+       u64 aggr_n[32];
+       u64 zero_len_del[2];
+};
+
+#define MT_MAX_CHAINS          2
+struct mt76x02_rx_freq_cal {
+       s8 high_gain[MT_MAX_CHAINS];
+       s8 rssi_offset[MT_MAX_CHAINS];
+       s8 lna_gain;
+       u32 mcu_gain;
+       s16 temp_offset;
+       u8 freq_offset;
+};
+
+struct mt76x02_calibration {
+       struct mt76x02_rx_freq_cal rx;
+
+       u8 agc_gain_init[MT_MAX_CHAINS];
+       u8 agc_gain_cur[MT_MAX_CHAINS];
+
+       u16 false_cca;
+       s8 avg_rssi_all;
+       s8 agc_gain_adjust;
+       s8 low_gain;
+
+       u8 temp;
+
+       bool init_cal_done;
+       bool tssi_cal_done;
+       bool tssi_comp_pending;
+       bool dpd_cal_done;
+       bool channel_cal_done;
+};
+
+struct mt76x02_dev {
+       struct mt76_dev mt76; /* must be first */
+
+       struct mac_address macaddr_list[8];
+
+       struct mutex phy_mutex;
+       struct mutex mutex;
+
+       u8 txdone_seq;
+       DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
+
+       struct sk_buff *rx_head;
+
+       struct tasklet_struct tx_tasklet;
+       struct tasklet_struct pre_tbtt_tasklet;
+       struct delayed_work cal_work;
+       struct delayed_work mac_work;
+
+       struct mt76x02_mac_stats stats;
+       atomic_t avg_ampdu_len;
+       u32 aggr_stats[32];
+
+       struct sk_buff *beacons[8];
+       u8 beacon_mask;
+       u8 beacon_data_mask;
+
+       u8 tbtt_count;
+       u16 beacon_int;
+
+       struct mt76x02_calibration cal;
+
+       s8 target_power;
+       s8 target_power_delta[2];
+       bool enable_tpc;
+
+       bool no_2ghz;
+
+       u8 agc_save;
+
+       u8 coverage_class;
+       u8 slottime;
+
+       struct mt76x02_dfs_pattern_detector dfs_pd;
+};
+
+extern struct ieee80211_rate mt76x02_rates[12];
+
+void mt76x02_configure_filter(struct ieee80211_hw *hw,
+                            unsigned int changed_flags,
+                            unsigned int *total_flags, u64 multicast);
+int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                  struct ieee80211_sta *sta);
+int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                     struct ieee80211_sta *sta);
+
+void mt76x02_vif_init(struct mt76_dev *dev, struct ieee80211_vif *vif,
+                    unsigned int idx);
+int mt76x02_add_interface(struct ieee80211_hw *hw,
+                        struct ieee80211_vif *vif);
+void mt76x02_remove_interface(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif);
+
+int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                       struct ieee80211_ampdu_params *params);
+int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+                  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+                  struct ieee80211_key_conf *key);
+int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                  u16 queue, const struct ieee80211_tx_queue_params *params);
+void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif,
+                               struct ieee80211_sta *sta);
+s8 mt76x02_tx_get_max_txpwr_adj(struct mt76_dev *dev,
+                               const struct ieee80211_tx_rate *rate);
+s8 mt76x02_tx_get_txpwr_adj(struct mt76_dev *mdev, s8 txpwr, s8 max_txpwr_adj);
+void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr);
+int mt76x02_insert_hdr_pad(struct sk_buff *skb);
+void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
+void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb);
+bool mt76x02_tx_status_data(struct mt76_dev *dev, u8 *update);
+void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+                         struct sk_buff *skb);
+void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
+irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance);
+void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+               struct sk_buff *skb);
+int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
+                          struct sk_buff *skb, struct mt76_queue *q,
+                          struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+                          u32 *tx_info);
+
+extern const u16 mt76x02_beacon_offsets[16];
+void mt76x02_set_beacon_offsets(struct mt76_dev *dev);
+void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set);
+void mt76x02_mac_start(struct mt76x02_dev *dev);
+
+static inline void mt76x02_irq_enable(struct mt76x02_dev *dev, u32 mask)
+{
+       mt76x02_set_irq_mask(dev, 0, mask);
+}
+
+static inline void mt76x02_irq_disable(struct mt76x02_dev *dev, u32 mask)
+{
+       mt76x02_set_irq_mask(dev, mask, 0);
+}
+
+static inline bool
+mt76x02_wait_for_txrx_idle(struct mt76_dev *dev)
+{
+       return __mt76_poll_msec(dev, MT_MAC_STATUS,
+                               MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
+                               0, 100);
+}
+
+static inline struct mt76x02_sta *
+mt76x02_rx_get_sta(struct mt76_dev *dev, u8 idx)
+{
+       struct mt76_wcid *wcid;
+
+       if (idx >= ARRAY_SIZE(dev->wcid))
+               return NULL;
+
+       wcid = rcu_dereference(dev->wcid[idx]);
+       if (!wcid)
+               return NULL;
+
+       return container_of(wcid, struct mt76x02_sta, wcid);
+}
+
+static inline struct mt76_wcid *
+mt76x02_rx_get_sta_wcid(struct mt76x02_sta *sta, bool unicast)
+{
+       if (!sta)
+               return NULL;
+
+       if (unicast)
+               return &sta->wcid;
+       else
+               return &sta->vif->group_wcid;
+}
+
+#endif
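
mt76x02_rx_get_sta() above dereferences an RCU-protected slot of dev->wcid, so
it must run inside an RCU read-side section. A minimal usage sketch (idx and
unicast are assumed inputs, not part of this header):

/* Sketch: look up the station behind a hardware wcid index and pick the
 * matching wcid entry. dev->wcid[] is RCU-protected, so this runs under
 * rcu_read_lock(); the driver RX path already provides that context.
 */
struct mt76x02_sta *sta;
struct mt76_wcid *wcid;

rcu_read_lock();
sta = mt76x02_rx_get_sta(&dev->mt76, idx);
wcid = mt76x02_rx_get_sta_wcid(sta, unicast);
/* ... use sta/wcid while still inside the read-side section ... */
rcu_read_unlock();
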
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h
new file mode 100644
index 0000000..7e177c9
--- /dev/null
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x02_DFS_H
+#define __MT76x02_DFS_H
+
+#include <linux/types.h>
+#include <linux/nl80211.h>
+
+#define MT_DFS_GP_INTERVAL             (10 << 4) /* 64 us unit */
+#define MT_DFS_NUM_ENGINES             4
+
+/* bbp params */
+#define MT_DFS_SYM_ROUND               0
+#define MT_DFS_DELTA_DELAY             2
+#define MT_DFS_VGA_MASK                        0
+#define MT_DFS_PWR_GAIN_OFFSET         3
+#define MT_DFS_PWR_DOWN_TIME           0xf
+#define MT_DFS_RX_PE_MASK              0xff
+#define MT_DFS_PKT_END_MASK            0
+#define MT_DFS_CH_EN                   0xf
+
+/* sw detector params */
+#define MT_DFS_EVENT_LOOP              64
+#define MT_DFS_SW_TIMEOUT              (HZ / 20)
+#define MT_DFS_EVENT_WINDOW            (HZ / 5)
+#define MT_DFS_SEQUENCE_WINDOW         (200 * (1 << 20))
+#define MT_DFS_EVENT_TIME_MARGIN       2000
+#define MT_DFS_PRI_MARGIN              4
+#define MT_DFS_SEQUENCE_TH             6
+
+#define MT_DFS_FCC_MAX_PRI             ((28570 << 1) + 1000)
+#define MT_DFS_FCC_MIN_PRI             (3000 - 2)
+#define MT_DFS_JP_MAX_PRI              ((80000 << 1) + 1000)
+#define MT_DFS_JP_MIN_PRI              (28500 - 2)
+#define MT_DFS_ETSI_MAX_PRI            (133333 + 125000 + 117647 + 1000)
+#define MT_DFS_ETSI_MIN_PRI            (4500 - 20)
+
+struct mt76x02_radar_specs {
+       u8 mode;
+       u16 avg_len;
+       u16 e_low;
+       u16 e_high;
+       u16 w_low;
+       u16 w_high;
+       u16 w_margin;
+       u32 t_low;
+       u32 t_high;
+       u16 t_margin;
+       u32 b_low;
+       u32 b_high;
+       u32 event_expiration;
+       u16 pwr_jmp;
+};
+
+#define MT_DFS_CHECK_EVENT(x)          ((x) != GENMASK(31, 0))
+#define MT_DFS_EVENT_ENGINE(x)         (((x) & BIT(31)) ? 2 : 0)
+#define MT_DFS_EVENT_TIMESTAMP(x)      ((x) & GENMASK(21, 0))
+#define MT_DFS_EVENT_WIDTH(x)          ((x) & GENMASK(11, 0))
+struct mt76x02_dfs_event {
+       unsigned long fetch_ts;
+       u32 ts;
+       u16 width;
+       u8 engine;
+};
+
+#define MT_DFS_EVENT_BUFLEN            256
+struct mt76x02_dfs_event_rb {
+       struct mt76x02_dfs_event data[MT_DFS_EVENT_BUFLEN];
+       int h_rb, t_rb;
+};
+
+struct mt76x02_dfs_sequence {
+       struct list_head head;
+       u32 first_ts;
+       u32 last_ts;
+       u32 pri;
+       u16 count;
+       u8 engine;
+};
+
+struct mt76x02_dfs_hw_pulse {
+       u8 engine;
+       u32 period;
+       u32 w1;
+       u32 w2;
+       u32 burst;
+};
+
+struct mt76x02_dfs_sw_detector_params {
+       u32 min_pri;
+       u32 max_pri;
+       u32 pri_margin;
+};
+
+struct mt76x02_dfs_engine_stats {
+       u32 hw_pattern;
+       u32 hw_pulse_discarded;
+       u32 sw_pattern;
+};
+
+struct mt76x02_dfs_seq_stats {
+       u32 seq_pool_len;
+       u32 seq_len;
+};
+
+struct mt76x02_dfs_pattern_detector {
+       enum nl80211_dfs_regions region;
+
+       u8 chirp_pulse_cnt;
+       u32 chirp_pulse_ts;
+
+       struct mt76x02_dfs_sw_detector_params sw_dpd_params;
+       struct mt76x02_dfs_event_rb event_rb[2];
+
+       struct list_head sequences;
+       struct list_head seq_pool;
+       struct mt76x02_dfs_seq_stats seq_stats;
+
+       unsigned long last_sw_check;
+       u32 last_event_ts;
+
+       struct mt76x02_dfs_engine_stats stats[MT_DFS_NUM_ENGINES];
+       struct tasklet_struct dfs_tasklet;
+};
+
+#endif /* __MT76x02_DFS_H */
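
The event ring declared above is a fixed array indexed by h_rb (head, oldest
entry) and t_rb (tail); a wrap-around push could look like this (a sketch,
dfs_event_push is a hypothetical name and not part of this header):

/* Illustration: append a radar event, overwriting the oldest entry
 * when the ring is full.
 */
static void dfs_event_push(struct mt76x02_dfs_event_rb *rb,
			   const struct mt76x02_dfs_event *ev)
{
	rb->data[rb->t_rb] = *ev;
	rb->t_rb = (rb->t_rb + 1) % MT_DFS_EVENT_BUFLEN;
	if (rb->t_rb == rb->h_rb)
		rb->h_rb = (rb->h_rb + 1) % MT_DFS_EVENT_BUFLEN;
}
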
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h
new file mode 100644
index 0000000..6394010
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x02_DMA_H
+#define __MT76x02_DMA_H
+
+#include "mt76x02.h"
+#include "dma.h"
+
+#define MT_TXD_INFO_LEN                        GENMASK(15, 0)
+#define MT_TXD_INFO_NEXT_VLD           BIT(16)
+#define MT_TXD_INFO_TX_BURST           BIT(17)
+#define MT_TXD_INFO_80211              BIT(19)
+#define MT_TXD_INFO_TSO                        BIT(20)
+#define MT_TXD_INFO_CSO                        BIT(21)
+#define MT_TXD_INFO_WIV                        BIT(24)
+#define MT_TXD_INFO_QSEL               GENMASK(26, 25)
+#define MT_TXD_INFO_DPORT              GENMASK(29, 27)
+#define MT_TXD_INFO_TYPE               GENMASK(31, 30)
+
+#define MT_RX_FCE_INFO_LEN             GENMASK(13, 0)
+#define MT_RX_FCE_INFO_SELF_GEN                BIT(15)
+#define MT_RX_FCE_INFO_CMD_SEQ         GENMASK(19, 16)
+#define MT_RX_FCE_INFO_EVT_TYPE                GENMASK(23, 20)
+#define MT_RX_FCE_INFO_PCIE_INTR       BIT(24)
+#define MT_RX_FCE_INFO_QSEL            GENMASK(26, 25)
+#define MT_RX_FCE_INFO_D_PORT          GENMASK(29, 27)
+#define MT_RX_FCE_INFO_TYPE            GENMASK(31, 30)
+
+/* MCU request message header */
+#define MT_MCU_MSG_LEN                 GENMASK(15, 0)
+#define MT_MCU_MSG_CMD_SEQ             GENMASK(19, 16)
+#define MT_MCU_MSG_CMD_TYPE            GENMASK(26, 20)
+#define MT_MCU_MSG_PORT                        GENMASK(29, 27)
+#define MT_MCU_MSG_TYPE                        GENMASK(31, 30)
+#define MT_MCU_MSG_TYPE_CMD            BIT(30)
+
+#define MT_RX_HEADROOM                 32
+#define MT76X02_RX_RING_SIZE           256
+
+enum dma_msg_port {
+       WLAN_PORT,
+       CPU_RX_PORT,
+       CPU_TX_PORT,
+       HOST_PORT,
+       VIRTUAL_CPU_RX_PORT,
+       VIRTUAL_CPU_TX_PORT,
+       DISCARD,
+};
+
+static inline bool
+mt76x02_wait_for_wpdma(struct mt76_dev *dev, int timeout)
+{
+       return __mt76_poll(dev, MT_WPDMA_GLO_CFG,
+                          MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+                          MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
+                          0, timeout);
+}
+
+int mt76x02_dma_init(struct mt76x02_dev *dev);
+void mt76x02_dma_disable(struct mt76x02_dev *dev);
+void mt76x02_dma_cleanup(struct mt76x02_dev *dev);
+
+#endif /* __MT76x02_DMA_H */
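
The GENMASK()/BIT() definitions above pair with FIELD_PREP()/FIELD_GET() from
<linux/bitfield.h>; for instance, a TX descriptor info word for an 802.11
frame could be composed as follows (a sketch; len and qsel are assumed
inputs, not values mandated by this header):

/* Sketch: build a TXD info dword for an 802.11 frame of 'len' bytes
 * on queue selector 'qsel'.
 */
u32 info = FIELD_PREP(MT_TXD_INFO_LEN, len) |
	   FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
	   MT_TXD_INFO_80211;
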
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
new file mode 100644
index 0000000..d3efeb8
--- /dev/null
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <asm/unaligned.h>
+
+#include "mt76.h"
+#include "mt76x02_eeprom.h"
+#include "mt76x02_regs.h"
+
+static int
+mt76x02_efuse_read(struct mt76_dev *dev, u16 addr, u8 *data,
+                  enum mt76x02_eeprom_modes mode)
+{
+       u32 val;
+       int i;
+
+       val = __mt76_rr(dev, MT_EFUSE_CTRL);
+       val &= ~(MT_EFUSE_CTRL_AIN |
+                MT_EFUSE_CTRL_MODE);
+       val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
+       val |= FIELD_PREP(MT_EFUSE_CTRL_MODE, mode);
+       val |= MT_EFUSE_CTRL_KICK;
+       __mt76_wr(dev, MT_EFUSE_CTRL, val);
+
+       if (!__mt76_poll_msec(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK,
+                             0, 1000))
+               return -ETIMEDOUT;
+
+       udelay(2);
+
+       val = __mt76_rr(dev, MT_EFUSE_CTRL);
+       if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
+               memset(data, 0xff, 16);
+               return 0;
+       }
+
+       for (i = 0; i < 4; i++) {
+               val = __mt76_rr(dev, MT_EFUSE_DATA(i));
+               put_unaligned_le32(val, data + 4 * i);
+       }
+
+       return 0;
+}
+
+int mt76x02_get_efuse_data(struct mt76_dev *dev, u16 base, void *buf,
+                          int len, enum mt76x02_eeprom_modes mode)
+{
+       int ret, i;
+
+       for (i = 0; i + 16 <= len; i += 16) {
+               ret = mt76x02_efuse_read(dev, base + i, buf + i, mode);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_get_efuse_data);
+
+void mt76x02_eeprom_parse_hw_cap(struct mt76_dev *dev)
+{
+       u16 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
+
+       switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) {
+       case BOARD_TYPE_5GHZ:
+               dev->cap.has_5ghz = true;
+               break;
+       case BOARD_TYPE_2GHZ:
+               dev->cap.has_2ghz = true;
+               break;
+       default:
+               dev->cap.has_2ghz = true;
+               dev->cap.has_5ghz = true;
+               break;
+       }
+}
+EXPORT_SYMBOL_GPL(mt76x02_eeprom_parse_hw_cap);
+
+bool mt76x02_ext_pa_enabled(struct mt76_dev *dev, enum nl80211_band band)
+{
+       u16 conf0 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
+
+       if (band == NL80211_BAND_5GHZ)
+               return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_5G);
+       else
+               return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_2G);
+}
+EXPORT_SYMBOL_GPL(mt76x02_ext_pa_enabled);
+
+void mt76x02_get_rx_gain(struct mt76_dev *dev, enum nl80211_band band,
+                        u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g)
+{
+       u16 val;
+
+       val = mt76x02_eeprom_get(dev, MT_EE_LNA_GAIN);
+       *lna_2g = val & 0xff;
+       lna_5g[0] = val >> 8;
+
+       val = mt76x02_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_1);
+       lna_5g[1] = val >> 8;
+
+       val = mt76x02_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_1);
+       lna_5g[2] = val >> 8;
+
+       if (!mt76x02_field_valid(lna_5g[1]))
+               lna_5g[1] = lna_5g[0];
+
+       if (!mt76x02_field_valid(lna_5g[2]))
+               lna_5g[2] = lna_5g[0];
+
+       if (band == NL80211_BAND_2GHZ)
+               *rssi_offset = mt76x02_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_0);
+       else
+               *rssi_offset = mt76x02_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_0);
+}
+EXPORT_SYMBOL_GPL(mt76x02_get_rx_gain);
+
+u8 mt76x02_get_lna_gain(struct mt76_dev *dev,
+                       s8 *lna_2g, s8 *lna_5g,
+                       struct ieee80211_channel *chan)
+{
+       u16 val;
+       u8 lna;
+
+       val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
+       if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
+               *lna_2g = 0;
+       if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G)
+               memset(lna_5g, 0, sizeof(s8) * 3);
+
+       if (chan->band == NL80211_BAND_2GHZ)
+               lna = *lna_2g;
+       else if (chan->hw_value <= 64)
+               lna = lna_5g[0];
+       else if (chan->hw_value <= 128)
+               lna = lna_5g[1];
+       else
+               lna = lna_5g[2];
+
+       return lna != 0xff ? lna : 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_get_lna_gain);
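
Note that the read loop in mt76x02_get_efuse_data() only copies whole 16-byte
efuse blocks (the "i + 16 <= len" bound), so callers round their buffer up to
a multiple of 16. A usage sketch (dev is assumed to be a struct mt76_dev *;
MT_EFUSE_USAGE_MAP_SIZE comes from mt76x02_eeprom.h below):

/* Sketch: read the efuse usage map region into a block-aligned buffer. */
u8 data[DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16) * 16];
int ret;

ret = mt76x02_get_efuse_data(dev, MT_EE_USAGE_MAP_START, data,
			     sizeof(data), MT_EE_PHYSICAL_READ);
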
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
new file mode 100644
index 0000000..bcd05f7
--- /dev/null
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x02_EEPROM_H
+#define __MT76x02_EEPROM_H
+
+enum mt76x02_eeprom_field {
+       MT_EE_CHIP_ID =                         0x000,
+       MT_EE_VERSION =                         0x002,
+       MT_EE_MAC_ADDR =                        0x004,
+       MT_EE_PCI_ID =                          0x00A,
+       MT_EE_NIC_CONF_0 =                      0x034,
+       MT_EE_NIC_CONF_1 =                      0x036,
+       MT_EE_COUNTRY_REGION_5GHZ =             0x038,
+       MT_EE_COUNTRY_REGION_2GHZ =             0x039,
+       MT_EE_FREQ_OFFSET =                     0x03a,
+       MT_EE_NIC_CONF_2 =                      0x042,
+
+       MT_EE_XTAL_TRIM_1 =                     0x03a,
+       MT_EE_XTAL_TRIM_2 =                     0x09e,
+
+       MT_EE_LNA_GAIN =                        0x044,
+       MT_EE_RSSI_OFFSET_2G_0 =                0x046,
+       MT_EE_RSSI_OFFSET_2G_1 =                0x048,
+       MT_EE_LNA_GAIN_5GHZ_1 =                 0x049,
+       MT_EE_RSSI_OFFSET_5G_0 =                0x04a,
+       MT_EE_RSSI_OFFSET_5G_1 =                0x04c,
+       MT_EE_LNA_GAIN_5GHZ_2 =                 0x04d,
+
+       MT_EE_TX_POWER_DELTA_BW40 =             0x050,
+       MT_EE_TX_POWER_DELTA_BW80 =             0x052,
+
+       MT_EE_TX_POWER_EXT_PA_5G =              0x054,
+
+       MT_EE_TX_POWER_0_START_2G =             0x056,
+       MT_EE_TX_POWER_1_START_2G =             0x05c,
+
+       /* used as byte arrays */
+#define MT_TX_POWER_GROUP_SIZE_5G              5
+#define MT_TX_POWER_GROUPS_5G                  6
+       MT_EE_TX_POWER_0_START_5G =             0x062,
+
+       MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA =  0x074,
+       MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE =      0x076,
+
+       MT_EE_TX_POWER_1_START_5G =             0x080,
+
+       MT_EE_TX_POWER_CCK =                    0x0a0,
+       MT_EE_TX_POWER_OFDM_2G_6M =             0x0a2,
+       MT_EE_TX_POWER_OFDM_2G_24M =            0x0a4,
+       MT_EE_TX_POWER_OFDM_5G_6M =             0x0b2,
+       MT_EE_TX_POWER_OFDM_5G_24M =            0x0b4,
+       MT_EE_TX_POWER_HT_MCS0 =                0x0a6,
+       MT_EE_TX_POWER_HT_MCS4 =                0x0a8,
+       MT_EE_TX_POWER_HT_MCS8 =                0x0aa,
+       MT_EE_TX_POWER_HT_MCS12 =               0x0ac,
+       MT_EE_TX_POWER_VHT_MCS0 =               0x0ba,
+       MT_EE_TX_POWER_VHT_MCS4 =               0x0bc,
+       MT_EE_TX_POWER_VHT_MCS8 =               0x0be,
+
+       MT_EE_2G_TARGET_POWER =                 0x0d0,
+       MT_EE_TEMP_OFFSET =                     0x0d1,
+       MT_EE_5G_TARGET_POWER =                 0x0d2,
+       MT_EE_TSSI_BOUND1 =                     0x0d4,
+       MT_EE_TSSI_BOUND2 =                     0x0d6,
+       MT_EE_TSSI_BOUND3 =                     0x0d8,
+       MT_EE_TSSI_BOUND4 =                     0x0da,
+       MT_EE_FREQ_OFFSET_COMPENSATION =        0x0db,
+       MT_EE_TSSI_BOUND5 =                     0x0dc,
+       MT_EE_TX_POWER_BYRATE_BASE =            0x0de,
+
+       MT_EE_RF_TEMP_COMP_SLOPE_5G =           0x0f2,
+       MT_EE_RF_TEMP_COMP_SLOPE_2G =           0x0f4,
+
+       MT_EE_RF_2G_TSSI_OFF_TXPOWER =          0x0f6,
+       MT_EE_RF_2G_RX_HIGH_GAIN =              0x0f8,
+       MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN =       0x0fa,
+       MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN =       0x0fc,
+       MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN =       0x0fe,
+
+       MT_EE_BT_RCAL_RESULT =                  0x138,
+       MT_EE_BT_VCDL_CALIBRATION =             0x13c,
+       MT_EE_BT_PMUCFG =                       0x13e,
+
+       MT_EE_USAGE_MAP_START =                 0x1e0,
+       MT_EE_USAGE_MAP_END =                   0x1fc,
+
+       __MT_EE_MAX
+};
+
+#define MT_EE_NIC_CONF_0_RX_PATH               GENMASK(3, 0)
+#define MT_EE_NIC_CONF_0_TX_PATH               GENMASK(7, 4)
+#define MT_EE_NIC_CONF_0_PA_TYPE               GENMASK(9, 8)
+#define MT_EE_NIC_CONF_0_PA_INT_2G             BIT(8)
+#define MT_EE_NIC_CONF_0_PA_INT_5G             BIT(9)
+#define MT_EE_NIC_CONF_0_PA_IO_CURRENT         BIT(10)
+#define MT_EE_NIC_CONF_0_BOARD_TYPE            GENMASK(13, 12)
+
+#define MT_EE_NIC_CONF_1_HW_RF_CTRL            BIT(0)
+#define MT_EE_NIC_CONF_1_TEMP_TX_ALC           BIT(1)
+#define MT_EE_NIC_CONF_1_LNA_EXT_2G            BIT(2)
+#define MT_EE_NIC_CONF_1_LNA_EXT_5G            BIT(3)
+#define MT_EE_NIC_CONF_1_TX_ALC_EN             BIT(13)
+
+#define MT_EE_NIC_CONF_2_RX_STREAM             GENMASK(3, 0)
+#define MT_EE_NIC_CONF_2_TX_STREAM             GENMASK(7, 4)
+#define MT_EE_NIC_CONF_2_HW_ANTDIV             BIT(8)
+#define MT_EE_NIC_CONF_2_XTAL_OPTION           GENMASK(10, 9)
+#define MT_EE_NIC_CONF_2_TEMP_DISABLE          BIT(11)
+#define MT_EE_NIC_CONF_2_COEX_METHOD           GENMASK(15, 13)
+
+#define MT_EFUSE_USAGE_MAP_SIZE                        (MT_EE_USAGE_MAP_END - \
+                                                MT_EE_USAGE_MAP_START + 1)
+
+enum mt76x02_eeprom_modes {
+       MT_EE_READ,
+       MT_EE_PHYSICAL_READ,
+};
+
+enum mt76x02_board_type {
+       BOARD_TYPE_2GHZ = 1,
+       BOARD_TYPE_5GHZ = 2,
+};
+
+static inline bool mt76x02_field_valid(u8 val)
+{
+       return val != 0 && val != 0xff;
+}
+
+static inline int
+mt76x02_sign_extend(u32 val, unsigned int size)
+{
+       bool sign = val & BIT(size - 1);
+
+       val &= BIT(size - 1) - 1;
+
+       return sign ? val : -val;
+}
+
+static inline int
+mt76x02_sign_extend_optional(u32 val, unsigned int size)
+{
+       bool enable = val & BIT(size);
+
+       return enable ? mt76x02_sign_extend(val, size) : 0;
+}
+
+static inline s8 mt76x02_rate_power_val(u8 val)
+{
+       if (!mt76x02_field_valid(val))
+               return 0;
+
+       return mt76x02_sign_extend_optional(val, 7);
+}
+
+static inline int
+mt76x02_eeprom_get(struct mt76_dev *dev,
+                  enum mt76x02_eeprom_field field)
+{
+       if ((field & 1) || field >= __MT_EE_MAX)
+               return -1;
+
+       return get_unaligned_le16(dev->eeprom.data + field);
+}
+
+static inline bool
+mt76x02_temp_tx_alc_enabled(struct mt76_dev *dev)
+{
+       u16 val;
+
+       val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
+       if (!(val & BIT(15)))
+               return false;
+
+       return mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+              MT_EE_NIC_CONF_1_TEMP_TX_ALC;
+}
+
+static inline bool
+mt76x02_tssi_enabled(struct mt76_dev *dev)
+{
+       return !mt76x02_temp_tx_alc_enabled(dev) &&
+              (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+               MT_EE_NIC_CONF_1_TX_ALC_EN);
+}
+
+bool mt76x02_ext_pa_enabled(struct mt76_dev *dev, enum nl80211_band band);
+int mt76x02_get_efuse_data(struct mt76_dev *dev, u16 base, void *buf,
+                          int len, enum mt76x02_eeprom_modes mode);
+void mt76x02_get_rx_gain(struct mt76_dev *dev, enum nl80211_band band,
+                        u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g);
+u8 mt76x02_get_lna_gain(struct mt76_dev *dev,
+                       s8 *lna_2g, s8 *lna_5g,
+                       struct ieee80211_channel *chan);
+void mt76x02_eeprom_parse_hw_cap(struct mt76_dev *dev);
+
+#endif /* __MT76x02_EEPROM_H */
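
As written, mt76x02_rate_power_val() decodes a per-rate power byte with bit 7
as the enable flag, bit 6 as the sign (set meaning positive, per
mt76x02_sign_extend() above) and the low six bits as the magnitude. A few
worked values (a sketch illustrating the code's behaviour, not part of this
header):

s8 a = mt76x02_rate_power_val(0xc5);	/* enable + sign set  -> +5 */
s8 b = mt76x02_rate_power_val(0x85);	/* enable, sign clear -> -5 */
s8 c = mt76x02_rate_power_val(0x45);	/* enable bit clear   ->  0 */
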
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
new file mode 100644
index 0000000..2442454
--- /dev/null
@@ -0,0 +1,745 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x02.h"
+#include "mt76x02_trace.h"
+
+enum mt76x02_cipher_type
+mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+       memset(key_data, 0, 32);
+       if (!key)
+               return MT_CIPHER_NONE;
+
+       if (key->keylen > 32)
+               return MT_CIPHER_NONE;
+
+       memcpy(key_data, key->key, key->keylen);
+
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_WEP40:
+               return MT_CIPHER_WEP40;
+       case WLAN_CIPHER_SUITE_WEP104:
+               return MT_CIPHER_WEP104;
+       case WLAN_CIPHER_SUITE_TKIP:
+               return MT_CIPHER_TKIP;
+       case WLAN_CIPHER_SUITE_CCMP:
+               return MT_CIPHER_AES_CCMP;
+       default:
+               return MT_CIPHER_NONE;
+       }
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_get_key_info);
+
+int mt76x02_mac_shared_key_setup(struct mt76_dev *dev, u8 vif_idx, u8 key_idx,
+                               struct ieee80211_key_conf *key)
+{
+       enum mt76x02_cipher_type cipher;
+       u8 key_data[32];
+       u32 val;
+
+       cipher = mt76x02_mac_get_key_info(key, key_data);
+       if (cipher == MT_CIPHER_NONE && key)
+               return -EOPNOTSUPP;
+
+       val = __mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+       val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+       val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+       __mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+       __mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
+                      sizeof(key_data));
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
+
+int mt76x02_mac_wcid_set_key(struct mt76_dev *dev, u8 idx,
+                           struct ieee80211_key_conf *key)
+{
+       enum mt76x02_cipher_type cipher;
+       u8 key_data[32];
+       u8 iv_data[8];
+
+       cipher = mt76x02_mac_get_key_info(key, key_data);
+       if (cipher == MT_CIPHER_NONE && key)
+               return -EOPNOTSUPP;
+
+       __mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+       __mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
+
+       memset(iv_data, 0, sizeof(iv_data));
+       if (key) {
+               __mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
+                                !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+               iv_data[3] = key->keyidx << 6;
+               if (cipher >= MT_CIPHER_TKIP)
+                       iv_data[3] |= 0x20;
+       }
+
+       __mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_key);
+
+void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+       struct mt76_wcid_addr addr = {};
+       u32 attr;
+
+       attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+              FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+       __mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+       __mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
+       __mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
+
+       if (idx >= 128)
+               return;
+
+       if (mac)
+               memcpy(addr.macaddr, mac, ETH_ALEN);
+
+       __mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_setup);
+
+void mt76x02_mac_wcid_set_drop(struct mt76_dev *dev, u8 idx, bool drop)
+{
+       u32 val = __mt76_rr(dev, MT_WCID_DROP(idx));
+       u32 bit = MT_WCID_DROP_MASK(idx);
+
+       /* prevent unnecessary writes */
+       if ((val & bit) != (bit * drop))
+               __mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_drop);
+
+void mt76x02_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
+{
+       struct mt76_txq *mtxq;
+
+       if (!txq)
+               return;
+
+       mtxq = (struct mt76_txq *) txq->drv_priv;
+       if (txq->sta) {
+               struct mt76x02_sta *sta;
+
+               sta = (struct mt76x02_sta *) txq->sta->drv_priv;
+               mtxq->wcid = &sta->wcid;
+       } else {
+               struct mt76x02_vif *mvif;
+
+               mvif = (struct mt76x02_vif *) txq->vif->drv_priv;
+               mtxq->wcid = &mvif->group_wcid;
+       }
+
+       mt76_txq_init(dev, txq);
+}
+EXPORT_SYMBOL_GPL(mt76x02_txq_init);
+
+static void
+mt76x02_mac_fill_txwi(struct mt76x02_txwi *txwi, struct sk_buff *skb,
+                     struct ieee80211_sta *sta, int len, u8 nss)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       u16 txwi_flags = 0;
+
+       if (info->flags & IEEE80211_TX_CTL_LDPC)
+               txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
+       if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
+               txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
+       if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+               txwi_flags |= MT_TXWI_FLAGS_MMPS;
+       if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+               txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+       if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+               txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+       if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+               txwi->pktid |= MT_TXWI_PKTID_PROBE;
+       if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+               u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+               ba_size <<= sta->ht_cap.ampdu_factor;
+               ba_size = min_t(int, 63, ba_size - 1);
+               if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+                       ba_size = 0;
+               txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+
+               txwi_flags |= MT_TXWI_FLAGS_AMPDU |
+                        FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+                                   sta->ht_cap.ampdu_density);
+       }
+
+       if (ieee80211_is_probe_resp(hdr->frame_control) ||
+           ieee80211_is_beacon(hdr->frame_control))
+               txwi_flags |= MT_TXWI_FLAGS_TS;
+
+       txwi->flags |= cpu_to_le16(txwi_flags);
+       txwi->len_ctl = cpu_to_le16(len);
+}
+
+static __le16
+mt76x02_mac_tx_rate_val(struct mt76_dev *dev,
+                      const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+       u16 rateval;
+       u8 phy, rate_idx;
+       u8 nss = 1;
+       u8 bw = 0;
+
+       if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+               rate_idx = rate->idx;
+               nss = 1 + (rate->idx >> 4);
+               phy = MT_PHY_TYPE_VHT;
+               if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+                       bw = 2;
+               else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+                       bw = 1;
+       } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+               rate_idx = rate->idx;
+               nss = 1 + (rate->idx >> 3);
+               phy = MT_PHY_TYPE_HT;
+               if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+                       phy = MT_PHY_TYPE_HT_GF;
+               if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+                       bw = 1;
+       } else {
+               const struct ieee80211_rate *r;
+               int band = dev->chandef.chan->band;
+               u16 val;
+
+               r = &dev->hw->wiphy->bands[band]->bitrates[rate->idx];
+               if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+                       val = r->hw_value_short;
+               else
+                       val = r->hw_value;
+
+               phy = val >> 8;
+               rate_idx = val & 0xff;
+               bw = 0;
+       }
+
+       rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
+       rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+       rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
+       if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+               rateval |= MT_RXWI_RATE_SGI;
+
+       *nss_val = nss;
+       return cpu_to_le16(rateval);
+}
+
+void mt76x02_mac_wcid_set_rate(struct mt76_dev *dev, struct mt76_wcid *wcid,
+                             const struct ieee80211_tx_rate *rate)
+{
+       spin_lock_bh(&dev->lock);
+       wcid->tx_rate = mt76x02_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+       wcid->tx_rate_set = true;
+       spin_unlock_bh(&dev->lock);
+}
+
+bool mt76x02_mac_load_tx_status(struct mt76_dev *dev,
+                              struct mt76x02_tx_status *stat)
+{
+       u32 stat1, stat2;
+
+       stat2 = __mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
+       stat1 = __mt76_rr(dev, MT_TX_STAT_FIFO);
+
+       stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
+       if (!stat->valid)
+               return false;
+
+       stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
+       stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
+       stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
+       stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
+       stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
+
+       stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
+       stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+
+       return true;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_load_tx_status);
+
+static int
+mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
+                          enum nl80211_band band)
+{
+       u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+       txrate->idx = 0;
+       txrate->flags = 0;
+       txrate->count = 1;
+
+       switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+       case MT_PHY_TYPE_OFDM:
+               if (band == NL80211_BAND_2GHZ)
+                       idx += 4;
+
+               txrate->idx = idx;
+               return 0;
+       case MT_PHY_TYPE_CCK:
+               if (idx >= 8)
+                       idx -= 8;
+
+               txrate->idx = idx;
+               return 0;
+       case MT_PHY_TYPE_HT_GF:
+               txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+               /* fall through */
+       case MT_PHY_TYPE_HT:
+               txrate->flags |= IEEE80211_TX_RC_MCS;
+               txrate->idx = idx;
+               break;
+       case MT_PHY_TYPE_VHT:
+               txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
+               txrate->idx = idx;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+       case MT_PHY_BW_20:
+               break;
+       case MT_PHY_BW_40:
+               txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+               break;
+       case MT_PHY_BW_80:
+               txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (rate & MT_RXWI_RATE_SGI)
+               txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+
+       return 0;
+}
+
+void mt76x02_mac_write_txwi(struct mt76_dev *dev, struct mt76x02_txwi *txwi,
+                           struct sk_buff *skb, struct mt76_wcid *wcid,
+                           struct ieee80211_sta *sta, int len)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_tx_rate *rate = &info->control.rates[0];
+       struct ieee80211_key_conf *key = info->control.hw_key;
+       u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
+       u8 nss;
+       s8 txpwr_adj, max_txpwr_adj;
+       u8 ccmp_pn[8], nstreams = dev->chainmask & 0xf;
+
+       memset(txwi, 0, sizeof(*txwi));
+
+       if (wcid)
+               txwi->wcid = wcid->idx;
+       else
+               txwi->wcid = 0xff;
+
+       txwi->pktid = 1;
+
+       if (wcid && wcid->sw_iv && key) {
+               u64 pn = atomic64_inc_return(&key->tx_pn);
+               ccmp_pn[0] = pn;
+               ccmp_pn[1] = pn >> 8;
+               ccmp_pn[2] = 0;
+               ccmp_pn[3] = 0x20 | (key->keyidx << 6);
+               ccmp_pn[4] = pn >> 16;
+               ccmp_pn[5] = pn >> 24;
+               ccmp_pn[6] = pn >> 32;
+               ccmp_pn[7] = pn >> 40;
+               txwi->iv = *((__le32 *)&ccmp_pn[0]);
+               txwi->eiv = *((__le32 *)&ccmp_pn[1]);
+       }
+
+       spin_lock_bh(&dev->lock);
+       if (wcid && (rate->idx < 0 || !rate->count)) {
+               txwi->rate = wcid->tx_rate;
+               max_txpwr_adj = wcid->max_txpwr_adj;
+               nss = wcid->tx_rate_nss;
+       } else {
+               txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss);
+               max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
+       }
+       spin_unlock_bh(&dev->lock);
+
+       txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->txpower_conf,
+                                            max_txpwr_adj);
+       txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
+
+       if (nstreams > 1 && mt76_rev(dev) >= MT76XX_REV_E4)
+               txwi->txstream = 0x13;
+       else if (nstreams > 1 && mt76_rev(dev) >= MT76XX_REV_E3 &&
+                !(txwi->rate & cpu_to_le16(rate_ht_mask)))
+               txwi->txstream = 0x93;
+
+       mt76x02_mac_fill_txwi(txwi, skb, sta, len, nss);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi);
+
+static void
+mt76x02_mac_fill_tx_status(struct mt76_dev *dev,
+                         struct ieee80211_tx_info *info,
+                         struct mt76x02_tx_status *st, int n_frames)
+{
+       struct ieee80211_tx_rate *rate = info->status.rates;
+       int cur_idx, last_rate;
+       int i;
+
+       if (!n_frames)
+               return;
+
+       last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+       mt76x02_mac_process_tx_rate(&rate[last_rate], st->rate,
+                                  dev->chandef.chan->band);
+       if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+               rate[last_rate + 1].idx = -1;
+
+       cur_idx = rate[last_rate].idx + last_rate;
+       for (i = 0; i <= last_rate; i++) {
+               rate[i].flags = rate[last_rate].flags;
+               rate[i].idx = max_t(int, 0, cur_idx - i);
+               rate[i].count = 1;
+       }
+       rate[last_rate].count = st->retry + 1 - last_rate;
+
+       info->status.ampdu_len = n_frames;
+       info->status.ampdu_ack_len = st->success ? n_frames : 0;
+
+       if (st->pktid & MT_TXWI_PKTID_PROBE)
+               info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+       if (st->aggr)
+               info->flags |= IEEE80211_TX_CTL_AMPDU |
+                              IEEE80211_TX_STAT_AMPDU;
+
+       if (!st->ack_req)
+               info->flags |= IEEE80211_TX_CTL_NO_ACK;
+       else if (st->success)
+               info->flags |= IEEE80211_TX_STAT_ACK;
+}
+
+void mt76x02_send_tx_status(struct mt76_dev *dev,
+                          struct mt76x02_tx_status *stat, u8 *update)
+{
+       struct ieee80211_tx_info info = {};
+       struct ieee80211_sta *sta = NULL;
+       struct mt76_wcid *wcid = NULL;
+       struct mt76x02_sta *msta = NULL;
+
+       rcu_read_lock();
+       if (stat->wcid < ARRAY_SIZE(dev->wcid))
+               wcid = rcu_dereference(dev->wcid[stat->wcid]);
+
+       if (wcid) {
+               void *priv;
+
+               priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
+               sta = container_of(priv, struct ieee80211_sta,
+                                  drv_priv);
+       }
+
+       if (msta && stat->aggr) {
+               u32 stat_val, stat_cache;
+
+               stat_val = stat->rate;
+               stat_val |= ((u32) stat->retry) << 16;
+               stat_cache = msta->status.rate;
+               stat_cache |= ((u32) msta->status.retry) << 16;
+
+               if (*update == 0 && stat_val == stat_cache &&
+                   stat->wcid == msta->status.wcid && msta->n_frames < 32) {
+                       msta->n_frames++;
+                       goto out;
+               }
+
+               mt76x02_mac_fill_tx_status(dev, &info, &msta->status,
+                                         msta->n_frames);
+
+               msta->status = *stat;
+               msta->n_frames = 1;
+               *update = 0;
+       } else {
+               mt76x02_mac_fill_tx_status(dev, &info, stat, 1);
+               *update = 1;
+       }
+
+       ieee80211_tx_status_noskb(dev->hw, sta, &info);
+
+out:
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(mt76x02_send_tx_status);
+
+int
+mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate)
+{
+       u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+       switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+       case MT_PHY_TYPE_OFDM:
+               if (idx >= 8)
+                       idx = 0;
+
+               if (status->band == NL80211_BAND_2GHZ)
+                       idx += 4;
+
+               status->rate_idx = idx;
+               return 0;
+       case MT_PHY_TYPE_CCK:
+               if (idx >= 8) {
+                       idx -= 8;
+                       status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+               }
+
+               if (idx >= 4)
+                       idx = 0;
+
+               status->rate_idx = idx;
+               return 0;
+       case MT_PHY_TYPE_HT_GF:
+               status->enc_flags |= RX_ENC_FLAG_HT_GF;
+               /* fall through */
+       case MT_PHY_TYPE_HT:
+               status->encoding = RX_ENC_HT;
+               status->rate_idx = idx;
+               break;
+       case MT_PHY_TYPE_VHT:
+               status->encoding = RX_ENC_VHT;
+               status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
+               status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (rate & MT_RXWI_RATE_LDPC)
+               status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+       if (rate & MT_RXWI_RATE_SGI)
+               status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+       if (rate & MT_RXWI_RATE_STBC)
+               status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
+
+       switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+       case MT_PHY_BW_20:
+               break;
+       case MT_PHY_BW_40:
+               status->bw = RATE_INFO_BW_40;
+               break;
+       case MT_PHY_BW_80:
+               status->bw = RATE_INFO_BW_80;
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_process_rate);
+
+void mt76x02_mac_setaddr(struct mt76_dev *dev, u8 *addr)
+{
+       ether_addr_copy(dev->macaddr, addr);
+
+       if (!is_valid_ether_addr(dev->macaddr)) {
+               eth_random_addr(dev->macaddr);
+               dev_info(dev->dev,
+                        "Invalid MAC address, using random address %pM\n",
+                        dev->macaddr);
+       }
+
+       __mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
+       __mt76_wr(dev, MT_MAC_ADDR_DW1,
+                 get_unaligned_le16(dev->macaddr + 4) |
+                 FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr);
+
+static int
+mt76x02_mac_get_rssi(struct mt76x02_dev *dev, s8 rssi, int chain)
+{
+       struct mt76x02_rx_freq_cal *cal = &dev->cal.rx;
+
+       rssi += cal->rssi_offset[chain];
+       rssi -= cal->lna_gain;
+
+       return rssi;
+}
+
+int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
+                          void *rxi)
+{
+       struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
+       struct mt76x02_rxwi *rxwi = rxi;
+       struct mt76x02_sta *sta;
+       u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
+       u32 ctl = le32_to_cpu(rxwi->ctl);
+       u16 rate = le16_to_cpu(rxwi->rate);
+       u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
+       bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
+       int i, pad_len = 0, nstreams = dev->mt76.chainmask & 0xf;
+       s8 signal;
+       u8 pn_len;
+       u8 wcid;
+       int len;
+
+       if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+               return -EINVAL;
+
+       if (rxinfo & MT_RXINFO_L2PAD)
+               pad_len += 2;
+
+       if (rxinfo & MT_RXINFO_DECRYPT) {
+               status->flag |= RX_FLAG_DECRYPTED;
+               status->flag |= RX_FLAG_MMIC_STRIPPED;
+               status->flag |= RX_FLAG_MIC_STRIPPED;
+               status->flag |= RX_FLAG_IV_STRIPPED;
+       }
+
+       wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
+       sta = mt76x02_rx_get_sta(&dev->mt76, wcid);
+       status->wcid = mt76x02_rx_get_sta_wcid(sta, unicast);
+
+       len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+       pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
+       if (pn_len) {
+               int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
+               u8 *data = skb->data + offset;
+
+               status->iv[0] = data[7];
+               status->iv[1] = data[6];
+               status->iv[2] = data[5];
+               status->iv[3] = data[4];
+               status->iv[4] = data[1];
+               status->iv[5] = data[0];
+
+               /*
+                * Driver CCMP validation can't deal with fragments.
+                * Let mac80211 take care of it.
+                */
+               if (rxinfo & MT_RXINFO_FRAG) {
+                       status->flag &= ~RX_FLAG_IV_STRIPPED;
+               } else {
+                       pad_len += pn_len << 2;
+                       len -= pn_len << 2;
+               }
+       }
+
+       mt76x02_remove_hdr_pad(skb, pad_len);
+
+       if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
+               status->aggr = true;
+
+       if (WARN_ON_ONCE(len > skb->len))
+               return -EINVAL;
+
+       pskb_trim(skb, len);
+
+       status->chains = BIT(0);
+       signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0);
+       for (i = 1; i < nstreams; i++) {
+               status->chains |= BIT(i);
+               status->chain_signal[i] = mt76x02_mac_get_rssi(dev,
+                                                              rxwi->rssi[i],
+                                                              i);
+               signal = max_t(s8, signal, status->chain_signal[i]);
+       }
+       status->signal = signal;
+       status->freq = dev->mt76.chandef.chan->center_freq;
+       status->band = dev->mt76.chandef.chan->band;
+
+       status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
+       status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
+
+       if (sta) {
+               ewma_signal_add(&sta->rssi, status->signal);
+               sta->inactive_count = 0;
+       }
+
+       return mt76x02_mac_process_rate(status, rate);
+}
+
+void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
+{
+       struct mt76x02_tx_status stat = {};
+       unsigned long flags;
+       u8 update = 1;
+       bool ret;
+
+       if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+               return;
+
+       trace_mac_txstat_poll(dev);
+
+       while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
+               spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
+               ret = mt76x02_mac_load_tx_status(&dev->mt76, &stat);
+               spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
+
+               if (!ret)
+                       break;
+
+               trace_mac_txstat_fetch(dev, &stat);
+
+               if (!irq) {
+                       mt76x02_send_tx_status(&dev->mt76, &stat, &update);
+                       continue;
+               }
+
+               kfifo_put(&dev->txstatus_fifo, stat);
+       }
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_poll_tx_status);
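
mt76x02_mac_poll_tx_status() splits its work by calling context: from hard-IRQ context (irq set) it only stashes raw statuses into txstatus_fifo until the fifo fills, leaving the mac80211 reporting to the tx tasklet, while from process context it reports each status inline. A sketch of that split around a tiny power-of-two ring (the names and the reporting stub are illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    #define FIFO_SZ 8 /* power of two, like the kfifo */
    static int fifo[FIFO_SZ];
    static unsigned int head, tail;

    static bool fifo_full(void) { return head - tail == FIFO_SZ; }
    static void fifo_put(int v) { fifo[head++ & (FIFO_SZ - 1)] = v; }
    static void report(int v)   { printf("status %d -> mac80211\n", v); }

    /* mirrors the irq/!irq split in mt76x02_mac_poll_tx_status() */
    static void poll_tx_status(bool irq, int stat)
    {
        if (!irq) {          /* process context: report inline */
            report(stat);
            return;
        }
        if (!fifo_full())    /* hard irq: stash for the tasklet */
            fifo_put(stat);
    }

    int main(void)
    {
        poll_tx_status(true, 1);  /* deferred */
        poll_tx_status(false, 2); /* reported inline */
        while (tail != head)      /* tasklet side: drain the fifo */
            report(fifo[tail++ & (FIFO_SZ - 1)]);
        return 0;
    }
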
+
+static void
+mt76x02_mac_queue_txdone(struct mt76x02_dev *dev, struct sk_buff *skb,
+                        void *txwi_ptr)
+{
+       struct mt76x02_tx_info *txi = mt76x02_skb_tx_info(skb);
+       struct mt76x02_txwi *txwi = txwi_ptr;
+
+       mt76x02_mac_poll_tx_status(dev, false);
+
+       txi->tries = 0;
+       txi->jiffies = jiffies;
+       txi->wcid = txwi->wcid;
+       txi->pktid = txwi->pktid;
+       trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
+       mt76x02_tx_complete(&dev->mt76, skb);
+}
+
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+                            struct mt76_queue_entry *e, bool flush)
+{
+       struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+
+       if (e->txwi)
+               mt76x02_mac_queue_txdone(dev, e->skb, &e->txwi->txwi);
+       else
+               dev_kfree_skb_any(e->skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
new file mode 100644
index 0000000..4f7ee46
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76X02_MAC_H
+#define __MT76X02_MAC_H
+
+#include <linux/average.h>
+
+struct mt76x02_dev;
+
+struct mt76x02_tx_status {
+       u8 valid:1;
+       u8 success:1;
+       u8 aggr:1;
+       u8 ack_req:1;
+       u8 wcid;
+       u8 pktid;
+       u8 retry;
+       u16 rate;
+} __packed __aligned(2);
+
+#define MT_VIF_WCID(_n)                (254 - ((_n) & 7))
+#define MT_MAX_VIFS            8
+
+struct mt76x02_vif {
+       u8 idx;
+
+       struct mt76_wcid group_wcid;
+};
+
+struct mt76x02_tx_info {
+       unsigned long jiffies;
+       u8 tries;
+
+       u8 wcid;
+       u8 pktid;
+       u8 retry;
+};
+
+DECLARE_EWMA(signal, 10, 8);
+
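
DECLARE_EWMA(signal, 10, 8) expands to the ewma_signal_init/add/read helpers from <linux/average.h>: a fixed-point exponentially weighted moving average with 2^10 fractional scaling and a 1/8 weight per new sample. A standalone sketch of the same arithmetic (the helper names here are made up, and positive samples are used for simplicity, while the driver feeds signed dBm values):

    #include <stdio.h>

    /* avg <- avg * 7/8 + sample / 8, kept scaled by 2^10 */
    struct ewma { unsigned long internal; };

    static void ewma_add(struct ewma *e, unsigned long val)
    {
        e->internal = e->internal ?
            (((e->internal << 3) - e->internal) + (val << 10)) >> 3 :
            (val << 10);
    }

    static unsigned long ewma_read(const struct ewma *e)
    {
        return e->internal >> 10;
    }

    int main(void)
    {
        struct ewma e = { 0 };
        unsigned long samples[] = { 40, 40, 60, 60, 60 };

        for (unsigned int i = 0; i < 5; i++) {
            ewma_add(&e, samples[i]);
            printf("avg=%lu\n", ewma_read(&e));
        }
        return 0;
    }
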
+struct mt76x02_sta {
+       struct mt76_wcid wcid; /* must be first */
+
+       struct mt76x02_vif *vif;
+       struct mt76x02_tx_status status;
+       int n_frames;
+
+       struct ewma_signal rssi;
+       int inactive_count;
+};
+
+#define MT_RXINFO_BA                   BIT(0)
+#define MT_RXINFO_DATA                 BIT(1)
+#define MT_RXINFO_NULL                 BIT(2)
+#define MT_RXINFO_FRAG                 BIT(3)
+#define MT_RXINFO_UNICAST              BIT(4)
+#define MT_RXINFO_MULTICAST            BIT(5)
+#define MT_RXINFO_BROADCAST            BIT(6)
+#define MT_RXINFO_MYBSS                        BIT(7)
+#define MT_RXINFO_CRCERR               BIT(8)
+#define MT_RXINFO_ICVERR               BIT(9)
+#define MT_RXINFO_MICERR               BIT(10)
+#define MT_RXINFO_AMSDU                        BIT(11)
+#define MT_RXINFO_HTC                  BIT(12)
+#define MT_RXINFO_RSSI                 BIT(13)
+#define MT_RXINFO_L2PAD                        BIT(14)
+#define MT_RXINFO_AMPDU                        BIT(15)
+#define MT_RXINFO_DECRYPT              BIT(16)
+#define MT_RXINFO_BSSIDX3              BIT(17)
+#define MT_RXINFO_WAPI_KEY             BIT(18)
+#define MT_RXINFO_PN_LEN               GENMASK(21, 19)
+#define MT_RXINFO_SW_FTYPE0            BIT(22)
+#define MT_RXINFO_SW_FTYPE1            BIT(23)
+#define MT_RXINFO_PROBE_RESP           BIT(24)
+#define MT_RXINFO_BEACON               BIT(25)
+#define MT_RXINFO_DISASSOC             BIT(26)
+#define MT_RXINFO_DEAUTH               BIT(27)
+#define MT_RXINFO_ACTION               BIT(28)
+#define MT_RXINFO_TCP_SUM_ERR          BIT(30)
+#define MT_RXINFO_IP_SUM_ERR           BIT(31)
+
+#define MT_RXWI_CTL_WCID               GENMASK(7, 0)
+#define MT_RXWI_CTL_KEY_IDX            GENMASK(9, 8)
+#define MT_RXWI_CTL_BSS_IDX            GENMASK(12, 10)
+#define MT_RXWI_CTL_UDF                        GENMASK(15, 13)
+#define MT_RXWI_CTL_MPDU_LEN           GENMASK(29, 16)
+#define MT_RXWI_CTL_EOF                        BIT(31)
+
+#define MT_RXWI_TID                    GENMASK(3, 0)
+#define MT_RXWI_SN                     GENMASK(15, 4)
+
+#define MT_RXWI_RATE_INDEX             GENMASK(5, 0)
+#define MT_RXWI_RATE_LDPC              BIT(6)
+#define MT_RXWI_RATE_BW                        GENMASK(8, 7)
+#define MT_RXWI_RATE_SGI               BIT(9)
+#define MT_RXWI_RATE_STBC              BIT(10)
+#define MT_RXWI_RATE_LDPC_EXSYM                BIT(11)
+#define MT_RXWI_RATE_PHY               GENMASK(15, 13)
+
+#define MT_RATE_INDEX_VHT_IDX          GENMASK(3, 0)
+#define MT_RATE_INDEX_VHT_NSS          GENMASK(5, 4)
+
+struct mt76x02_rxwi {
+       __le32 rxinfo;
+
+       __le32 ctl;
+
+       __le16 tid_sn;
+       __le16 rate;
+
+       u8 rssi[4];
+
+       __le32 bbp_rxinfo[4];
+};
+
+#define MT_TX_PWR_ADJ                  GENMASK(3, 0)
+
+enum mt76x2_phy_bandwidth {
+       MT_PHY_BW_20,
+       MT_PHY_BW_40,
+       MT_PHY_BW_80,
+};
+
+#define MT_TXWI_FLAGS_FRAG             BIT(0)
+#define MT_TXWI_FLAGS_MMPS             BIT(1)
+#define MT_TXWI_FLAGS_CFACK            BIT(2)
+#define MT_TXWI_FLAGS_TS               BIT(3)
+#define MT_TXWI_FLAGS_AMPDU            BIT(4)
+#define MT_TXWI_FLAGS_MPDU_DENSITY     GENMASK(7, 5)
+#define MT_TXWI_FLAGS_TXOP             GENMASK(9, 8)
+#define MT_TXWI_FLAGS_NDPS             BIT(10)
+#define MT_TXWI_FLAGS_RTSBWSIG         BIT(11)
+#define MT_TXWI_FLAGS_NDP_BW           GENMASK(13, 12)
+#define MT_TXWI_FLAGS_SOUND            BIT(14)
+#define MT_TXWI_FLAGS_TX_RATE_LUT      BIT(15)
+
+#define MT_TXWI_ACK_CTL_REQ            BIT(0)
+#define MT_TXWI_ACK_CTL_NSEQ           BIT(1)
+#define MT_TXWI_ACK_CTL_BA_WINDOW      GENMASK(7, 2)
+
+#define MT_TXWI_PKTID_PROBE            BIT(7)
+
+struct mt76x02_txwi {
+       __le16 flags;
+       __le16 rate;
+       u8 ack_ctl;
+       u8 wcid;
+       __le16 len_ctl;
+       __le32 iv;
+       __le32 eiv;
+       u8 aid;
+       u8 txstream;
+       u8 ctl2;
+       u8 pktid;
+} __packed __aligned(4);
+
+static inline bool mt76x02_wait_for_mac(struct mt76_dev *dev)
+{
+       const u32 MAC_CSR0 = 0x1000;
+       int i;
+
+       for (i = 0; i < 500; i++) {
+               if (test_bit(MT76_REMOVED, &dev->state))
+                       return false;
+
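+               /* 0 and ~0 both mean a dead bus: chip still in reset or removed */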
+               switch (dev->bus->rr(dev, MAC_CSR0)) {
+               case 0:
+               case ~0:
+                       break;
+               default:
+                       return true;
+               }
+               usleep_range(5000, 10000);
+       }
+       return false;
+}
+
+static inline struct mt76x02_tx_info *
+mt76x02_skb_tx_info(struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+       return (void *)info->status.status_driver_data;
+}
+
+void mt76x02_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
+enum mt76x02_cipher_type
+mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data);
+
+int mt76x02_mac_shared_key_setup(struct mt76_dev *dev, u8 vif_idx, u8 key_idx,
+                               struct ieee80211_key_conf *key);
+int mt76x02_mac_wcid_set_key(struct mt76_dev *dev, u8 idx,
+                           struct ieee80211_key_conf *key);
+void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
+void mt76x02_mac_wcid_set_drop(struct mt76_dev *dev, u8 idx, bool drop);
+void mt76x02_mac_wcid_set_rate(struct mt76_dev *dev, struct mt76_wcid *wcid,
+                             const struct ieee80211_tx_rate *rate);
+bool mt76x02_mac_load_tx_status(struct mt76_dev *dev,
+                              struct mt76x02_tx_status *stat);
+void mt76x02_send_tx_status(struct mt76_dev *dev,
+                          struct mt76x02_tx_status *stat, u8 *update);
+int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
+                          void *rxi);
+int mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate);
+void mt76x02_mac_setaddr(struct mt76_dev *dev, u8 *addr);
+void mt76x02_mac_write_txwi(struct mt76_dev *dev, struct mt76x02_txwi *txwi,
+                           struct sk_buff *skb, struct mt76_wcid *wcid,
+                           struct ieee80211_sta *sta, int len);
+void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq);
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+                            struct mt76_queue_entry *e, bool flush);
+#endif
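
Both descriptors in this header are declared __packed with an explicit alignment, so their in-memory size is fixed across ABIs: mt76x02_tx_status should pack into 6 bytes and mt76x02_txwi into 20. A quick userspace check under those assumptions (GCC/Clang attribute syntax, kernel __le16/__le32 swapped for <stdint.h> types, byte order aside):

    #include <stdint.h>
    #include <assert.h>

    struct tx_status {
        uint8_t valid:1, success:1, aggr:1, ack_req:1;
        uint8_t wcid, pktid, retry;
        uint16_t rate;
    } __attribute__((packed, aligned(2)));

    struct txwi {
        uint16_t flags, rate;
        uint8_t ack_ctl, wcid;
        uint16_t len_ctl;
        uint32_t iv, eiv;
        uint8_t aid, txstream, ctl2, pktid;
    } __attribute__((packed, aligned(4)));

    int main(void)
    {
        static_assert(sizeof(struct tx_status) == 6, "tx status layout");
        static_assert(sizeof(struct txwi) == 20, "txwi layout");
        return 0;
    }
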
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
new file mode 100644
index 0000000..6d56513
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
@@ -0,0 +1,226 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+
+#include "mt76.h"
+#include "mt76x02_mcu.h"
+#include "mt76x02_dma.h"
+
+struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len)
+{
+       struct sk_buff *skb;
+
+       skb = alloc_skb(len, GFP_KERNEL);
+       if (!skb)
+               return NULL;
+       memcpy(skb_put(skb, len), data, len);
+
+       return skb;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_alloc);
+
+static struct sk_buff *
+mt76x02_mcu_get_response(struct mt76_dev *dev, unsigned long expires)
+{
+       unsigned long timeout;
+
+       if (!time_is_after_jiffies(expires))
+               return NULL;
+
+       timeout = expires - jiffies;
+       wait_event_timeout(dev->mmio.mcu.wait,
+                          !skb_queue_empty(&dev->mmio.mcu.res_q),
+                          timeout);
+       return skb_dequeue(&dev->mmio.mcu.res_q);
+}
+
+static int
+mt76x02_tx_queue_mcu(struct mt76_dev *dev, enum mt76_txq_id qid,
+                    struct sk_buff *skb, int cmd, int seq)
+{
+       struct mt76_queue *q = &dev->q_tx[qid];
+       struct mt76_queue_buf buf;
+       dma_addr_t addr;
+       u32 tx_info;
+
+       tx_info = MT_MCU_MSG_TYPE_CMD |
+                 FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
+                 FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
+                 FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
+                 FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
+
+       addr = dma_map_single(dev->dev, skb->data, skb->len,
+                             DMA_TO_DEVICE);
+       if (dma_mapping_error(dev->dev, addr))
+               return -ENOMEM;
+
+       buf.addr = addr;
+       buf.len = skb->len;
+       spin_lock_bh(&q->lock);
+       dev->queue_ops->add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
+       dev->queue_ops->kick(dev, q);
+       spin_unlock_bh(&q->lock);
+
+       return 0;
+}
+
+int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
+                        int cmd, bool wait_resp)
+{
+       unsigned long expires = jiffies + HZ;
+       int ret;
+       u8 seq;
+
+       if (!skb)
+               return -EINVAL;
+
+       mutex_lock(&dev->mmio.mcu.mutex);
+
+       seq = ++dev->mmio.mcu.msg_seq & 0xf;
+       if (!seq)
+               seq = ++dev->mmio.mcu.msg_seq & 0xf;
+
+       ret = mt76x02_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
+       if (ret)
+               goto out;
+
+       while (wait_resp) {
+               u32 *rxfce;
+               bool check_seq = false;
+
+               skb = mt76x02_mcu_get_response(dev, expires);
+               if (!skb) {
+                       dev_err(dev->dev,
+                               "MCU message %d (seq %d) timed out\n", cmd,
+                               seq);
+                       ret = -ETIMEDOUT;
+                       break;
+               }
+
+               rxfce = (u32 *) skb->cb;
+
+               if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
+                       check_seq = true;
+
+               dev_kfree_skb(skb);
+               if (check_seq)
+                       break;
+       }
+
+out:
+       mutex_unlock(&dev->mmio.mcu.mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_send);
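
mt76x02_mcu_msg_send() tags every command with a 4-bit sequence number and skips the value 0 (the increment is simply taken twice when it wraps), so a response carrying seq 0 can never be mistaken for a match. Just that counter logic, as a runnable sketch:

    #include <stdio.h>

    static unsigned int msg_seq;

    /* next 4-bit sequence number, never the reserved value 0 */
    static unsigned int next_seq(void)
    {
        unsigned int seq = ++msg_seq & 0xf;

        if (!seq)
            seq = ++msg_seq & 0xf;
        return seq;
    }

    int main(void)
    {
        for (int i = 0; i < 20; i++)
            printf("%u ", next_seq()); /* 1..15, then 1 again, never 0 */
        printf("\n");
        return 0;
    }
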
+
+int mt76x02_mcu_function_select(struct mt76_dev *dev,
+                               enum mcu_function func,
+                               u32 val, bool wait_resp)
+{
+       struct sk_buff *skb;
+       struct {
+           __le32 id;
+           __le32 value;
+       } __packed __aligned(4) msg = {
+           .id = cpu_to_le32(func),
+           .value = cpu_to_le32(val),
+       };
+
+       skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg));
+       return dev->mcu_ops->mcu_send_msg(dev, skb, CMD_FUN_SET_OP,
+                                         wait_resp);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select);
+
+int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on,
+                               bool wait_resp)
+{
+       struct sk_buff *skb;
+       struct {
+               __le32 mode;
+               __le32 level;
+       } __packed __aligned(4) msg = {
+               .mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
+               .level = cpu_to_le32(0),
+       };
+
+       skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg));
+       return dev->mcu_ops->mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP,
+                                         wait_resp);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state);
+
+int mt76x02_mcu_calibrate(struct mt76_dev *dev, int type,
+                         u32 param, bool wait)
+{
+       struct sk_buff *skb;
+       struct {
+               __le32 id;
+               __le32 value;
+       } __packed __aligned(4) msg = {
+               .id = cpu_to_le32(type),
+               .value = cpu_to_le32(param),
+       };
+       int ret;
+
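+       /* clear the completion bit; the firmware sets BIT(31) again when done */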
+       if (wait)
+               dev->bus->rmw(dev, MT_MCU_COM_REG0, BIT(31), 0);
+
+       skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg));
+       ret = dev->mcu_ops->mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
+       if (ret)
+               return ret;
+
+       if (wait &&
+           WARN_ON(!__mt76_poll_msec(dev, MT_MCU_COM_REG0,
+                                     BIT(31), BIT(31), 100)))
+               return -ETIMEDOUT;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_calibrate);
+
+int mt76x02_mcu_cleanup(struct mt76_dev *dev)
+{
+       struct sk_buff *skb;
+
+       dev->bus->wr(dev, MT_MCU_INT_LEVEL, 1);
+       usleep_range(20000, 30000);
+
+       while ((skb = skb_dequeue(&dev->mmio.mcu.res_q)) != NULL)
+               dev_kfree_skb(skb);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_cleanup);
+
+void mt76x02_set_ethtool_fwver(struct mt76_dev *dev,
+                              const struct mt76x02_fw_header *h)
+{
+       u16 bld = le16_to_cpu(h->build_ver);
+       u16 ver = le16_to_cpu(h->fw_ver);
+
+       snprintf(dev->hw->wiphy->fw_version,
+                sizeof(dev->hw->wiphy->fw_version),
+                "%d.%d.%02d-b%x",
+                (ver >> 12) & 0xf, (ver >> 8) & 0xf, ver & 0xf, bld);
+}
+EXPORT_SYMBOL_GPL(mt76x02_set_ethtool_fwver);
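
The firmware version word is nibble-packed, so with the format string above a hypothetical fw_ver of 0x1234 and build_ver of 0x0abc would render as "1.2.04-babc". The same decode in a standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t ver = 0x1234, bld = 0x0abc; /* hypothetical values */
        char buf[32];

        snprintf(buf, sizeof(buf), "%d.%d.%02d-b%x",
                 (ver >> 12) & 0xf, (ver >> 8) & 0xf, ver & 0xf, bld);
        puts(buf); /* 1.2.04-babc */
        return 0;
    }
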
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
new file mode 100644
index 0000000..ce664f8
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x02_MCU_H
+#define __MT76x02_MCU_H
+
+#define MT_MCU_RESET_CTL               0x070C
+#define MT_MCU_INT_LEVEL               0x0718
+#define MT_MCU_COM_REG0                        0x0730
+#define MT_MCU_COM_REG1                        0x0734
+#define MT_MCU_COM_REG2                        0x0738
+#define MT_MCU_COM_REG3                        0x073C
+
+#define MT_INBAND_PACKET_MAX_LEN       192
+#define MT_MCU_MEMMAP_WLAN             0x410000
+
+#define MT_MCU_PCIE_REMAP_BASE4                0x074C
+
+#define MT_MCU_SEMAPHORE_00            0x07B0
+#define MT_MCU_SEMAPHORE_01            0x07B4
+#define MT_MCU_SEMAPHORE_02            0x07B8
+#define MT_MCU_SEMAPHORE_03            0x07BC
+
+#define MT_MCU_ILM_ADDR                        0x80000
+
+enum mcu_cmd {
+       CMD_FUN_SET_OP = 1,
+       CMD_LOAD_CR = 2,
+       CMD_INIT_GAIN_OP = 3,
+       CMD_DYNC_VGA_OP = 6,
+       CMD_TDLS_CH_SW = 7,
+       CMD_BURST_WRITE = 8,
+       CMD_READ_MODIFY_WRITE = 9,
+       CMD_RANDOM_READ = 10,
+       CMD_BURST_READ = 11,
+       CMD_RANDOM_WRITE = 12,
+       CMD_LED_MODE_OP = 16,
+       CMD_POWER_SAVING_OP = 20,
+       CMD_WOW_CONFIG = 21,
+       CMD_WOW_QUERY = 22,
+       CMD_WOW_FEATURE = 24,
+       CMD_CARRIER_DETECT_OP = 28,
+       CMD_RADOR_DETECT_OP = 29,
+       CMD_SWITCH_CHANNEL_OP = 30,
+       CMD_CALIBRATION_OP = 31,
+       CMD_BEACON_OP = 32,
+       CMD_ANTENNA_OP = 33,
+};
+
+enum mcu_power_mode {
+       RADIO_OFF = 0x30,
+       RADIO_ON = 0x31,
+       RADIO_OFF_AUTO_WAKEUP = 0x32,
+       RADIO_OFF_ADVANCE = 0x33,
+       RADIO_ON_ADVANCE = 0x34,
+};
+
+enum mcu_function {
+       Q_SELECT = 1,
+       BW_SETTING = 2,
+       USB2_SW_DISCONNECT = 2,
+       USB3_SW_DISCONNECT = 3,
+       LOG_FW_DEBUG_MSG = 4,
+       GET_FW_VERSION = 5,
+};
+
+struct mt76x02_fw_header {
+       __le32 ilm_len;
+       __le32 dlm_len;
+       __le16 build_ver;
+       __le16 fw_ver;
+       u8 pad[4];
+       char build_time[16];
+};
+
+struct mt76x02_patch_header {
+       char build_time[16];
+       char platform[4];
+       char hw_version[4];
+       char patch_version[4];
+       u8 pad[2];
+};
+
+int mt76x02_mcu_cleanup(struct mt76_dev *dev);
+int mt76x02_mcu_calibrate(struct mt76_dev *dev, int type,
+                         u32 param, bool wait);
+struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len);
+int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
+                        int cmd, bool wait_resp);
+int mt76x02_mcu_function_select(struct mt76_dev *dev,
+                               enum mcu_function func,
+                               u32 val, bool wait_resp);
+int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on,
+                               bool wait_resp);
+void mt76x02_set_ethtool_fwver(struct mt76_dev *dev,
+                              const struct mt76x02_fw_header *h);
+
+#endif /* __MT76x02_MCU_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
new file mode 100644
index 0000000..1b94507
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+
+#include "mt76x02.h"
+#include "mt76x02_trace.h"
+
+static int
+mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
+                     int idx, int n_desc)
+{
+       int ret;
+
+       q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
+       q->ndesc = n_desc;
+       q->hw_idx = idx;
+
+       ret = mt76_queue_alloc(dev, q);
+       if (ret)
+               return ret;
+
+       mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));
+
+       return 0;
+}
+
+static int
+mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
+                     int idx, int n_desc, int bufsize)
+{
+       int ret;
+
+       q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
+       q->ndesc = n_desc;
+       q->buf_size = bufsize;
+
+       ret = mt76_queue_alloc(dev, q);
+       if (ret)
+               return ret;
+
+       mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));
+
+       return 0;
+}
+
+static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
+{
+       struct mt76x02_tx_status stat;
+       u8 update = 1;
+
+       while (kfifo_get(&dev->txstatus_fifo, &stat))
+               mt76x02_send_tx_status(&dev->mt76, &stat, &update);
+}
+
+static void mt76x02_tx_tasklet(unsigned long data)
+{
+       struct mt76x02_dev *dev = (struct mt76x02_dev *)data;
+       int i;
+
+       mt76x02_process_tx_status_fifo(dev);
+
+       for (i = MT_TXQ_MCU; i >= 0; i--)
+               mt76_queue_tx_cleanup(dev, i, false);
+
+       mt76x02_mac_poll_tx_status(dev, false);
+       mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
+}
+
+int mt76x02_dma_init(struct mt76x02_dev *dev)
+{
+       struct mt76_txwi_cache __maybe_unused *t;
+       int i, ret, fifo_size;
+       struct mt76_queue *q;
+       void *status_fifo;
+
+       BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x02_txwi));
+       BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);
+
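+       /* kfifo_init() needs a power-of-two size, hence the round-up */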
+       fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
+       status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
+       if (!status_fifo)
+               return -ENOMEM;
+
+       tasklet_init(&dev->tx_tasklet, mt76x02_tx_tasklet, (unsigned long) dev);
+       kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
+
+       mt76_dma_attach(&dev->mt76);
+
+       mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+
+       for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+               ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[i],
+                                           mt76_ac_to_hwq(i),
+                                           MT_TX_RING_SIZE);
+               if (ret)
+                       return ret;
+       }
+
+       ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
+                                   MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
+       if (ret)
+               return ret;
+
+       ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+                                   MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
+       if (ret)
+               return ret;
+
+       ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
+                                   MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
+       if (ret)
+               return ret;
+
+       q = &dev->mt76.q_rx[MT_RXQ_MAIN];
+       q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi);
+       ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
+                                   MT_RX_BUF_SIZE);
+       if (ret)
+               return ret;
+
+       return mt76_init_queues(dev);
+}
+EXPORT_SYMBOL_GPL(mt76x02_dma_init);
+
+void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+       struct mt76x02_dev *dev;
+
+       dev = container_of(mdev, struct mt76x02_dev, mt76);
+       mt76x02_irq_enable(dev, MT_INT_RX_DONE(q));
+}
+EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete);
+
+irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
+{
+       struct mt76x02_dev *dev = dev_instance;
+       u32 intr;
+
+       intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+       mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+       if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+               return IRQ_NONE;
+
+       trace_dev_irq(dev, intr, dev->mt76.mmio.irqmask);
+
+       intr &= dev->mt76.mmio.irqmask;
+
+       if (intr & MT_INT_TX_DONE_ALL) {
+               mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
+               tasklet_schedule(&dev->tx_tasklet);
+       }
+
+       if (intr & MT_INT_RX_DONE(0)) {
+               mt76x02_irq_disable(dev, MT_INT_RX_DONE(0));
+               napi_schedule(&dev->mt76.napi[0]);
+       }
+
+       if (intr & MT_INT_RX_DONE(1)) {
+               mt76x02_irq_disable(dev, MT_INT_RX_DONE(1));
+               napi_schedule(&dev->mt76.napi[1]);
+       }
+
+       if (intr & MT_INT_PRE_TBTT)
+               tasklet_schedule(&dev->pre_tbtt_tasklet);
+
+       /* send buffered multicast frames now */
+       if (intr & MT_INT_TBTT)
+               mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
+
+       if (intr & MT_INT_TX_STAT) {
+               mt76x02_mac_poll_tx_status(dev, true);
+               tasklet_schedule(&dev->tx_tasklet);
+       }
+
+       if (intr & MT_INT_GPTIMER) {
+               mt76x02_irq_disable(dev, MT_INT_GPTIMER);
+               tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
+       }
+
+       return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(mt76x02_irq_handler);
+
+void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
+       dev->mt76.mmio.irqmask &= ~clear;
+       dev->mt76.mmio.irqmask |= set;
+       mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
+       spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
+}
+EXPORT_SYMBOL_GPL(mt76x02_set_irq_mask);
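
mt76x02_set_irq_mask() is a clear-then-set read-modify-write on the cached mask, flushed to MT_INT_MASK_CSR under the IRQ spinlock; the mt76x02_irq_enable()/mt76x02_irq_disable() helpers used throughout this patch are presumably thin wrappers passing (0, mask) and (mask, 0) (an assumption, as their definitions live in a header not shown here). The core update, sketched:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t irqmask;

    /* clear-then-set update, mirroring mt76x02_set_irq_mask() */
    static void set_irq_mask(uint32_t clear, uint32_t set)
    {
        irqmask &= ~clear;
        irqmask |= set;
        /* the driver writes irqmask to MT_INT_MASK_CSR at this point */
    }

    #define irq_enable(mask)  set_irq_mask(0, (mask))
    #define irq_disable(mask) set_irq_mask((mask), 0)

    int main(void)
    {
        irq_enable(1u << 22 | 1u << 20);
        irq_disable(1u << 20);
        printf("mask=0x%x\n", (unsigned int)irqmask); /* 0x400000 */
        return 0;
    }
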
+
+static void mt76x02_dma_enable(struct mt76x02_dev *dev)
+{
+       u32 val;
+
+       mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+       mt76x02_wait_for_wpdma(&dev->mt76, 1000);
+       usleep_range(50, 100);
+
+       val = FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
+             MT_WPDMA_GLO_CFG_TX_DMA_EN |
+             MT_WPDMA_GLO_CFG_RX_DMA_EN;
+       mt76_set(dev, MT_WPDMA_GLO_CFG, val);
+       mt76_clear(dev, MT_WPDMA_GLO_CFG,
+                  MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+}
+
+void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
+{
+       tasklet_kill(&dev->tx_tasklet);
+       mt76_dma_cleanup(&dev->mt76);
+}
+EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup);
+
+void mt76x02_dma_disable(struct mt76x02_dev *dev)
+{
+       u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
+
+       val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
+              MT_WPDMA_GLO_CFG_BIG_ENDIAN |
+              MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
+       val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
+       mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
+}
+EXPORT_SYMBOL_GPL(mt76x02_dma_disable);
+
+void mt76x02_mac_start(struct mt76x02_dev *dev)
+{
+       mt76x02_dma_enable(dev);
+       mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
+       mt76_wr(dev, MT_MAC_SYS_CTRL,
+               MT_MAC_SYS_CTRL_ENABLE_TX |
+               MT_MAC_SYS_CTRL_ENABLE_RX);
+       mt76x02_irq_enable(dev,
+                          MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+                          MT_INT_TX_STAT);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_start);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
new file mode 100644
index 0000000..d31ce1d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+
+#include "mt76.h"
+#include "mt76x02_phy.h"
+#include "mt76x02_mac.h"
+
+void mt76x02_phy_set_rxpath(struct mt76_dev *dev)
+{
+       u32 val;
+
+       val = __mt76_rr(dev, MT_BBP(AGC, 0));
+       val &= ~BIT(4);
+
+       switch (dev->chainmask & 0xf) {
+       case 2:
+               val |= BIT(3);
+               break;
+       default:
+               val &= ~BIT(3);
+               break;
+       }
+
+       __mt76_wr(dev, MT_BBP(AGC, 0), val);
+       mb();
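+       /* read back to flush the posted write before returning */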
+       val = __mt76_rr(dev, MT_BBP(AGC, 0));
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_set_rxpath);
+
+void mt76x02_phy_set_txdac(struct mt76_dev *dev)
+{
+       int txpath;
+
+       txpath = (dev->chainmask >> 8) & 0xf;
+       switch (txpath) {
+       case 2:
+               __mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
+               break;
+       default:
+               __mt76_clear(dev, MT_BBP(TXBE, 5), 0x3);
+               break;
+       }
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_set_txdac);
+
+static u32
+mt76x02_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
+{
+       u32 val = 0;
+
+       val |= (v1 & (BIT(6) - 1)) << 0;
+       val |= (v2 & (BIT(6) - 1)) << 8;
+       val |= (v3 & (BIT(6) - 1)) << 16;
+       val |= (v4 & (BIT(6) - 1)) << 24;
+       return val;
+}
+
+int mt76x02_get_max_rate_power(struct mt76_rate_power *r)
+{
+       s8 ret = 0;
+       int i;
+
+       for (i = 0; i < sizeof(r->all); i++)
+               ret = max(ret, r->all[i]);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(mt76x02_get_max_rate_power);
+
+void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit)
+{
+       int i;
+
+       for (i = 0; i < sizeof(r->all); i++)
+               if (r->all[i] > limit)
+                       r->all[i] = limit;
+}
+EXPORT_SYMBOL_GPL(mt76x02_limit_rate_power);
+
+void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset)
+{
+       int i;
+
+       for (i = 0; i < sizeof(r->all); i++)
+               r->all[i] += offset;
+}
+EXPORT_SYMBOL_GPL(mt76x02_add_rate_power_offset);
+
+void mt76x02_phy_set_txpower(struct mt76_dev *dev, int txp_0, int txp_1)
+{
+       struct mt76_rate_power *t = &dev->rate_power;
+
+       __mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0,
+                        txp_0);
+       __mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1,
+                        txp_1);
+
+       __mt76_wr(dev, MT_TX_PWR_CFG_0,
+                 mt76x02_tx_power_mask(t->cck[0], t->cck[2], t->ofdm[0],
+                                       t->ofdm[2]));
+       __mt76_wr(dev, MT_TX_PWR_CFG_1,
+                 mt76x02_tx_power_mask(t->ofdm[4], t->ofdm[6], t->ht[0],
+                                       t->ht[2]));
+       __mt76_wr(dev, MT_TX_PWR_CFG_2,
+                 mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8],
+                                       t->ht[10]));
+       __mt76_wr(dev, MT_TX_PWR_CFG_3,
+                 mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0],
+                                       t->stbc[2]));
+       __mt76_wr(dev, MT_TX_PWR_CFG_4,
+                 mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0));
+       __mt76_wr(dev, MT_TX_PWR_CFG_7,
+                 mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7],
+                                       t->vht[9]));
+       __mt76_wr(dev, MT_TX_PWR_CFG_8,
+                 mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9]));
+       __mt76_wr(dev, MT_TX_PWR_CFG_9,
+                 mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9]));
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_set_txpower);
+
+int mt76x02_phy_get_min_avg_rssi(struct mt76_dev *dev)
+{
+       struct mt76x02_sta *sta;
+       struct mt76_wcid *wcid;
+       int i, j, min_rssi = 0;
+       s8 cur_rssi;
+
+       local_bh_disable();
+       rcu_read_lock();
+
+       for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
+               unsigned long mask = dev->wcid_mask[i];
+
+               if (!mask)
+                       continue;
+
+               for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
+                       if (!(mask & 1))
+                               continue;
+
+                       wcid = rcu_dereference(dev->wcid[j]);
+                       if (!wcid)
+                               continue;
+
+                       sta = container_of(wcid, struct mt76x02_sta, wcid);
+                       spin_lock(&dev->rx_lock);
+                       if (sta->inactive_count++ < 5)
+                               cur_rssi = ewma_signal_read(&sta->rssi);
+                       else
+                               cur_rssi = 0;
+                       spin_unlock(&dev->rx_lock);
+
+                       if (cur_rssi < min_rssi)
+                               min_rssi = cur_rssi;
+               }
+       }
+
+       rcu_read_unlock();
+       local_bh_enable();
+
+       if (!min_rssi)
+               return -75;
+
+       return min_rssi;
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_get_min_avg_rssi);
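
The RSSI scan walks dev->wcid_mask one long at a time and visits only the set bits, shifting the word as it goes while j tracks the absolute WCID index. The same traversal as a standalone sketch (the indices in the comment assume a 64-bit host):

    #include <stdio.h>

    #define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

    int main(void)
    {
        unsigned long wcid_mask[2] = { 0x11, 0x4 }; /* wcids 0, 4 and 66 */

        for (int i = 0; i < 2; i++) {
            unsigned long mask = wcid_mask[i];

            for (int j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
                if (!(mask & 1))
                    continue;
                printf("wcid %d is active\n", j);
            }
        }
        return 0;
    }
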
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
new file mode 100644
index 0000000..e70ea6e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x02_PHY_H
+#define __MT76x02_PHY_H
+
+#include "mt76x02_regs.h"
+
+void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset);
+void mt76x02_phy_set_txpower(struct mt76_dev *dev, int txp_0, int txp_1);
+void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit);
+int mt76x02_get_max_rate_power(struct mt76_rate_power *r);
+void mt76x02_phy_set_rxpath(struct mt76_dev *dev);
+void mt76x02_phy_set_txdac(struct mt76_dev *dev);
+int mt76x02_phy_get_min_avg_rssi(struct mt76_dev *dev);
+
+#endif /* __MT76x02_PHY_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
new file mode 100644
index 0000000..24d1e6d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
@@ -0,0 +1,685 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76X02_REGS_H
+#define __MT76X02_REGS_H
+
+#define MT_ASIC_VERSION                        0x0000
+
+#define MT76XX_REV_E3          0x22
+#define MT76XX_REV_E4          0x33
+
+#define MT_CMB_CTRL                    0x0020
+#define MT_CMB_CTRL_XTAL_RDY           BIT(22)
+#define MT_CMB_CTRL_PLL_LD             BIT(23)
+
+#define MT_EFUSE_CTRL                  0x0024
+#define MT_EFUSE_CTRL_AOUT             GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE             GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME     GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME      GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN              GENMASK(25, 16)
+#define MT_EFUSE_CTRL_KICK             BIT(30)
+#define MT_EFUSE_CTRL_SEL              BIT(31)
+
+#define MT_EFUSE_DATA_BASE             0x0028
+#define MT_EFUSE_DATA(_n)              (MT_EFUSE_DATA_BASE + ((_n) << 2))
+
+#define MT_COEXCFG0                    0x0040
+#define MT_COEXCFG0_COEX_EN            BIT(0)
+
+#define MT_WLAN_FUN_CTRL               0x0080
+#define MT_WLAN_FUN_CTRL_WLAN_EN       BIT(0)
+#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN   BIT(1)
+#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2)
+
+#define MT_COEXCFG3                    0x004c
+
+#define        MT_LDO_CTRL_0                   0x006c
+#define        MT_LDO_CTRL_1                   0x0070
+
+#define MT_WLAN_FUN_CTRL_WLAN_RESET    BIT(3) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ  BIT(4)
+#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL        BIT(5)
+#define MT_WLAN_FUN_CTRL_INV_ANT_SEL   BIT(6)
+#define MT_WLAN_FUN_CTRL_WAKE_HOST     BIT(7)
+
+#define MT_WLAN_FUN_CTRL_THERM_RST     BIT(8) /* MT76x2 */
+#define MT_WLAN_FUN_CTRL_THERM_CKEN    BIT(9) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_GPIO_IN       GENMASK(15, 8) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT      GENMASK(23, 16) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN   GENMASK(31, 24) /* MT76x0 */
+
+#define MT_XO_CTRL0                    0x0100
+#define MT_XO_CTRL1                    0x0104
+#define MT_XO_CTRL2                    0x0108
+#define MT_XO_CTRL3                    0x010c
+#define MT_XO_CTRL4                    0x0110
+
+#define MT_XO_CTRL5                    0x0114
+#define MT_XO_CTRL5_C2_VAL             GENMASK(14, 8)
+
+#define MT_XO_CTRL6                    0x0118
+#define MT_XO_CTRL6_C2_CTRL            GENMASK(14, 8)
+
+#define MT_XO_CTRL7                    0x011c
+
+#define MT_IOCFG_6                     0x0124
+
+#define MT_USB_U3DMA_CFG               0x9018
+#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT        GENMASK(7, 0)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8)
+#define MT_USB_DMA_CFG_UDMA_TX_WL_DROP BIT(16)
+#define MT_USB_DMA_CFG_WAKE_UP_EN      BIT(17)
+#define MT_USB_DMA_CFG_RX_DROP_OR_PAD  BIT(18)
+#define MT_USB_DMA_CFG_TX_CLR          BIT(19)
+#define MT_USB_DMA_CFG_TXOP_HALT       BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN  BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN      BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN      BIT(23)
+#define MT_USB_DMA_CFG_EP_OUT_VALID    GENMASK(29, 24)
+#define MT_USB_DMA_CFG_RX_BUSY         BIT(30)
+#define MT_USB_DMA_CFG_TX_BUSY         BIT(31)
+
+#define MT_WLAN_MTC_CTRL               0x10148
+#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
+#define MT_WLAN_MTC_CTRL_PWR_ACK       BIT(12)
+#define MT_WLAN_MTC_CTRL_PWR_ACK_S     BIT(13)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_PD    GENMASK(19, 16)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_PD    BIT(20)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_PD    BIT(21)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_PD    BIT(22)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_RB    BIT(24)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_RB    BIT(25)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_RB    BIT(26)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_RB    BIT(27)
+#define MT_WLAN_MTC_CTRL_STATE_UP      BIT(28)
+
+#define MT_INT_SOURCE_CSR              0x0200
+#define MT_INT_MASK_CSR                        0x0204
+
+#define MT_INT_RX_DONE(_n)             BIT(_n)
+#define MT_INT_RX_DONE_ALL             GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL             GENMASK(13, 4)
+#define MT_INT_TX_DONE(_n)             BIT((_n) + 4)
+#define MT_INT_RX_COHERENT             BIT(16)
+#define MT_INT_TX_COHERENT             BIT(17)
+#define MT_INT_ANY_COHERENT            BIT(18)
+#define MT_INT_MCU_CMD                 BIT(19)
+#define MT_INT_TBTT                    BIT(20)
+#define MT_INT_PRE_TBTT                        BIT(21)
+#define MT_INT_TX_STAT                 BIT(22)
+#define MT_INT_AUTO_WAKEUP             BIT(23)
+#define MT_INT_GPTIMER                 BIT(24)
+#define MT_INT_RXDELAYINT              BIT(26)
+#define MT_INT_TXDELAYINT              BIT(27)
+
+#define MT_WPDMA_GLO_CFG               0x0208
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN     BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY   BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN     BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY   BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE        GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE     BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN    BIT(7)
+#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN   GENMASK(15, 8)
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS  BIT(30)
+#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET  BIT(31)
+
+#define MT_WPDMA_RST_IDX               0x020c
+
+#define MT_WPDMA_DELAY_INT_CFG         0x0210
+
+#define MT_WMM_AIFSN           0x0214
+#define MT_WMM_AIFSN_MASK              GENMASK(3, 0)
+#define MT_WMM_AIFSN_SHIFT(_n)         ((_n) * 4)
+
+#define MT_WMM_CWMIN           0x0218
+#define MT_WMM_CWMIN_MASK              GENMASK(3, 0)
+#define MT_WMM_CWMIN_SHIFT(_n)         ((_n) * 4)
+
+#define MT_WMM_CWMAX           0x021c
+#define MT_WMM_CWMAX_MASK              GENMASK(3, 0)
+#define MT_WMM_CWMAX_SHIFT(_n)         ((_n) * 4)
+
+#define MT_WMM_TXOP_BASE               0x0220
+#define MT_WMM_TXOP(_n)                        (MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
+#define MT_WMM_TXOP_SHIFT(_n)          (((_n) & 1) * 16)
+#define MT_WMM_TXOP_MASK               GENMASK(15, 0)
+
+#define MT_WMM_CTRL                    0x0230 /* MT76x0 */
+#define MT_FCE_DMA_ADDR                        0x0230
+#define MT_FCE_DMA_LEN                 0x0234
+#define MT_USB_DMA_CFG                 0x0238
+
+#define MT_TSO_CTRL                    0x0250
+#define MT_HEADER_TRANS_CTRL_REG       0x0260
+
+#define MT_US_CYC_CFG                  0x02a4
+#define MT_US_CYC_CNT                  GENMASK(7, 0)
+
+#define MT_TX_RING_BASE                        0x0300
+#define MT_RX_RING_BASE                        0x03c0
+
+#define MT_TX_HW_QUEUE_MCU             8
+#define MT_TX_HW_QUEUE_MGMT            9
+
+#define MT_PBF_SYS_CTRL                        0x0400
+#define MT_PBF_SYS_CTRL_MCU_RESET      BIT(0)
+#define MT_PBF_SYS_CTRL_DMA_RESET      BIT(1)
+#define MT_PBF_SYS_CTRL_MAC_RESET      BIT(2)
+#define MT_PBF_SYS_CTRL_PBF_RESET      BIT(3)
+#define MT_PBF_SYS_CTRL_ASY_RESET      BIT(4)
+
+#define MT_PBF_CFG                     0x0404
+#define MT_PBF_CFG_TX0Q_EN             BIT(0)
+#define MT_PBF_CFG_TX1Q_EN             BIT(1)
+#define MT_PBF_CFG_TX2Q_EN             BIT(2)
+#define MT_PBF_CFG_TX3Q_EN             BIT(3)
+#define MT_PBF_CFG_RX0Q_EN             BIT(4)
+#define MT_PBF_CFG_RX_DROP_EN          BIT(8)
+
+#define MT_PBF_TX_MAX_PCNT             0x0408
+#define MT_PBF_RX_MAX_PCNT             0x040c
+
+#define MT_BCN_OFFSET_BASE             0x041c
+#define MT_BCN_OFFSET(_n)              (MT_BCN_OFFSET_BASE + ((_n) << 2))
+
+#define MT_RXQ_STA                     0x0430
+#define MT_TXQ_STA                     0x0434
+#define        MT_RF_CSR_CFG                   0x0500
+#define MT_RF_CSR_CFG_DATA             GENMASK(7, 0)
+#define MT_RF_CSR_CFG_REG_ID           GENMASK(13, 8)
+#define MT_RF_CSR_CFG_REG_BANK         GENMASK(17, 14)
+#define MT_RF_CSR_CFG_WR               BIT(30)
+#define MT_RF_CSR_CFG_KICK             BIT(31)
+
+#define MT_RF_BYPASS_0                 0x0504
+#define MT_RF_BYPASS_1                 0x0508
+#define MT_RF_SETTING_0                        0x050c
+
+#define MT_RF_MISC                     0x0518
+#define MT_RF_DATA_WRITE               0x0524
+
+#define MT_RF_CTRL                     0x0528
+#define MT_RF_CTRL_ADDR                        GENMASK(11, 0)
+#define MT_RF_CTRL_WRITE               BIT(12)
+#define MT_RF_CTRL_BUSY                        BIT(13)
+#define MT_RF_CTRL_IDX                 BIT(16)
+
+#define MT_RF_DATA_READ                        0x052c
+
+#define MT_COM_REG0                    0x0730
+#define MT_COM_REG1                    0x0734
+#define MT_COM_REG2                    0x0738
+#define MT_COM_REG3                    0x073C
+
+#define MT_FCE_PSE_CTRL                        0x0800
+#define MT_FCE_PARAMETERS              0x0804
+#define MT_FCE_CSO                     0x0808
+
+#define MT_FCE_L2_STUFF                        0x080c
+#define MT_FCE_L2_STUFF_HT_L2_EN       BIT(0)
+#define MT_FCE_L2_STUFF_QOS_L2_EN      BIT(1)
+#define MT_FCE_L2_STUFF_RX_STUFF_EN    BIT(2)
+#define MT_FCE_L2_STUFF_TX_STUFF_EN    BIT(3)
+#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4)
+#define MT_FCE_L2_STUFF_MVINV_BSWAP    BIT(5)
+#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8)
+#define MT_FCE_L2_STUFF_TS_LEN_EN      GENMASK(23, 16)
+#define MT_FCE_L2_STUFF_OTHER_PORT     GENMASK(25, 24)
+
+#define MT_FCE_WLAN_FLOW_CONTROL1      0x0824
+
+#define MT_TX_CPU_FROM_FCE_BASE_PTR    0x09a0
+#define MT_TX_CPU_FROM_FCE_MAX_COUNT   0x09a4
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX        0x09a8
+#define MT_FCE_PDMA_GLOBAL_CONF                0x09c4
+#define MT_FCE_SKIP_FS                 0x0a6c
+
+#define MT_PAUSE_ENABLE_CONTROL1       0x0a38
+
+#define MT_MAC_CSR0                    0x1000
+
+#define MT_MAC_SYS_CTRL                        0x1004
+#define MT_MAC_SYS_CTRL_RESET_CSR      BIT(0)
+#define MT_MAC_SYS_CTRL_RESET_BBP      BIT(1)
+#define MT_MAC_SYS_CTRL_ENABLE_TX      BIT(2)
+#define MT_MAC_SYS_CTRL_ENABLE_RX      BIT(3)
+
+#define MT_MAC_ADDR_DW0                        0x1008
+#define MT_MAC_ADDR_DW1                        0x100c
+#define MT_MAC_ADDR_DW1_U2ME_MASK      GENMASK(23, 16)
+
+#define MT_MAC_BSSID_DW0               0x1010
+#define MT_MAC_BSSID_DW1               0x1014
+#define MT_MAC_BSSID_DW1_ADDR          GENMASK(15, 0)
+#define MT_MAC_BSSID_DW1_MBSS_MODE     GENMASK(17, 16)
+#define MT_MAC_BSSID_DW1_MBEACON_N     GENMASK(20, 18)
+#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT        BIT(21)
+#define MT_MAC_BSSID_DW1_MBSS_MODE_B2  BIT(22)
+#define MT_MAC_BSSID_DW1_MBEACON_N_B3  BIT(23)
+#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24)
+
+#define MT_MAX_LEN_CFG                 0x1018
+#define MT_MAX_LEN_CFG_AMPDU           GENMASK(13, 12)
+
+#define MT_LED_CFG                     0x102c
+
+#define MT_AMPDU_MAX_LEN_20M1S         0x1030
+#define MT_AMPDU_MAX_LEN_20M2S         0x1034
+#define MT_AMPDU_MAX_LEN_40M1S         0x1038
+#define MT_AMPDU_MAX_LEN_40M2S         0x103c
+#define MT_AMPDU_MAX_LEN               0x1040
+
+#define MT_WCID_DROP_BASE              0x106c
+#define MT_WCID_DROP(_n)               (MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
+#define MT_WCID_DROP_MASK(_n)          BIT((_n) % 32)
+
+#define MT_BCN_BYPASS_MASK             0x108c
+
+#define MT_MAC_APC_BSSID_BASE          0x1090
+#define MT_MAC_APC_BSSID_L(_n)         (MT_MAC_APC_BSSID_BASE + ((_n) * 8))
+#define MT_MAC_APC_BSSID_H(_n)         (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
+#define MT_MAC_APC_BSSID_H_ADDR                GENMASK(15, 0)
+#define MT_MAC_APC_BSSID0_H_EN         BIT(16)
+
+#define MT_XIFS_TIME_CFG               0x1100
+#define MT_XIFS_TIME_CFG_CCK_SIFS      GENMASK(7, 0)
+#define MT_XIFS_TIME_CFG_OFDM_SIFS     GENMASK(15, 8)
+#define MT_XIFS_TIME_CFG_OFDM_XIFS     GENMASK(19, 16)
+#define MT_XIFS_TIME_CFG_EIFS          GENMASK(28, 20)
+#define MT_XIFS_TIME_CFG_BB_RXEND_EN   BIT(29)
+
+#define MT_BKOFF_SLOT_CFG              0x1104
+#define MT_BKOFF_SLOT_CFG_SLOTTIME     GENMASK(7, 0)
+#define MT_BKOFF_SLOT_CFG_CC_DELAY     GENMASK(11, 8)
+
+#define MT_CH_TIME_CFG                 0x110c
+#define MT_CH_TIME_CFG_TIMER_EN                BIT(0)
+#define MT_CH_TIME_CFG_TX_AS_BUSY      BIT(1)
+#define MT_CH_TIME_CFG_RX_AS_BUSY      BIT(2)
+#define MT_CH_TIME_CFG_NAV_AS_BUSY     BIT(3)
+#define MT_CH_TIME_CFG_EIFS_AS_BUSY    BIT(4)
+#define MT_CH_TIME_CFG_MDRDY_CNT_EN    BIT(5)
+#define MT_CH_TIME_CFG_CH_TIMER_CLR    GENMASK(9, 8)
+#define MT_CH_TIME_CFG_MDRDY_CLR       GENMASK(11, 10)
+
+#define MT_PBF_LIFE_TIMER              0x1110
+
+#define MT_BEACON_TIME_CFG             0x1114
+#define MT_BEACON_TIME_CFG_INTVAL      GENMASK(15, 0)
+#define MT_BEACON_TIME_CFG_TIMER_EN    BIT(16)
+#define MT_BEACON_TIME_CFG_SYNC_MODE   GENMASK(18, 17)
+#define MT_BEACON_TIME_CFG_TBTT_EN     BIT(19)
+#define MT_BEACON_TIME_CFG_BEACON_TX   BIT(20)
+#define MT_BEACON_TIME_CFG_TSF_COMP    GENMASK(31, 24)
+
+#define MT_TBTT_SYNC_CFG               0x1118
+#define MT_TBTT_TIMER_CFG              0x1124
+
+#define MT_INT_TIMER_CFG               0x1128
+#define MT_INT_TIMER_CFG_PRE_TBTT      GENMASK(15, 0)
+#define MT_INT_TIMER_CFG_GP_TIMER      GENMASK(31, 16)
+
+#define MT_INT_TIMER_EN                        0x112c
+#define MT_INT_TIMER_EN_PRE_TBTT_EN    BIT(0)
+#define MT_INT_TIMER_EN_GP_TIMER_EN    BIT(1)
+
+#define MT_CH_IDLE                     0x1130
+#define MT_CH_BUSY                     0x1134
+#define MT_EXT_CH_BUSY                 0x1138
+#define MT_ED_CCA_TIMER                        0x1140
+
+#define MT_MAC_STATUS                  0x1200
+#define MT_MAC_STATUS_TX               BIT(0)
+#define MT_MAC_STATUS_RX               BIT(1)
+
+#define MT_PWR_PIN_CFG                 0x1204
+#define MT_AUX_CLK_CFG                 0x120c
+
+#define MT_BB_PA_MODE_CFG0             0x1214
+#define MT_BB_PA_MODE_CFG1             0x1218
+#define MT_RF_PA_MODE_CFG0             0x121c
+#define MT_RF_PA_MODE_CFG1             0x1220
+
+#define MT_RF_PA_MODE_ADJ0             0x1228
+#define MT_RF_PA_MODE_ADJ1             0x122c
+
+#define MT_DACCLK_EN_DLY_CFG           0x1264
+
+#define MT_EDCA_CFG_BASE               0x1300
+#define MT_EDCA_CFG_AC(_n)             (MT_EDCA_CFG_BASE + ((_n) << 2))
+#define MT_EDCA_CFG_TXOP               GENMASK(7, 0)
+#define MT_EDCA_CFG_AIFSN              GENMASK(11, 8)
+#define MT_EDCA_CFG_CWMIN              GENMASK(15, 12)
+#define MT_EDCA_CFG_CWMAX              GENMASK(19, 16)
+
+#define MT_TX_PWR_CFG_0                        0x1314
+#define MT_TX_PWR_CFG_1                        0x1318
+#define MT_TX_PWR_CFG_2                        0x131c
+#define MT_TX_PWR_CFG_3                        0x1320
+#define MT_TX_PWR_CFG_4                        0x1324
+#define MT_TX_PIN_CFG                  0x1328
+#define MT_TX_PIN_CFG_TXANT            GENMASK(3, 0)
+
+#define MT_TX_BAND_CFG                 0x132c
+#define MT_TX_BAND_CFG_UPPER_40M       BIT(0)
+#define MT_TX_BAND_CFG_5G              BIT(1)
+#define MT_TX_BAND_CFG_2G              BIT(2)
+
+#define MT_HT_FBK_TO_LEGACY            0x1384
+#define MT_TX_MPDU_ADJ_INT             0x1388
+
+#define MT_TX_PWR_CFG_7                        0x13d4
+#define MT_TX_PWR_CFG_8                        0x13d8
+#define MT_TX_PWR_CFG_9                        0x13dc
+
+#define MT_TX_SW_CFG0                  0x1330
+#define MT_TX_SW_CFG1                  0x1334
+#define MT_TX_SW_CFG2                  0x1338
+
+#define MT_TXOP_CTRL_CFG               0x1340
+#define MT_TXOP_TRUN_EN                        GENMASK(5, 0)
+#define MT_TXOP_EXT_CCA_DLY            GENMASK(15, 8)
+
+#define MT_TX_RTS_CFG                  0x1344
+#define MT_TX_RTS_CFG_RETRY_LIMIT      GENMASK(7, 0)
+#define MT_TX_RTS_CFG_THRESH           GENMASK(23, 8)
+#define MT_TX_RTS_FALLBACK             BIT(24)
+
+#define MT_TX_TIMEOUT_CFG              0x1348
+#define MT_TX_TIMEOUT_CFG_ACKTO                GENMASK(15, 8)
+
+#define MT_TX_RETRY_CFG                        0x134c
+#define MT_TX_LINK_CFG                 0x1350
+#define MT_VHT_HT_FBK_CFG0             0x1354
+#define MT_VHT_HT_FBK_CFG1             0x1358
+#define MT_LG_FBK_CFG0                 0x135c
+#define MT_LG_FBK_CFG1                 0x1360
+
+#define MT_PROT_CFG_RATE               GENMASK(15, 0)
+#define MT_PROT_CFG_CTRL               GENMASK(17, 16)
+#define MT_PROT_CFG_NAV                        GENMASK(19, 18)
+#define MT_PROT_CFG_TXOP_ALLOW         GENMASK(25, 20)
+#define MT_PROT_CFG_RTS_THRESH         BIT(26)
+
+#define MT_CCK_PROT_CFG                        0x1364
+#define MT_OFDM_PROT_CFG               0x1368
+#define MT_MM20_PROT_CFG               0x136c
+#define MT_MM40_PROT_CFG               0x1370
+#define MT_GF20_PROT_CFG               0x1374
+#define MT_GF40_PROT_CFG               0x1378
+
+#define MT_PROT_RATE                   GENMASK(15, 0)
+#define MT_PROT_CTRL_RTS_CTS           BIT(16)
+#define MT_PROT_CTRL_CTS2SELF          BIT(17)
+#define MT_PROT_NAV_SHORT              BIT(18)
+#define MT_PROT_NAV_LONG               BIT(19)
+#define MT_PROT_TXOP_ALLOW_CCK         BIT(20)
+#define MT_PROT_TXOP_ALLOW_OFDM                BIT(21)
+#define MT_PROT_TXOP_ALLOW_MM20                BIT(22)
+#define MT_PROT_TXOP_ALLOW_MM40                BIT(23)
+#define MT_PROT_TXOP_ALLOW_GF20                BIT(24)
+#define MT_PROT_TXOP_ALLOW_GF40                BIT(25)
+#define MT_PROT_RTS_THR_EN             BIT(26)
+#define MT_PROT_RATE_CCK_11            0x0003
+#define MT_PROT_RATE_OFDM_6            0x4000
+#define MT_PROT_RATE_OFDM_24           0x4004
+#define MT_PROT_RATE_DUP_OFDM_24       0x4084
+#define MT_PROT_TXOP_ALLOW_ALL         GENMASK(25, 20)
+#define MT_PROT_TXOP_ALLOW_BW20                (MT_PROT_TXOP_ALLOW_ALL &       \
+                                        ~MT_PROT_TXOP_ALLOW_MM40 &     \
+                                        ~MT_PROT_TXOP_ALLOW_GF40)
+
+#define MT_EXP_ACK_TIME                        0x1380
+
+#define MT_TX_PWR_CFG_0_EXT            0x1390
+#define MT_TX_PWR_CFG_1_EXT            0x1394
+
+#define MT_TX_FBK_LIMIT                        0x1398
+#define MT_TX_FBK_LIMIT_MPDU_FBK       GENMASK(7, 0)
+#define MT_TX_FBK_LIMIT_AMPDU_FBK      GENMASK(15, 8)
+#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR  BIT(16)
+#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17)
+#define MT_TX_FBK_LIMIT_RATE_LUT       BIT(18)
+
+#define MT_TX0_RF_GAIN_CORR            0x13a0
+#define MT_TX1_RF_GAIN_CORR            0x13a4
+#define MT_TX0_RF_GAIN_ATTEN           0x13a8 /* MT76x0 */
+
+#define MT_TX_ALC_CFG_0                        0x13b0
+#define MT_TX_ALC_CFG_0_CH_INIT_0      GENMASK(5, 0)
+#define MT_TX_ALC_CFG_0_CH_INIT_1      GENMASK(13, 8)
+#define MT_TX_ALC_CFG_0_LIMIT_0                GENMASK(21, 16)
+#define MT_TX_ALC_CFG_0_LIMIT_1                GENMASK(29, 24)
+
+#define MT_TX_ALC_CFG_1                        0x13b4
+#define MT_TX_ALC_CFG_1_TEMP_COMP      GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_2                        0x13a8
+#define MT_TX_ALC_CFG_2_TEMP_COMP      GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_3                        0x13ac
+#define MT_TX_ALC_CFG_4                        0x13c0
+#define MT_TX_ALC_CFG_4_LOWGAIN_CH_EN  BIT(31)
+#define MT_TX0_BB_GAIN_ATTEN           0x13c0 /* MT76x0 */
+
+#define MT_TX_ALC_VGA3                 0x13c8
+
+#define MT_TX_PROT_CFG6                        0x13e0
+#define MT_TX_PROT_CFG7                        0x13e4
+#define MT_TX_PROT_CFG8                        0x13e8
+
+#define MT_PIFS_TX_CFG                 0x13ec
+
+#define MT_RX_FILTR_CFG                        0x1400
+
+#define MT_RX_FILTR_CFG_CRC_ERR                BIT(0)
+#define MT_RX_FILTR_CFG_PHY_ERR                BIT(1)
+#define MT_RX_FILTR_CFG_PROMISC                BIT(2)
+#define MT_RX_FILTR_CFG_OTHER_BSS      BIT(3)
+#define MT_RX_FILTR_CFG_VER_ERR                BIT(4)
+#define MT_RX_FILTR_CFG_MCAST          BIT(5)
+#define MT_RX_FILTR_CFG_BCAST          BIT(6)
+#define MT_RX_FILTR_CFG_DUP            BIT(7)
+#define MT_RX_FILTR_CFG_CFACK          BIT(8)
+#define MT_RX_FILTR_CFG_CFEND          BIT(9)
+#define MT_RX_FILTR_CFG_ACK            BIT(10)
+#define MT_RX_FILTR_CFG_CTS            BIT(11)
+#define MT_RX_FILTR_CFG_RTS            BIT(12)
+#define MT_RX_FILTR_CFG_PSPOLL         BIT(13)
+#define MT_RX_FILTR_CFG_BA             BIT(14)
+#define MT_RX_FILTR_CFG_BAR            BIT(15)
+#define MT_RX_FILTR_CFG_CTRL_RSV       BIT(16)
+
+#define MT_AUTO_RSP_CFG                        0x1404
+#define MT_AUTO_RSP_PREAMB_SHORT       BIT(4)
+#define MT_LEGACY_BASIC_RATE           0x1408
+#define MT_HT_BASIC_RATE               0x140c
+
+#define MT_HT_CTRL_CFG                 0x1410
+#define MT_RX_PARSER_CFG               0x1418
+#define MT_RX_PARSER_RX_SET_NAV_ALL    BIT(0)
+
+#define MT_EXT_CCA_CFG                 0x141c
+#define MT_EXT_CCA_CFG_CCA0            GENMASK(1, 0)
+#define MT_EXT_CCA_CFG_CCA1            GENMASK(3, 2)
+#define MT_EXT_CCA_CFG_CCA2            GENMASK(5, 4)
+#define MT_EXT_CCA_CFG_CCA3            GENMASK(7, 6)
+#define MT_EXT_CCA_CFG_CCA_MASK                GENMASK(11, 8)
+#define MT_EXT_CCA_CFG_ED_CCA_MASK     GENMASK(15, 12)
+
+#define MT_TX_SW_CFG3                  0x1478
+
+#define MT_PN_PAD_MODE                 0x150c
+
+#define MT_TXOP_HLDR_ET                        0x1608
+
+#define MT_PROT_AUTO_TX_CFG            0x1648
+#define MT_PROT_AUTO_TX_CFG_PROT_PADJ  GENMASK(11, 8)
+#define MT_PROT_AUTO_TX_CFG_AUTO_PADJ  GENMASK(27, 24)
+
+#define MT_RX_STAT_0                   0x1700
+#define MT_RX_STAT_0_CRC_ERRORS                GENMASK(15, 0)
+#define MT_RX_STAT_0_PHY_ERRORS                GENMASK(31, 16)
+
+#define MT_RX_STAT_1                   0x1704
+#define MT_RX_STAT_1_CCA_ERRORS                GENMASK(15, 0)
+#define MT_RX_STAT_1_PLCP_ERRORS       GENMASK(31, 16)
+
+#define MT_RX_STAT_2                   0x1708
+#define MT_RX_STAT_2_DUP_ERRORS                GENMASK(15, 0)
+#define MT_RX_STAT_2_OVERFLOW_ERRORS   GENMASK(31, 16)
+
+#define MT_TX_STA_0                    0x170c
+#define MT_TX_STA_1                    0x1710
+#define MT_TX_STA_2                    0x1714
+
+#define MT_TX_STAT_FIFO                        0x1718
+#define MT_TX_STAT_FIFO_VALID          BIT(0)
+#define MT_TX_STAT_FIFO_SUCCESS                BIT(5)
+#define MT_TX_STAT_FIFO_AGGR           BIT(6)
+#define MT_TX_STAT_FIFO_ACKREQ         BIT(7)
+#define MT_TX_STAT_FIFO_WCID           GENMASK(15, 8)
+#define MT_TX_STAT_FIFO_RATE           GENMASK(31, 16)
+
+#define MT_TX_AGG_STAT                 0x171c
+
+#define MT_TX_AGG_CNT_BASE0            0x1720
+#define MT_MPDU_DENSITY_CNT            0x1740
+#define MT_TX_AGG_CNT_BASE1            0x174c
+
+#define MT_TX_AGG_CNT(_id)             ((_id) < 8 ?                    \
+                                        MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
+                                        MT_TX_AGG_CNT_BASE1 + (((_id) - 8) << 2))
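+/* illustrative expansion: MT_TX_AGG_CNT(9) -> MT_TX_AGG_CNT_BASE1 + (1 << 2) = 0x1750 */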
+
+#define MT_TX_STAT_FIFO_EXT            0x1798
+#define MT_TX_STAT_FIFO_EXT_RETRY      GENMASK(7, 0)
+#define MT_TX_STAT_FIFO_EXT_PKTID      GENMASK(15, 8)
+
+#define MT_WCID_TX_RATE_BASE           0x1c00
+#define MT_WCID_TX_RATE(_i)            (MT_WCID_TX_RATE_BASE + ((_i) << 3))
+
+#define MT_BBP_CORE_BASE               0x2000
+#define MT_BBP_IBI_BASE                        0x2100
+#define MT_BBP_AGC_BASE                        0x2300
+#define MT_BBP_TXC_BASE                        0x2400
+#define MT_BBP_RXC_BASE                        0x2500
+#define MT_BBP_TXO_BASE                        0x2600
+#define MT_BBP_TXBE_BASE               0x2700
+#define MT_BBP_RXFE_BASE               0x2800
+#define MT_BBP_RXO_BASE                        0x2900
+#define MT_BBP_DFS_BASE                        0x2a00
+#define MT_BBP_TR_BASE                 0x2b00
+#define MT_BBP_CAL_BASE                        0x2c00
+#define MT_BBP_DSC_BASE                        0x2e00
+#define MT_BBP_PFMU_BASE               0x2f00
+
+#define MT_BBP(_type, _n)              (MT_BBP_##_type##_BASE + ((_n) << 2))
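+/* illustrative expansion: MT_BBP(AGC, 8) -> MT_BBP_AGC_BASE + (8 << 2) = 0x2320 */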
+
+#define MT_BBP_CORE_R1_BW              GENMASK(4, 3)
+
+#define MT_BBP_AGC_R0_CTRL_CHAN                GENMASK(9, 8)
+#define MT_BBP_AGC_R0_BW               GENMASK(14, 12)
+
+/* AGC, R4/R5 */
+#define MT_BBP_AGC_LNA_HIGH_GAIN       GENMASK(21, 16)
+#define MT_BBP_AGC_LNA_MID_GAIN                GENMASK(13, 8)
+#define MT_BBP_AGC_LNA_LOW_GAIN                GENMASK(5, 0)
+
+/* AGC, R6/R7 */
+#define MT_BBP_AGC_LNA_ULOW_GAIN       GENMASK(5, 0)
+
+/* AGC, R8/R9 */
+#define MT_BBP_AGC_LNA_GAIN_MODE       GENMASK(7, 6)
+#define MT_BBP_AGC_GAIN                        GENMASK(14, 8)
+
+#define MT_BBP_AGC20_RSSI0             GENMASK(7, 0)
+#define MT_BBP_AGC20_RSSI1             GENMASK(15, 8)
+
+#define MT_BBP_TXBE_R0_CTRL_CHAN       GENMASK(1, 0)
+
+#define MT_WCID_ADDR_BASE              0x1800
+#define MT_WCID_ADDR(_n)               (MT_WCID_ADDR_BASE + (_n) * 8)
+
+#define MT_SRAM_BASE                   0x4000
+
+#define MT_WCID_KEY_BASE               0x8000
+#define MT_WCID_KEY(_n)                        (MT_WCID_KEY_BASE + (_n) * 32)
+
+#define MT_WCID_IV_BASE                        0xa000
+#define MT_WCID_IV(_n)                 (MT_WCID_IV_BASE + (_n) * 8)
+
+#define MT_WCID_ATTR_BASE              0xa800
+#define MT_WCID_ATTR(_n)               (MT_WCID_ATTR_BASE + (_n) * 4)
+
+#define MT_WCID_ATTR_PAIRWISE          BIT(0)
+#define MT_WCID_ATTR_PKEY_MODE         GENMASK(3, 1)
+#define MT_WCID_ATTR_BSS_IDX           GENMASK(6, 4)
+#define MT_WCID_ATTR_RXWI_UDF          GENMASK(9, 7)
+#define MT_WCID_ATTR_PKEY_MODE_EXT     BIT(10)
+#define MT_WCID_ATTR_BSS_IDX_EXT       BIT(11)
+#define MT_WCID_ATTR_WAPI_MCBC         BIT(15)
+#define MT_WCID_ATTR_WAPI_KEYID                GENMASK(31, 24)
+
+#define MT_SKEY_BASE_0                 0xac00
+#define MT_SKEY_BASE_1                 0xb400
+#define MT_SKEY_0(_bss, _idx)          (MT_SKEY_BASE_0 + (4 * (_bss) + (_idx)) * 32)
+#define MT_SKEY_1(_bss, _idx)          (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + (_idx)) * 32)
+#define MT_SKEY(_bss, _idx)            (((_bss) & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
+
+#define MT_SKEY_MODE_BASE_0            0xb000
+#define MT_SKEY_MODE_BASE_1            0xb3f0
+#define MT_SKEY_MODE_0(_bss)           (MT_SKEY_MODE_BASE_0 + (((_bss) / 2) << 2))
+#define MT_SKEY_MODE_1(_bss)           (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
+#define MT_SKEY_MODE(_bss)             (((_bss) & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
+#define MT_SKEY_MODE_MASK              GENMASK(3, 0)
+#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * ((_bss) & 1)))
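+/* illustrative expansion: MT_SKEY(9, 2) -> MT_SKEY_BASE_1 + (4 * 1 + 2) * 32 = 0xb4c0 */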
+
+#define MT_BEACON_BASE                 0xc000
+
+#define MT_TEMP_SENSOR                 0x1d000
+#define MT_TEMP_SENSOR_VAL             GENMASK(6, 0)
+
+struct mt76_wcid_addr {
+       u8 macaddr[6];
+       __le16 ba_mask;
+} __packed __aligned(4);
+
+struct mt76_wcid_key {
+       u8 key[16];
+       u8 tx_mic[8];
+       u8 rx_mic[8];
+} __packed __aligned(4);
+
+enum mt76x02_cipher_type {
+       MT_CIPHER_NONE,
+       MT_CIPHER_WEP40,
+       MT_CIPHER_WEP104,
+       MT_CIPHER_TKIP,
+       MT_CIPHER_AES_CCMP,
+       MT_CIPHER_CKIP40,
+       MT_CIPHER_CKIP104,
+       MT_CIPHER_CKIP128,
+       MT_CIPHER_WAPI,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_trace.c b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.c
new file mode 100644 (file)
index 0000000..5b42d2c
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "mt76x02_trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h
new file mode 100644 (file)
index 0000000..713f12d
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(__MT76x02_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76x02_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76x02.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76x02
+
+#define MAXNAME                32
+#define DEV_ENTRY      __array(char, wiphy_name, MAXNAME)
+#define DEV_ASSIGN     strlcpy(__entry->wiphy_name, wiphy_name(mt76_hw(dev)->wiphy), MAXNAME)
+#define DEV_PR_FMT     "%s"
+#define DEV_PR_ARG     __entry->wiphy_name
+
+#define TXID_ENTRY     __field(u8, wcid) __field(u8, pktid)
+#define TXID_ASSIGN    __entry->wcid = wcid; __entry->pktid = pktid
+#define TXID_PR_FMT    " [%d:%d]"
+#define TXID_PR_ARG    __entry->wcid, __entry->pktid
+
+DECLARE_EVENT_CLASS(dev_evt,
+       TP_PROTO(struct mt76x02_dev *dev),
+       TP_ARGS(dev),
+       TP_STRUCT__entry(
+               DEV_ENTRY
+       ),
+       TP_fast_assign(
+               DEV_ASSIGN;
+       ),
+       TP_printk(DEV_PR_FMT, DEV_PR_ARG)
+);
+
+DECLARE_EVENT_CLASS(dev_txid_evt,
+       TP_PROTO(struct mt76x02_dev *dev, u8 wcid, u8 pktid),
+       TP_ARGS(dev, wcid, pktid),
+       TP_STRUCT__entry(
+               DEV_ENTRY
+               TXID_ENTRY
+       ),
+       TP_fast_assign(
+               DEV_ASSIGN;
+               TXID_ASSIGN;
+       ),
+       TP_printk(
+               DEV_PR_FMT TXID_PR_FMT,
+               DEV_PR_ARG, TXID_PR_ARG
+       )
+);
+
+DEFINE_EVENT(dev_txid_evt, mac_txdone_add,
+       TP_PROTO(struct mt76x02_dev *dev, u8 wcid, u8 pktid),
+       TP_ARGS(dev, wcid, pktid)
+);
+
+DEFINE_EVENT(dev_evt, mac_txstat_poll,
+       TP_PROTO(struct mt76x02_dev *dev),
+       TP_ARGS(dev)
+);
+
+TRACE_EVENT(mac_txstat_fetch,
+       TP_PROTO(struct mt76x02_dev *dev,
+                struct mt76x02_tx_status *stat),
+
+       TP_ARGS(dev, stat),
+
+       TP_STRUCT__entry(
+               DEV_ENTRY
+               TXID_ENTRY
+               __field(bool, success)
+               __field(bool, aggr)
+               __field(bool, ack_req)
+               __field(u16, rate)
+               __field(u8, retry)
+       ),
+
+       TP_fast_assign(
+               DEV_ASSIGN;
+               __entry->success = stat->success;
+               __entry->aggr = stat->aggr;
+               __entry->ack_req = stat->ack_req;
+               __entry->wcid = stat->wcid;
+               __entry->pktid = stat->pktid;
+               __entry->rate = stat->rate;
+               __entry->retry = stat->retry;
+       ),
+
+       TP_printk(
+               DEV_PR_FMT TXID_PR_FMT
+               " success:%d aggr:%d ack_req:%d"
+               " rate:%04x retry:%d",
+               DEV_PR_ARG, TXID_PR_ARG,
+               __entry->success, __entry->aggr, __entry->ack_req,
+               __entry->rate, __entry->retry
+       )
+);
+
+TRACE_EVENT(dev_irq,
+       TP_PROTO(struct mt76x02_dev *dev, u32 val, u32 mask),
+
+       TP_ARGS(dev, val, mask),
+
+       TP_STRUCT__entry(
+               DEV_ENTRY
+               __field(u32, val)
+               __field(u32, mask)
+       ),
+
+       TP_fast_assign(
+               DEV_ASSIGN;
+               __entry->val = val;
+               __entry->mask = mask;
+       ),
+
+       TP_printk(
+               DEV_PR_FMT " %08x & %08x",
+               DEV_PR_ARG, __entry->val, __entry->mask
+       )
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mt76x02_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
new file mode 100644 (file)
index 0000000..8303772
--- /dev/null
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+
+#include "mt76x02.h"
+
+void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+               struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct mt76x02_dev *dev = hw->priv;
+       struct ieee80211_vif *vif = info->control.vif;
+       struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+
+       if (control->sta) {
+               struct mt76x02_sta *msta;
+
+               msta = (struct mt76x02_sta *)control->sta->drv_priv;
+               wcid = &msta->wcid;
+               /* sw-encrypted frame while the wcid holds a hw key:
+                * bypass the station entry
+                */
+               if (!info->control.hw_key && wcid->hw_key_idx != 0xff)
+                       control->sta = NULL;
+       }
+
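+       /* no per-station entry: fall back to the per-vif group wcid */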
+       if (vif && !control->sta) {
+               struct mt76x02_vif *mvif;
+
+               mvif = (struct mt76x02_vif *)vif->drv_priv;
+               wcid = &mvif->group_wcid;
+       }
+
+       mt76_tx(&dev->mt76, control->sta, wcid, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx);
+
+void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+                         struct sk_buff *skb)
+{
+       struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+       void *rxwi = skb->data;
+
+       if (q == MT_RXQ_MCU) {
+               /* the MCU response queue is consumed only by the mmio code */
+               skb_queue_tail(&mdev->mmio.mcu.res_q, skb);
+               wake_up(&mdev->mmio.mcu.wait);
+               return;
+       }
+
+       skb_pull(skb, sizeof(struct mt76x02_rxwi));
+       if (mt76x02_mac_process_rx(dev, skb, rxwi)) {
+               dev_kfree_skb(skb);
+               return;
+       }
+
+       mt76_rx(mdev, q, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02_queue_rx_skb);
+
+s8 mt76x02_tx_get_max_txpwr_adj(struct mt76_dev *dev,
+                               const struct ieee80211_tx_rate *rate)
+{
+       s8 max_txpwr;
+
+       if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+               u8 mcs = ieee80211_rate_get_vht_mcs(rate);
+
+               if (mcs == 8 || mcs == 9) {
+                       max_txpwr = dev->rate_power.vht[8];
+               } else {
+                       u8 nss, idx;
+
+                       nss = ieee80211_rate_get_vht_nss(rate);
+                       idx = ((nss - 1) << 3) + mcs;
+                       max_txpwr = dev->rate_power.ht[idx & 0xf];
+               }
+       } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+               max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
+       } else {
+               enum nl80211_band band = dev->chandef.chan->band;
+
+               if (band == NL80211_BAND_2GHZ) {
+                       const struct ieee80211_rate *r;
+                       struct wiphy *wiphy = dev->hw->wiphy;
+                       struct mt76_rate_power *rp = &dev->rate_power;
+
+                       r = &wiphy->bands[band]->bitrates[rate->idx];
+                       if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
+                               max_txpwr = rp->cck[r->hw_value & 0x3];
+                       else
+                               max_txpwr = rp->ofdm[r->hw_value & 0x7];
+               } else {
+                       max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
+               }
+       }
+
+       return max_txpwr;
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_get_max_txpwr_adj);
+
+s8 mt76x02_tx_get_txpwr_adj(struct mt76_dev *mdev, s8 txpwr, s8 max_txpwr_adj)
+{
+       struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+
+       txpwr = min_t(s8, txpwr, dev->mt76.txpower_conf);
+       txpwr -= (dev->target_power + dev->target_power_delta[0]);
+       txpwr = min_t(s8, txpwr, max_txpwr_adj);
+
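+       /* the returned TPC code keeps positive adjustments as 0..7;
+        * negative ones map to 8..15 via (txpwr + 32) / 2, flooring
+        * at 8 below -16
+        */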
+       if (!dev->enable_tpc)
+               return 0;
+       else if (txpwr >= 0)
+               return min_t(s8, txpwr, 7);
+       else
+               return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_get_txpwr_adj);
+
+void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr)
+{
+       s8 txpwr_adj;
+
+       txpwr_adj = mt76x02_tx_get_txpwr_adj(&dev->mt76, txpwr,
+                                            dev->mt76.rate_power.ofdm[4]);
+       mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+                      MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
+       mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+                      MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_set_txpwr_auto);
+
+void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+       if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+               ieee80211_free_txskb(dev->hw, skb);
+       } else {
+               ieee80211_tx_info_clear_status(info);
+               info->status.rates[0].idx = -1;
+               info->flags |= IEEE80211_TX_STAT_ACK;
+               ieee80211_tx_status(dev->hw, skb);
+       }
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_complete);
+
+bool mt76x02_tx_status_data(struct mt76_dev *dev, u8 *update)
+{
+       struct mt76x02_tx_status stat;
+
+       if (!mt76x02_mac_load_tx_status(dev, &stat))
+               return false;
+
+       mt76x02_send_tx_status(dev, &stat, update);
+
+       return true;
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_status_data);
+
+int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
+                          struct sk_buff *skb, struct mt76_queue *q,
+                          struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+                          u32 *tx_info)
+{
+       struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       int qsel = MT_QSEL_EDCA;
+       int ret;
+
+       if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
+               mt76x02_mac_wcid_set_drop(&dev->mt76, wcid->idx, false);
+
+       mt76x02_mac_write_txwi(mdev, txwi, skb, wcid, sta, skb->len);
+
+       ret = mt76x02_insert_hdr_pad(skb);
+       if (ret < 0)
+               return ret;
+
+       if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+               qsel = MT_QSEL_MGMT;
+
+       *tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
+                  MT_TXD_INFO_80211;
+
+       if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
+               *tx_info |= MT_TXD_INFO_WIV;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_prepare_skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
new file mode 100644 (file)
index 0000000..6b21383
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x02_USB_H
+#define __MT76x02_USB_H
+
+#include "mt76.h"
+
+void mt76x02u_init_mcu(struct mt76_dev *dev);
+void mt76x02u_mcu_fw_reset(struct mt76_dev *dev);
+int mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
+                             int data_len, u32 max_payload, u32 offset);
+
+int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
+int mt76x02u_tx_prepare_skb(struct mt76_dev *dev, void *data,
+                           struct sk_buff *skb, struct mt76_queue *q,
+                           struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+                           u32 *tx_info);
+void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+                             struct mt76_queue_entry *e, bool flush);
+#endif /* __MT76x02_USB_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
new file mode 100644 (file)
index 0000000..7c6c973
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x02.h"
+
+static void mt76x02u_remove_dma_hdr(struct sk_buff *skb)
+{
+       int hdr_len;
+
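+       /* strip the txwi and DMA header, then drop the 2-byte alignment
+        * pad added on tx when the 802.11 header is not a multiple of 4
+        */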
+       skb_pull(skb, sizeof(struct mt76x02_txwi) + MT_DMA_HDR_LEN);
+       hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+       if (hdr_len % 4)
+               mt76x02_remove_hdr_pad(skb, 2);
+}
+
+void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+                             struct mt76_queue_entry *e, bool flush)
+{
+       mt76x02u_remove_dma_hdr(e->skb);
+       mt76x02_tx_complete(mdev, e->skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02u_tx_complete_skb);
+
+static int mt76x02u_check_skb_rooms(struct sk_buff *skb)
+{
+       int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+       u32 need_head;
+
+       need_head = sizeof(struct mt76x02_txwi) + MT_DMA_HDR_LEN;
+       if (hdr_len % 4)
+               need_head += 2;
+       return skb_cow(skb, need_head);
+}
+
+int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
+{
+       struct sk_buff *iter, *last = skb;
+       u32 info, pad;
+
+       /* Buffer layout:
+        *      |   4B   | xfer len |      pad       |  4B  |
+        *      | TXINFO | pkt/cmd  | zero pad to 4B | zero |
+        *
+        * The length field of TXINFO must be set to 'xfer len'.
+        */
+       info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+              FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
+       put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+
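+       /* pad the transfer to a 4-byte boundary plus 4 trailing zero bytes */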
+       pad = round_up(skb->len, 4) + 4 - skb->len;
+       skb_walk_frags(skb, iter) {
+               last = iter;
+               if (!iter->next) {
+                       skb->data_len += pad;
+                       skb->len += pad;
+                       break;
+               }
+       }
+
+       if (unlikely(pad)) {
+               if (skb_pad(last, pad))
+                       return -ENOMEM;
+               __skb_put(last, pad);
+       }
+       return 0;
+}
+
+static int
+mt76x02u_set_txinfo(struct sk_buff *skb, struct mt76_wcid *wcid, u8 ep)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       enum mt76_qsel qsel;
+       u32 flags;
+
+       if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
+           ep == MT_EP_OUT_HCCA)
+               qsel = MT_QSEL_MGMT;
+       else
+               qsel = MT_QSEL_EDCA;
+
+       flags = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
+               MT_TXD_INFO_80211;
+       if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
+               flags |= MT_TXD_INFO_WIV;
+
+       return mt76x02u_skb_dma_info(skb, WLAN_PORT, flags);
+}
+
+int mt76x02u_tx_prepare_skb(struct mt76_dev *dev, void *data,
+                           struct sk_buff *skb, struct mt76_queue *q,
+                           struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+                           u32 *tx_info)
+{
+       struct mt76x02_txwi *txwi;
+       int err, len = skb->len;
+
+       err = mt76x02u_check_skb_rooms(skb);
+       if (err < 0)
+               return err;
+
+       mt76x02_insert_hdr_pad(skb);
+
+       txwi = skb_push(skb, sizeof(struct mt76x02_txwi));
+       mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
+
+       return mt76x02u_set_txinfo(skb, wcid, q2ep(q->hw_idx));
+}
+EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
new file mode 100644 (file)
index 0000000..cb5f073
--- /dev/null
@@ -0,0 +1,360 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+
+#include "mt76.h"
+#include "mt76x02_dma.h"
+#include "mt76x02_mcu.h"
+#include "mt76x02_usb.h"
+
+#define MT_CMD_HDR_LEN                 4
+
+#define MT_FCE_DMA_ADDR                        0x0230
+#define MT_FCE_DMA_LEN                 0x0234
+
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX        0x09a8
+
+static struct sk_buff *
+mt76x02u_mcu_msg_alloc(const void *data, int len)
+{
+       struct sk_buff *skb;
+
+       skb = alloc_skb(MT_CMD_HDR_LEN + len + 8, GFP_KERNEL);
+       if (!skb)
+               return NULL;
+
+       skb_reserve(skb, MT_CMD_HDR_LEN);
+       skb_put_data(skb, data, len);
+
+       return skb;
+}
+
+static void
+mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len)
+{
+       struct mt76_usb *usb = &dev->usb;
+       u32 reg, val;
+       int i;
+
+       if (usb->mcu.burst) {
+               WARN_ON_ONCE(len / 4 != usb->mcu.rp_len);
+
+               reg = usb->mcu.rp[0].reg - usb->mcu.base;
+               for (i = 0; i < usb->mcu.rp_len; i++) {
+                       val = get_unaligned_le32(data + 4 * i);
+                       usb->mcu.rp[i].reg = reg++;
+                       usb->mcu.rp[i].value = val;
+               }
+       } else {
+               WARN_ON_ONCE(len / 8 != usb->mcu.rp_len);
+
+               for (i = 0; i < usb->mcu.rp_len; i++) {
+                       reg = get_unaligned_le32(data + 8 * i) -
+                             usb->mcu.base;
+                       val = get_unaligned_le32(data + 8 * i + 4);
+
+                       WARN_ON_ONCE(usb->mcu.rp[i].reg != reg);
+                       usb->mcu.rp[i].value = val;
+               }
+       }
+}
+
+static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
+{
+       struct mt76_usb *usb = &dev->usb;
+       struct mt76u_buf *buf = &usb->mcu.res;
+       struct urb *urb = buf->urb;
+       int i, ret;
+       u32 rxfce;
+       u8 *data;
+
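+       /* retry up to five times, waiting 300 ms per attempt and
+        * resubmitting the response buffer between attempts
+        */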
+       for (i = 0; i < 5; i++) {
+               if (!wait_for_completion_timeout(&usb->mcu.cmpl,
+                                                msecs_to_jiffies(300)))
+                       continue;
+
+               if (urb->status)
+                       return -EIO;
+
+               data = sg_virt(&urb->sg[0]);
+               if (usb->mcu.rp)
+                       mt76x02u_multiple_mcu_reads(dev, data + 4,
+                                                   urb->actual_length - 8);
+
+               rxfce = get_unaligned_le32(data);
+               ret = mt76u_submit_buf(dev, USB_DIR_IN,
+                                      MT_EP_IN_CMD_RESP,
+                                      buf, GFP_KERNEL,
+                                      mt76u_mcu_complete_urb,
+                                      &usb->mcu.cmpl);
+               if (ret)
+                       return ret;
+
+               if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce) &&
+                   FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce) == EVT_CMD_DONE)
+                       return 0;
+
+               dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n",
+                       FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
+                       seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
+       }
+
+       dev_err(dev->dev, "error: %s timed out\n", __func__);
+       return -ETIMEDOUT;
+}
+
+static int
+__mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
+                       int cmd, bool wait_resp)
+{
+       struct usb_interface *intf = to_usb_interface(dev->dev);
+       struct usb_device *udev = interface_to_usbdev(intf);
+       struct mt76_usb *usb = &dev->usb;
+       unsigned int pipe;
+       int ret, sent;
+       u8 seq = 0;
+       u32 info;
+
+       if (!skb)
+               return -EINVAL;
+
+       if (test_bit(MT76_REMOVED, &dev->state))
+               return 0;
+
+       pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
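+       /* seq 0 marks commands that expect no response, so skip it here */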
+       if (wait_resp) {
+               seq = ++usb->mcu.msg_seq & 0xf;
+               if (!seq)
+                       seq = ++usb->mcu.msg_seq & 0xf;
+       }
+
+       info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
+              FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
+              MT_MCU_MSG_TYPE_CMD;
+       ret = mt76x02u_skb_dma_info(skb, CPU_TX_PORT, info);
+       if (ret)
+               return ret;
+
+       ret = usb_bulk_msg(udev, pipe, skb->data, skb->len, &sent, 500);
+       if (ret)
+               return ret;
+
+       if (wait_resp)
+               ret = mt76x02u_mcu_wait_resp(dev, seq);
+
+       consume_skb(skb);
+
+       return ret;
+}
+
+static int
+mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
+                     int cmd, bool wait_resp)
+{
+       struct mt76_usb *usb = &dev->usb;
+       int err;
+
+       mutex_lock(&usb->mcu.mutex);
+       err = __mt76x02u_mcu_send_msg(dev, skb, cmd, wait_resp);
+       mutex_unlock(&usb->mcu.mutex);
+
+       return err;
+}
+
+static inline void skb_put_le32(struct sk_buff *skb, u32 val)
+{
+       put_unaligned_le32(val, skb_put(skb, 4));
+}
+
+static int
+mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
+                  const struct mt76_reg_pair *data, int n)
+{
+       const int CMD_RANDOM_WRITE = 12;
+       const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
+       struct sk_buff *skb;
+       int cnt, i, ret;
+
+       if (!n)
+               return 0;
+
+       cnt = min(max_vals_per_cmd, n);
+
+       skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+       skb_reserve(skb, MT_DMA_HDR_LEN);
+
+       for (i = 0; i < cnt; i++) {
+               skb_put_le32(skb, base + data[i].reg);
+               skb_put_le32(skb, data[i].value);
+       }
+
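+       /* wait for a response only on the final chunk (cnt == n) */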
+       ret = mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n);
+       if (ret)
+               return ret;
+
+       return mt76x02u_mcu_wr_rp(dev, base, data + cnt, n - cnt);
+}
+
+static int
+mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
+                  struct mt76_reg_pair *data, int n)
+{
+       const int CMD_RANDOM_READ = 10;
+       const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
+       struct mt76_usb *usb = &dev->usb;
+       struct sk_buff *skb;
+       int cnt, i, ret;
+
+       if (!n)
+               return 0;
+
+       cnt = min(max_vals_per_cmd, n);
+       if (cnt != n)
+               return -EINVAL;
+
+       skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+       skb_reserve(skb, MT_DMA_HDR_LEN);
+
+       for (i = 0; i < cnt; i++) {
+               skb_put_le32(skb, base + data[i].reg);
+               skb_put_le32(skb, data[i].value);
+       }
+
+       mutex_lock(&usb->mcu.mutex);
+
+       usb->mcu.rp = data;
+       usb->mcu.rp_len = n;
+       usb->mcu.base = base;
+       usb->mcu.burst = false;
+
+       ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_READ, true);
+
+       usb->mcu.rp = NULL;
+
+       mutex_unlock(&usb->mcu.mutex);
+
+       return ret;
+}
+
+void mt76x02u_mcu_fw_reset(struct mt76_dev *dev)
+{
+       mt76u_vendor_request(dev, MT_VEND_DEV_MODE,
+                            USB_DIR_OUT | USB_TYPE_VENDOR,
+                            0x1, 0, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset);
+
+static int
+__mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf,
+                           const void *fw_data, int len, u32 dst_addr)
+{
+       u8 *data = sg_virt(&buf->urb->sg[0]);
+       DECLARE_COMPLETION_ONSTACK(cmpl);
+       __le32 info;
+       u32 val;
+       int err;
+
+       info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
+                          FIELD_PREP(MT_MCU_MSG_LEN, len) |
+                          MT_MCU_MSG_TYPE_CMD);
+
+       memcpy(data, &info, sizeof(info));
+       memcpy(data + sizeof(info), fw_data, len);
+       memset(data + sizeof(info) + len, 0, 4);
+
+       mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
+                       MT_FCE_DMA_ADDR, dst_addr);
+       len = roundup(len, 4);
+       mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
+                       MT_FCE_DMA_LEN, len << 16);
+
+       buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
+       err = mt76u_submit_buf(dev, USB_DIR_OUT,
+                              MT_EP_OUT_INBAND_CMD,
+                              buf, GFP_KERNEL,
+                              mt76u_mcu_complete_urb, &cmpl);
+       if (err < 0)
+               return err;
+
+       if (!wait_for_completion_timeout(&cmpl,
+                                        msecs_to_jiffies(1000))) {
+               dev_err(dev->dev, "firmware upload timed out\n");
+               usb_kill_urb(buf->urb);
+               return -ETIMEDOUT;
+       }
+
+       if (mt76u_urb_error(buf->urb)) {
+               dev_err(dev->dev, "firmware upload failed: %d\n",
+                       buf->urb->status);
+               return buf->urb->status;
+       }
+
+       val = mt76u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+       val++;
+       mt76u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+
+       return 0;
+}
+
+int mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
+                             int data_len, u32 max_payload, u32 offset)
+{
+       int err, len, pos = 0, max_len = max_payload - 8;
+       struct mt76u_buf buf;
+
+       err = mt76u_buf_alloc(dev, &buf, 1, max_payload, max_payload,
+                             GFP_KERNEL);
+       if (err < 0)
+               return err;
+
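+       /* upload in chunks; 8 bytes of each buffer are reserved for
+        * the TXINFO header and the trailing zero padding
+        */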
+       while (data_len > 0) {
+               len = min_t(int, data_len, max_len);
+               err = __mt76x02u_mcu_fw_send_data(dev, &buf, data + pos,
+                                                 len, offset + pos);
+               if (err < 0)
+                       break;
+
+               data_len -= len;
+               pos += len;
+               usleep_range(5000, 10000);
+       }
+       mt76u_buf_free(&buf);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_send_data);
+
+void mt76x02u_init_mcu(struct mt76_dev *dev)
+{
+       static const struct mt76_mcu_ops mt76x02u_mcu_ops = {
+               .mcu_msg_alloc = mt76x02u_mcu_msg_alloc,
+               .mcu_send_msg = mt76x02u_mcu_send_msg,
+               .mcu_wr_rp = mt76x02u_mcu_wr_rp,
+               .mcu_rd_rp = mt76x02u_mcu_rd_rp,
+       };
+
+       dev->mcu_ops = &mt76x02u_mcu_ops;
+}
+EXPORT_SYMBOL_GPL(mt76x02u_init_mcu);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
new file mode 100644 (file)
index 0000000..5851ab6
--- /dev/null
@@ -0,0 +1,444 @@
+/*
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include "mt76x02.h"
+
+#define CCK_RATE(_idx, _rate) {                                        \
+       .bitrate = _rate,                                       \
+       .flags = IEEE80211_RATE_SHORT_PREAMBLE,                 \
+       .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx,              \
+       .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx),  \
+}
+
+#define OFDM_RATE(_idx, _rate) {                               \
+       .bitrate = _rate,                                       \
+       .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx,             \
+       .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx,       \
+}
+
+struct ieee80211_rate mt76x02_rates[] = {
+       CCK_RATE(0, 10),
+       CCK_RATE(1, 20),
+       CCK_RATE(2, 55),
+       CCK_RATE(3, 110),
+       OFDM_RATE(0, 60),
+       OFDM_RATE(1, 90),
+       OFDM_RATE(2, 120),
+       OFDM_RATE(3, 180),
+       OFDM_RATE(4, 240),
+       OFDM_RATE(5, 360),
+       OFDM_RATE(6, 480),
+       OFDM_RATE(7, 540),
+};
+EXPORT_SYMBOL_GPL(mt76x02_rates);
+
+void mt76x02_configure_filter(struct ieee80211_hw *hw,
+                            unsigned int changed_flags,
+                            unsigned int *total_flags, u64 multicast)
+{
+       struct mt76_dev *dev = hw->priv;
+       u32 flags = 0;
+
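+/* a set rxfilter bit makes the hardware drop that frame class, so the
+ * helper sets the bit when the corresponding FIF_* flag is absent
+ */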
+#define MT76_FILTER(_flag, _hw) do { \
+               flags |= *total_flags & FIF_##_flag;                    \
+               dev->rxfilter &= ~(_hw);                                \
+               dev->rxfilter |= !(flags & FIF_##_flag) * (_hw);        \
+       } while (0)
+
+       mutex_lock(&dev->mutex);
+
+       dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+       MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+       MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+       MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+                            MT_RX_FILTR_CFG_CTS |
+                            MT_RX_FILTR_CFG_CFEND |
+                            MT_RX_FILTR_CFG_CFACK |
+                            MT_RX_FILTR_CFG_BA |
+                            MT_RX_FILTR_CFG_CTRL_RSV);
+       MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+       *total_flags = flags;
+       dev->bus->wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+       mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(mt76x02_configure_filter);
+
+int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                  struct ieee80211_sta *sta)
+{
+       struct mt76_dev *dev = hw->priv;
+       struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
+       struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
+       int ret = 0;
+       int idx = 0;
+       int i;
+
+       mutex_lock(&dev->mutex);
+
+       idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
+       if (idx < 0) {
+               ret = -ENOSPC;
+               goto out;
+       }
+
+       msta->vif = mvif;
+       msta->wcid.sta = 1;
+       msta->wcid.idx = idx;
+       msta->wcid.hw_key_idx = -1;
+       mt76x02_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+       mt76x02_mac_wcid_set_drop(dev, idx, false);
+       for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+               mt76x02_txq_init(dev, sta->txq[i]);
+
+       if (vif->type == NL80211_IFTYPE_AP)
+               set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
+
+       ewma_signal_init(&msta->rssi);
+
+       rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+
+out:
+       mutex_unlock(&dev->mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(mt76x02_sta_add);
+
+int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                     struct ieee80211_sta *sta)
+{
+       struct mt76_dev *dev = hw->priv;
+       struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
+       int idx = msta->wcid.idx;
+       int i;
+
+       mutex_lock(&dev->mutex);
+       rcu_assign_pointer(dev->wcid[idx], NULL);
+       for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+               mt76_txq_remove(dev, sta->txq[i]);
+       mt76x02_mac_wcid_set_drop(dev, idx, true);
+       mt76_wcid_free(dev->wcid_mask, idx);
+       mt76x02_mac_wcid_setup(dev, idx, 0, NULL);
+       mutex_unlock(&dev->mutex);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_sta_remove);
+
+void mt76x02_vif_init(struct mt76_dev *dev, struct ieee80211_vif *vif,
+                    unsigned int idx)
+{
+       struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
+
+       mvif->idx = idx;
+       mvif->group_wcid.idx = MT_VIF_WCID(idx);
+       mvif->group_wcid.hw_key_idx = -1;
+       mt76x02_txq_init(dev, vif->txq);
+}
+EXPORT_SYMBOL_GPL(mt76x02_vif_init);
+
+int
+mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+       struct mt76_dev *dev = hw->priv;
+       unsigned int idx = 0;
+
+       if (vif->addr[0] & BIT(1))
+               idx = 1 + (((dev->macaddr[0] ^ vif->addr[0]) >> 2) & 7);
+
+       /*
+        * Client mode typically only has one configurable BSSID register,
+        * which is used for bssidx=0. This is linked to the MAC address.
+        * Since mac80211 allows changing interface types, and we cannot
+        * force the use of the primary MAC address for a station mode
+        * interface, we need some other way of configuring a per-interface
+        * remote BSSID.
+        * The hardware provides an AP-Client feature, where bssidx 0-7 are
+        * used for AP mode and bssidx 8-15 for client mode.
+        * We shift the station interface bss index by 8 to force the
+        * hardware to recognize the BSSID.
+        * The resulting bssidx mismatch for unicast frames is ignored by hw.
+        */
+       if (vif->type == NL80211_IFTYPE_STATION)
+               idx += 8;
+
+       mt76x02_vif_init(dev, vif, idx);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_add_interface);
+
+void mt76x02_remove_interface(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif)
+{
+       struct mt76_dev *dev = hw->priv;
+
+       mt76_txq_remove(dev, vif->txq);
+}
+EXPORT_SYMBOL_GPL(mt76x02_remove_interface);
+
+int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                       struct ieee80211_ampdu_params *params)
+{
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       struct ieee80211_sta *sta = params->sta;
+       struct mt76_dev *dev = hw->priv;
+       struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
+       struct ieee80211_txq *txq = sta->txq[params->tid];
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
+       struct mt76_txq *mtxq;
+
+       if (!txq)
+               return -EINVAL;
+
+       mtxq = (struct mt76_txq *)txq->drv_priv;
+
+       switch (action) {
+       case IEEE80211_AMPDU_RX_START:
+               mt76_rx_aggr_start(dev, &msta->wcid, tid, *ssn, params->buf_size);
+               __mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+               break;
+       case IEEE80211_AMPDU_RX_STOP:
+               mt76_rx_aggr_stop(dev, &msta->wcid, tid);
+               __mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+               break;
+       case IEEE80211_AMPDU_TX_OPERATIONAL:
+               mtxq->aggr = true;
+               mtxq->send_bar = false;
+               ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+               break;
+       case IEEE80211_AMPDU_TX_STOP_FLUSH:
+       case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+               mtxq->aggr = false;
+               ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+               break;
+       case IEEE80211_AMPDU_TX_START:
+               mtxq->agg_ssn = *ssn << 4;
+               ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+               break;
+       case IEEE80211_AMPDU_TX_STOP_CONT:
+               mtxq->aggr = false;
+               ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+               break;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_ampdu_action);
+
+int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+                  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+                  struct ieee80211_key_conf *key)
+{
+       struct mt76_dev *dev = hw->priv;
+       struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
+       struct mt76x02_sta *msta;
+       struct mt76_wcid *wcid;
+       int idx = key->keyidx;
+       int ret;
+
+       /* fall back to sw encryption for unsupported ciphers */
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_WEP40:
+       case WLAN_CIPHER_SUITE_WEP104:
+       case WLAN_CIPHER_SUITE_TKIP:
+       case WLAN_CIPHER_SUITE_CCMP:
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       /*
+        * The hardware does not support per-STA RX GTK; fall back
+        * to software mode for these.
+        */
+       if ((vif->type == NL80211_IFTYPE_ADHOC ||
+            vif->type == NL80211_IFTYPE_MESH_POINT) &&
+           (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+            key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+           !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+               return -EOPNOTSUPP;
+
+       msta = sta ? (struct mt76x02_sta *) sta->drv_priv : NULL;
+       wcid = msta ? &msta->wcid : &mvif->group_wcid;
+
+       if (cmd == SET_KEY) {
+               key->hw_key_idx = wcid->idx;
+               wcid->hw_key_idx = idx;
+               if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
+                       key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+                       wcid->sw_iv = true;
+               }
+       } else {
+               if (idx == wcid->hw_key_idx) {
+                       wcid->hw_key_idx = -1;
+                       wcid->sw_iv = true;
+               }
+
+               key = NULL;
+       }
+       mt76_wcid_key_setup(dev, wcid, key);
+
+       if (!msta) {
+               if (key || wcid->hw_key_idx == idx) {
+                       ret = mt76x02_mac_wcid_set_key(dev, wcid->idx, key);
+                       if (ret)
+                               return ret;
+               }
+
+               return mt76x02_mac_shared_key_setup(dev, mvif->idx, idx, key);
+       }
+
+       return mt76x02_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+EXPORT_SYMBOL_GPL(mt76x02_set_key);
+
+int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                  u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+       struct mt76_dev *dev = hw->priv;
+       u8 cw_min = 5, cw_max = 10, qid;
+       u32 val;
+
+       qid = dev->q_tx[queue].hw_idx;
+
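+       /* the CW fields hold exponents: fls() converts a (2^n - 1) window to n */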
+       if (params->cw_min)
+               cw_min = fls(params->cw_min);
+       if (params->cw_max)
+               cw_max = fls(params->cw_max);
+
+       val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
+             FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+             FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+             FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
+       __mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
+
+       val = __mt76_rr(dev, MT_WMM_TXOP(qid));
+       val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
+       val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
+       __mt76_wr(dev, MT_WMM_TXOP(qid), val);
+
+       val = __mt76_rr(dev, MT_WMM_AIFSN);
+       val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
+       val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
+       __mt76_wr(dev, MT_WMM_AIFSN, val);
+
+       val = __mt76_rr(dev, MT_WMM_CWMIN);
+       val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
+       val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
+       __mt76_wr(dev, MT_WMM_CWMIN, val);
+
+       val = __mt76_rr(dev, MT_WMM_CWMAX);
+       val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
+       val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
+       __mt76_wr(dev, MT_WMM_CWMAX, val);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_conf_tx);
+
+void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif,
+                               struct ieee80211_sta *sta)
+{
+       struct mt76_dev *dev = hw->priv;
+       struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
+       struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
+       struct ieee80211_tx_rate rate = {};
+
+       if (!rates)
+               return;
+
+       rate.idx = rates->rate[0].idx;
+       rate.flags = rates->rate[0].flags;
+       mt76x02_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+       msta->wcid.max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, &rate);
+}
+EXPORT_SYMBOL_GPL(mt76x02_sta_rate_tbl_update);
+
+int mt76x02_insert_hdr_pad(struct sk_buff *skb)
+{
+       int len = ieee80211_get_hdrlen_from_skb(skb);
+
+       if (len % 4 == 0)
+               return 0;
+
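+       /* grow the head by two bytes and shift the header forward,
+        * leaving a zeroed pad that 4-byte aligns the payload
+        */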
+       skb_push(skb, 2);
+       memmove(skb->data, skb->data + 2, len);
+
+       skb->data[len] = 0;
+       skb->data[len + 1] = 0;
+       return 2;
+}
+EXPORT_SYMBOL_GPL(mt76x02_insert_hdr_pad);
+
+void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len)
+{
+       int hdrlen;
+
+       if (!len)
+               return;
+
+       hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+       memmove(skb->data + len, skb->data, hdrlen);
+       skb_pull(skb, len);
+}
+EXPORT_SYMBOL_GPL(mt76x02_remove_hdr_pad);
+
+const u16 mt76x02_beacon_offsets[16] = {
+       /* 1024 bytes per beacon */
+       0xc000,
+       0xc400,
+       0xc800,
+       0xcc00,
+       0xd000,
+       0xd400,
+       0xd800,
+       0xdc00,
+       /* BSS idx 8-15 not used for beacons */
+       0xc000,
+       0xc000,
+       0xc000,
+       0xc000,
+       0xc000,
+       0xc000,
+       0xc000,
+       0xc000,
+};
+EXPORT_SYMBOL_GPL(mt76x02_beacon_offsets);
+
+void mt76x02_set_beacon_offsets(struct mt76_dev *dev)
+{
+       u16 val, base = MT_BEACON_BASE;
+       u32 regs[4] = {};
+       int i;
+
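+       /* each MT_BCN_OFFSET register packs four offsets, one byte
+        * each, in units of 64 bytes
+        */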
+       for (i = 0; i < 16; i++) {
+               val = mt76x02_beacon_offsets[i] - base;
+               regs[i / 4] |= (val / 64) << (8 * (i % 4));
+       }
+
+       for (i = 0; i < 4; i++)
+               __mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+}
+EXPORT_SYMBOL_GPL(mt76x02_set_beacon_offsets);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h
deleted file mode 100644 (file)
index dca3209..0000000
+++ /dev/null
@@ -1,330 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef __MT76x2_H
-#define __MT76x2_H
-
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/spinlock.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/mutex.h>
-#include <linux/bitops.h>
-#include <linux/kfifo.h>
-#include <linux/average.h>
-
-#define MT7662_FIRMWARE                "mt7662.bin"
-#define MT7662_ROM_PATCH       "mt7662_rom_patch.bin"
-#define MT7662_EEPROM_SIZE     512
-
-#define MT7662U_FIRMWARE       "mediatek/mt7662u.bin"
-#define MT7662U_ROM_PATCH      "mediatek/mt7662u_rom_patch.bin"
-
-#define MT76x2_RX_RING_SIZE    256
-#define MT_RX_HEADROOM         32
-
-#define MT_MAX_CHAINS          2
-
-#define MT_CALIBRATE_INTERVAL  HZ
-
-#define MT_MAX_VIFS            8
-#define MT_VIF_WCID(_n)                (254 - ((_n) & 7))
-
-#include "mt76.h"
-#include "mt76x2_regs.h"
-#include "mt76x2_mac.h"
-#include "mt76x2_dfs.h"
-
-DECLARE_EWMA(signal, 10, 8)
-
-struct mt76x2_mcu {
-       struct mutex mutex;
-
-       wait_queue_head_t wait;
-       struct sk_buff_head res_q;
-       struct mt76u_buf res_u;
-
-       u32 msg_seq;
-};
-
-struct mt76x2_rx_freq_cal {
-       s8 high_gain[MT_MAX_CHAINS];
-       s8 rssi_offset[MT_MAX_CHAINS];
-       s8 lna_gain;
-       u32 mcu_gain;
-};
-
-struct mt76x2_calibration {
-       struct mt76x2_rx_freq_cal rx;
-
-       u8 agc_gain_init[MT_MAX_CHAINS];
-       u8 agc_gain_cur[MT_MAX_CHAINS];
-
-       u16 false_cca;
-       s8 avg_rssi_all;
-       s8 agc_gain_adjust;
-       s8 low_gain;
-
-       u8 temp;
-
-       bool init_cal_done;
-       bool tssi_cal_done;
-       bool tssi_comp_pending;
-       bool dpd_cal_done;
-       bool channel_cal_done;
-};
-
-struct mt76x2_dev {
-       struct mt76_dev mt76; /* must be first */
-
-       struct mac_address macaddr_list[8];
-
-       struct mutex mutex;
-
-       const u16 *beacon_offsets;
-       unsigned long wcid_mask[128 / BITS_PER_LONG];
-
-       int txpower_conf;
-       int txpower_cur;
-
-       u8 txdone_seq;
-       DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x2_tx_status);
-
-       struct mt76x2_mcu mcu;
-       struct sk_buff *rx_head;
-
-       struct tasklet_struct tx_tasklet;
-       struct tasklet_struct pre_tbtt_tasklet;
-       struct delayed_work cal_work;
-       struct delayed_work mac_work;
-
-       u32 aggr_stats[32];
-
-       struct mt76_wcid global_wcid;
-       struct mt76_wcid __rcu *wcid[128];
-
-       spinlock_t irq_lock;
-       u32 irqmask;
-
-       struct sk_buff *beacons[8];
-       u8 beacon_mask;
-       u8 beacon_data_mask;
-
-       u8 tbtt_count;
-       u16 beacon_int;
-
-       u16 chainmask;
-
-       u32 rxfilter;
-
-       struct mt76x2_calibration cal;
-
-       s8 target_power;
-       s8 target_power_delta[2];
-       struct mt76_rate_power rate_power;
-       bool enable_tpc;
-
-       u8 coverage_class;
-       u8 slottime;
-
-       struct mt76x2_dfs_pattern_detector dfs_pd;
-};
-
-struct mt76x2_vif {
-       u8 idx;
-
-       struct mt76_wcid group_wcid;
-};
-
-struct mt76x2_sta {
-       struct mt76_wcid wcid; /* must be first */
-
-       struct mt76x2_vif *vif;
-       struct mt76x2_tx_status status;
-       int n_frames;
-
-       struct ewma_signal rssi;
-       int inactive_count;
-};
-
-static inline bool mt76x2_wait_for_mac(struct mt76x2_dev *dev)
-{
-       int i;
-
-       for (i = 0; i < 500; i++) {
-               switch (mt76_rr(dev, MT_MAC_CSR0)) {
-               case 0:
-               case ~0:
-                       break;
-               default:
-                       return true;
-               }
-               usleep_range(5000, 10000);
-       }
-       return false;
-}
-
-static inline bool is_mt7612(struct mt76x2_dev *dev)
-{
-       return mt76_chip(&dev->mt76) == 0x7612;
-}
-
-void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set);
-
-static inline bool mt76x2_channel_silent(struct mt76x2_dev *dev)
-{
-       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
-
-       return ((chan->flags & IEEE80211_CHAN_RADAR) &&
-               chan->dfs_state != NL80211_DFS_AVAILABLE);
-}
-
-static inline void mt76x2_irq_enable(struct mt76x2_dev *dev, u32 mask)
-{
-       mt76x2_set_irq_mask(dev, 0, mask);
-}
-
-static inline void mt76x2_irq_disable(struct mt76x2_dev *dev, u32 mask)
-{
-       mt76x2_set_irq_mask(dev, mask, 0);
-}
-
-static inline bool mt76x2_wait_for_bbp(struct mt76x2_dev *dev)
-{
-       return mt76_poll_msec(dev, MT_MAC_STATUS,
-                             MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
-                             0, 100);
-}
-
-static inline bool wait_for_wpdma(struct mt76x2_dev *dev)
-{
-       return mt76_poll(dev, MT_WPDMA_GLO_CFG,
-                        MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
-                        MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
-                        0, 1000);
-}
-
-extern const struct ieee80211_ops mt76x2_ops;
-
-extern struct ieee80211_rate mt76x2_rates[12];
-
-struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev);
-int mt76x2_register_device(struct mt76x2_dev *dev);
-void mt76x2_init_debugfs(struct mt76x2_dev *dev);
-void mt76x2_init_device(struct mt76x2_dev *dev);
-
-irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance);
-void mt76x2_phy_power_on(struct mt76x2_dev *dev);
-int mt76x2_init_hardware(struct mt76x2_dev *dev);
-void mt76x2_stop_hardware(struct mt76x2_dev *dev);
-int mt76x2_eeprom_init(struct mt76x2_dev *dev);
-int mt76x2_apply_calibration_data(struct mt76x2_dev *dev, int channel);
-void mt76x2_set_tx_ackto(struct mt76x2_dev *dev);
-
-void mt76x2_phy_set_antenna(struct mt76x2_dev *dev);
-int mt76x2_phy_start(struct mt76x2_dev *dev);
-int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
-                        struct cfg80211_chan_def *chandef);
-int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain);
-void mt76x2_phy_calibrate(struct work_struct *work);
-void mt76x2_phy_set_txpower(struct mt76x2_dev *dev);
-
-int mt76x2_mcu_init(struct mt76x2_dev *dev);
-int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
-                          u8 bw_index, bool scan);
-int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on);
-int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
-                      u8 channel);
-int mt76x2_mcu_cleanup(struct mt76x2_dev *dev);
-
-int mt76x2_dma_init(struct mt76x2_dev *dev);
-void mt76x2_dma_cleanup(struct mt76x2_dev *dev);
-
-void mt76x2_cleanup(struct mt76x2_dev *dev);
-
-int mt76x2_tx_queue_mcu(struct mt76x2_dev *dev, enum mt76_txq_id qid,
-                       struct sk_buff *skb, int cmd, int seq);
-void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
-              struct sk_buff *skb);
-void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb);
-int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
-                         struct sk_buff *skb, struct mt76_queue *q,
-                         struct mt76_wcid *wcid, struct ieee80211_sta *sta,
-                         u32 *tx_info);
-void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
-                           struct mt76_queue_entry *e, bool flush);
-void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val);
-
-void mt76x2_pre_tbtt_tasklet(unsigned long arg);
-
-void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
-void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-                        struct sk_buff *skb);
-
-void mt76x2_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
-
-void mt76x2_update_channel(struct mt76_dev *mdev);
-
-s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
-                              const struct ieee80211_tx_rate *rate);
-s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj);
-void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr);
-
-int mt76x2_insert_hdr_pad(struct sk_buff *skb);
-
-bool mt76x2_mac_load_tx_status(struct mt76x2_dev *dev,
-                              struct mt76x2_tx_status *stat);
-void mt76x2_send_tx_status(struct mt76x2_dev *dev,
-                          struct mt76x2_tx_status *stat, u8 *update);
-void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable);
-void mt76x2_init_txpower(struct mt76x2_dev *dev,
-                        struct ieee80211_supported_band *sband);
-void mt76_write_mac_initvals(struct mt76x2_dev *dev);
-
-int mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                       struct ieee80211_ampdu_params *params);
-int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                  struct ieee80211_sta *sta);
-int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                     struct ieee80211_sta *sta);
-void mt76x2_remove_interface(struct ieee80211_hw *hw,
-                            struct ieee80211_vif *vif);
-int mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
-                  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
-                  struct ieee80211_key_conf *key);
-int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                  u16 queue, const struct ieee80211_tx_queue_params *params);
-void mt76x2_configure_filter(struct ieee80211_hw *hw,
-                            unsigned int changed_flags,
-                            unsigned int *total_flags, u64 multicast);
-void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq);
-void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw,
-                               struct ieee80211_vif *vif,
-                               struct ieee80211_sta *sta);
-
-void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
-                                enum nl80211_band band);
-void mt76x2_configure_tx_delay(struct mt76x2_dev *dev,
-                              enum nl80211_band band, u8 bw);
-void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl);
-void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper);
-int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev);
-void mt76x2_apply_gain_adj(struct mt76x2_dev *dev);
-
-#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig b/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig
new file mode 100644 (file)
index 0000000..2b414a0
--- /dev/null
@@ -0,0 +1,20 @@
+config MT76x2_COMMON
+       tristate
+       select MT76x02_LIB
+
+config MT76x2E
+       tristate "MediaTek MT76x2E (PCIe) support"
+       select MT76x2_COMMON
+       depends on MAC80211
+       depends on PCI
+       ---help---
+         This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices.
+
+config MT76x2U
+       tristate "MediaTek MT76x2U (USB) support"
+       select MT76x2_COMMON
+       select MT76x02_USB
+       depends on MAC80211
+       depends on USB
+       help
+         This adds support for MT7612U-based wireless USB dongles.
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile
new file mode 100644 (file)
index 0000000..b71bb10
--- /dev/null
@@ -0,0 +1,16 @@
+obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o
+obj-$(CONFIG_MT76x2E) += mt76x2e.o
+obj-$(CONFIG_MT76x2U) += mt76x2u.o
+
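+# objects shared by the PCIe (mt76x2e) and USB (mt76x2u) drivers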
+mt76x2-common-y := \
+       eeprom.o mac.o init.o phy.o debugfs.o mcu.o
+
+mt76x2e-y := \
+       pci.o pci_main.o pci_init.o pci_tx.o \
+       pci_mac.o pci_mcu.o pci_phy.o pci_dfs.o
+
+mt76x2u-y := \
+       usb.o usb_init.o usb_main.o usb_mac.o usb_mcu.o \
+       usb_phy.o
+
+CFLAGS_pci_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2/debugfs.c
new file mode 100644 (file)
index 0000000..e8f8ccc
--- /dev/null
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include "mt76x2.h"
+
+static int
+mt76x2_ampdu_stat_read(struct seq_file *file, void *data)
+{
+       struct mt76x02_dev *dev = file->private;
+       int i, j;
+
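+       /* print the 32-bucket A-MPDU length histogram, eight buckets per row */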
+       for (i = 0; i < 4; i++) {
+               seq_puts(file, "Length: ");
+               for (j = 0; j < 8; j++)
+                       seq_printf(file, "%8d | ", i * 8 + j + 1);
+               seq_puts(file, "\n");
+               seq_puts(file, "Count:  ");
+               for (j = 0; j < 8; j++)
+                       seq_printf(file, "%8d | ", dev->aggr_stats[i * 8 + j]);
+               seq_puts(file, "\n");
+               seq_puts(file, "--------");
+               for (j = 0; j < 8; j++)
+                       seq_puts(file, "-----------");
+               seq_puts(file, "\n");
+       }
+
+       return 0;
+}
+
+static int
+mt76x2_ampdu_stat_open(struct inode *inode, struct file *f)
+{
+       return single_open(f, mt76x2_ampdu_stat_read, inode->i_private);
+}
+
+static int read_txpower(struct seq_file *file, void *data)
+{
+       struct mt76x02_dev *dev = dev_get_drvdata(file->private);
+
+       seq_printf(file, "Target power: %d\n", dev->target_power);
+
+       mt76_seq_puts_array(file, "Delta", dev->target_power_delta,
+                           ARRAY_SIZE(dev->target_power_delta));
+       return 0;
+}
+
+static const struct file_operations fops_ampdu_stat = {
+       .open = mt76x2_ampdu_stat_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+static int
+mt76x2_dfs_stat_read(struct seq_file *file, void *data)
+{
+       struct mt76x02_dev *dev = file->private;
+       struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+       int i;
+
+       seq_printf(file, "allocated sequences:\t%d\n",
+                  dfs_pd->seq_stats.seq_pool_len);
+       seq_printf(file, "used sequences:\t\t%d\n",
+                  dfs_pd->seq_stats.seq_len);
+       seq_puts(file, "\n");
+
+       for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+               seq_printf(file, "engine: %d\n", i);
+               seq_printf(file, "  hw pattern detected:\t%d\n",
+                          dfs_pd->stats[i].hw_pattern);
+               seq_printf(file, "  hw pulse discarded:\t%d\n",
+                          dfs_pd->stats[i].hw_pulse_discarded);
+               seq_printf(file, "  sw pattern detected:\t%d\n",
+                          dfs_pd->stats[i].sw_pattern);
+       }
+
+       return 0;
+}
+
+static int
+mt76x2_dfs_stat_open(struct inode *inode, struct file *f)
+{
+       return single_open(f, mt76x2_dfs_stat_read, inode->i_private);
+}
+
+static const struct file_operations fops_dfs_stat = {
+       .open = mt76x2_dfs_stat_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+static int read_agc(struct seq_file *file, void *data)
+{
+       struct mt76x02_dev *dev = dev_get_drvdata(file->private);
+
+       seq_printf(file, "avg_rssi: %d\n", dev->cal.avg_rssi_all);
+       seq_printf(file, "low_gain: %d\n", dev->cal.low_gain);
+       seq_printf(file, "false_cca: %d\n", dev->cal.false_cca);
+       seq_printf(file, "agc_gain_adjust: %d\n", dev->cal.agc_gain_adjust);
+
+       return 0;
+}
+
+void mt76x2_init_debugfs(struct mt76x02_dev *dev)
+{
+       struct dentry *dir;
+
+       dir = mt76_register_debugfs(&dev->mt76);
+       if (!dir)
+               return;
+
+       debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp);
+       debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc);
+
+       debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
+       debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat);
+       debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
+                                   read_txpower);
+
+       debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc);
+}
+EXPORT_SYMBOL_GPL(mt76x2_init_debugfs);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h
new file mode 100644 (file)
index 0000000..3cb9d18
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __DFS_H
+#define __DFS_H
+
+void mt76x2_dfs_init_params(struct mt76x02_dev *dev);
+void mt76x2_dfs_init_detector(struct mt76x02_dev *dev);
+void mt76x2_dfs_adjust_agc(struct mt76x02_dev *dev);
+void mt76x2_dfs_set_domain(struct mt76x02_dev *dev,
+                          enum nl80211_dfs_regions region);
+
+#endif /* __DFS_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
new file mode 100644 (file)
index 0000000..bbab021
--- /dev/null
@@ -0,0 +1,535 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <asm/unaligned.h>
+#include "mt76x2.h"
+#include "eeprom.h"
+
+#define EE_FIELD(_name, _value) [MT_EE_##_name] = (_value) | 1
+
+static int
+mt76x2_eeprom_copy(struct mt76x02_dev *dev, enum mt76x02_eeprom_field field,
+                  void *dest, int len)
+{
+       if (field + len > dev->mt76.eeprom.size)
+               return -1;
+
+       memcpy(dest, dev->mt76.eeprom.data + field, len);
+       return 0;
+}
+
+static int
+mt76x2_eeprom_get_macaddr(struct mt76x02_dev *dev)
+{
+       void *src = dev->mt76.eeprom.data + MT_EE_MAC_ADDR;
+
+       memcpy(dev->mt76.macaddr, src, ETH_ALEN);
+       return 0;
+}
+
+static bool
+mt76x2_has_cal_free_data(struct mt76x02_dev *dev, u8 *efuse)
+{
+       u16 *efuse_w = (u16 *) efuse;
+
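+       /*
+        * Calibration-free parts leave the NIC config and power delta
+        * words empty in efuse but do carry xtal trim and TX power data.
+        */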
+       if (efuse_w[MT_EE_NIC_CONF_0] != 0)
+               return false;
+
+       if (efuse_w[MT_EE_XTAL_TRIM_1] == 0xffff)
+               return false;
+
+       if (efuse_w[MT_EE_TX_POWER_DELTA_BW40] != 0)
+               return false;
+
+       if (efuse_w[MT_EE_TX_POWER_0_START_2G] == 0xffff)
+               return false;
+
+       if (efuse_w[MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA] != 0)
+               return false;
+
+       if (efuse_w[MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE] == 0xffff)
+               return false;
+
+       return true;
+}
+
+static void
+mt76x2_apply_cal_free_data(struct mt76x02_dev *dev, u8 *efuse)
+{
+#define GROUP_5G(_id)                                                     \
+       MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id),     \
+       MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id) + 1, \
+       MT_EE_TX_POWER_1_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id),     \
+       MT_EE_TX_POWER_1_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id) + 1
+
+       static const u8 cal_free_bytes[] = {
+               MT_EE_XTAL_TRIM_1,
+               MT_EE_TX_POWER_EXT_PA_5G + 1,
+               MT_EE_TX_POWER_0_START_2G,
+               MT_EE_TX_POWER_0_START_2G + 1,
+               MT_EE_TX_POWER_1_START_2G,
+               MT_EE_TX_POWER_1_START_2G + 1,
+               GROUP_5G(0),
+               GROUP_5G(1),
+               GROUP_5G(2),
+               GROUP_5G(3),
+               GROUP_5G(4),
+               GROUP_5G(5),
+               MT_EE_RF_2G_TSSI_OFF_TXPOWER,
+               MT_EE_RF_2G_RX_HIGH_GAIN + 1,
+               MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN,
+               MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN + 1,
+               MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN,
+               MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN + 1,
+               MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN,
+               MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN + 1,
+       };
+       u8 *eeprom = dev->mt76.eeprom.data;
+       u8 prev_grp0[4] = {
+               eeprom[MT_EE_TX_POWER_0_START_5G],
+               eeprom[MT_EE_TX_POWER_0_START_5G + 1],
+               eeprom[MT_EE_TX_POWER_1_START_5G],
+               eeprom[MT_EE_TX_POWER_1_START_5G + 1]
+       };
+       u16 val;
+       int i;
+
+       if (!mt76x2_has_cal_free_data(dev, efuse))
+               return;
+
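+       /* copy the per-chip calibrated bytes from efuse over the EEPROM data */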
+       for (i = 0; i < ARRAY_SIZE(cal_free_bytes); i++) {
+               int offset = cal_free_bytes[i];
+
+               eeprom[offset] = efuse[offset];
+       }
+
+       if (!(efuse[MT_EE_TX_POWER_0_START_5G] |
+             efuse[MT_EE_TX_POWER_0_START_5G + 1]))
+               memcpy(eeprom + MT_EE_TX_POWER_0_START_5G, prev_grp0, 2);
+       if (!(efuse[MT_EE_TX_POWER_1_START_5G] |
+             efuse[MT_EE_TX_POWER_1_START_5G + 1]))
+               memcpy(eeprom + MT_EE_TX_POWER_1_START_5G, prev_grp0 + 2, 2);
+
+       val = get_unaligned_le16(efuse + MT_EE_BT_RCAL_RESULT);
+       if (val != 0xffff)
+               eeprom[MT_EE_BT_RCAL_RESULT] = val & 0xff;
+
+       val = get_unaligned_le16(efuse + MT_EE_BT_VCDL_CALIBRATION);
+       if (val != 0xffff)
+               eeprom[MT_EE_BT_VCDL_CALIBRATION + 1] = val >> 8;
+
+       val = get_unaligned_le16(efuse + MT_EE_BT_PMUCFG);
+       if (val != 0xffff)
+               eeprom[MT_EE_BT_PMUCFG] = val & 0xff;
+}
+
+static int mt76x2_check_eeprom(struct mt76x02_dev *dev)
+{
+       u16 val = get_unaligned_le16(dev->mt76.eeprom.data);
+
+       if (!val)
+               val = get_unaligned_le16(dev->mt76.eeprom.data + MT_EE_PCI_ID);
+
+       switch (val) {
+       case 0x7662:
+       case 0x7612:
+               return 0;
+       default:
+               dev_err(dev->mt76.dev, "EEPROM data check failed: %04x\n", val);
+               return -EINVAL;
+       }
+}
+
+static int
+mt76x2_eeprom_load(struct mt76x02_dev *dev)
+{
+       void *efuse;
+       bool found;
+       int ret;
+
+       ret = mt76_eeprom_init(&dev->mt76, MT7662_EEPROM_SIZE);
+       if (ret < 0)
+               return ret;
+
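+       /* a positive return value means EEPROM data was found; validate it */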
+       found = ret;
+       if (found)
+               found = !mt76x2_check_eeprom(dev);
+
+       dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, MT7662_EEPROM_SIZE,
+                                         GFP_KERNEL);
+       dev->mt76.otp.size = MT7662_EEPROM_SIZE;
+       if (!dev->mt76.otp.data)
+               return -ENOMEM;
+
+       efuse = dev->mt76.otp.data;
+
+       if (mt76x02_get_efuse_data(&dev->mt76, 0, efuse,
+                                  MT7662_EEPROM_SIZE, MT_EE_READ))
+               goto out;
+
+       if (found) {
+               mt76x2_apply_cal_free_data(dev, efuse);
+       } else {
+               /* FIXME: check if efuse data is complete */
+               found = true;
+               memcpy(dev->mt76.eeprom.data, efuse, MT7662_EEPROM_SIZE);
+       }
+
+out:
+       if (!found)
+               return -ENOENT;
+
+       return 0;
+}
+
+static void
+mt76x2_set_rx_gain_group(struct mt76x02_dev *dev, u8 val)
+{
+       s8 *dest = dev->cal.rx.high_gain;
+
+       if (!mt76x02_field_valid(val)) {
+               dest[0] = 0;
+               dest[1] = 0;
+               return;
+       }
+
+       dest[0] = mt76x02_sign_extend(val, 4);
+       dest[1] = mt76x02_sign_extend(val >> 4, 4);
+}
+
+static void
+mt76x2_set_rssi_offset(struct mt76x02_dev *dev, int chain, u8 val)
+{
+       s8 *dest = dev->cal.rx.rssi_offset;
+
+       if (!mt76x02_field_valid(val)) {
+               dest[chain] = 0;
+               return;
+       }
+
+       dest[chain] = mt76x02_sign_extend_optional(val, 7);
+}
+
+static enum mt76x2_cal_channel_group
+mt76x2_get_cal_channel_group(int channel)
+{
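+       /* the 4.9 GHz Japan channels would otherwise fall through to UNII-3 */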
+       if (channel >= 184 && channel <= 196)
+               return MT_CH_5G_JAPAN;
+       if (channel <= 48)
+               return MT_CH_5G_UNII_1;
+       if (channel <= 64)
+               return MT_CH_5G_UNII_2;
+       if (channel <= 114)
+               return MT_CH_5G_UNII_2E_1;
+       if (channel <= 144)
+               return MT_CH_5G_UNII_2E_2;
+       return MT_CH_5G_UNII_3;
+}
+
+static u8
+mt76x2_get_5g_rx_gain(struct mt76x02_dev *dev, u8 channel)
+{
+       enum mt76x2_cal_channel_group group;
+
+       group = mt76x2_get_cal_channel_group(channel);
+       switch (group) {
+       case MT_CH_5G_JAPAN:
+               return mt76x02_eeprom_get(&dev->mt76,
+                                         MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN);
+       case MT_CH_5G_UNII_1:
+               return mt76x02_eeprom_get(&dev->mt76,
+                                         MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN) >> 8;
+       case MT_CH_5G_UNII_2:
+               return mt76x02_eeprom_get(&dev->mt76,
+                                         MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN);
+       case MT_CH_5G_UNII_2E_1:
+               return mt76x02_eeprom_get(&dev->mt76,
+                                         MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN) >> 8;
+       case MT_CH_5G_UNII_2E_2:
+               return mt76x02_eeprom_get(&dev->mt76,
+                                         MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN);
+       default:
+               return mt76x02_eeprom_get(&dev->mt76,
+                                         MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN) >> 8;
+       }
+}
+
+void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
+{
+       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+       int channel = chan->hw_value;
+       s8 lna_5g[3], lna_2g;
+       u8 lna;
+       u16 val;
+
+       if (chan->band == NL80211_BAND_2GHZ)
+               val = mt76x02_eeprom_get(&dev->mt76,
+                                        MT_EE_RF_2G_RX_HIGH_GAIN) >> 8;
+       else
+               val = mt76x2_get_5g_rx_gain(dev, channel);
+
+       mt76x2_set_rx_gain_group(dev, val);
+
+       mt76x02_get_rx_gain(&dev->mt76, chan->band, &val, &lna_2g, lna_5g);
+       mt76x2_set_rssi_offset(dev, 0, val);
+       mt76x2_set_rssi_offset(dev, 1, val >> 8);
+
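+       /* pack the 2G and the three 5G LNA gain bytes into one word for the MCU */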
+       dev->cal.rx.mcu_gain =  (lna_2g & 0xff);
+       dev->cal.rx.mcu_gain |= (lna_5g[0] & 0xff) << 8;
+       dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
+       dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
+
+       lna = mt76x02_get_lna_gain(&dev->mt76, &lna_2g, lna_5g, chan);
+       dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8);
+}
+EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
+
+void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t,
+                          struct ieee80211_channel *chan)
+{
+       bool is_5ghz;
+       u16 val;
+
+       is_5ghz = chan->band == NL80211_BAND_5GHZ;
+
+       memset(t, 0, sizeof(*t));
+
+       val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_CCK);
+       t->cck[0] = t->cck[1] = mt76x02_rate_power_val(val);
+       t->cck[2] = t->cck[3] = mt76x02_rate_power_val(val >> 8);
+
+       if (is_5ghz)
+               val = mt76x02_eeprom_get(&dev->mt76,
+                                        MT_EE_TX_POWER_OFDM_5G_6M);
+       else
+               val = mt76x02_eeprom_get(&dev->mt76,
+                                        MT_EE_TX_POWER_OFDM_2G_6M);
+       t->ofdm[0] = t->ofdm[1] = mt76x02_rate_power_val(val);
+       t->ofdm[2] = t->ofdm[3] = mt76x02_rate_power_val(val >> 8);
+
+       if (is_5ghz)
+               val = mt76x02_eeprom_get(&dev->mt76,
+                                        MT_EE_TX_POWER_OFDM_5G_24M);
+       else
+               val = mt76x02_eeprom_get(&dev->mt76,
+                                        MT_EE_TX_POWER_OFDM_2G_24M);
+       t->ofdm[4] = t->ofdm[5] = mt76x02_rate_power_val(val);
+       t->ofdm[6] = t->ofdm[7] = mt76x02_rate_power_val(val >> 8);
+
+       val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS0);
+       t->ht[0] = t->ht[1] = mt76x02_rate_power_val(val);
+       t->ht[2] = t->ht[3] = mt76x02_rate_power_val(val >> 8);
+
+       val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS4);
+       t->ht[4] = t->ht[5] = mt76x02_rate_power_val(val);
+       t->ht[6] = t->ht[7] = mt76x02_rate_power_val(val >> 8);
+
+       val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS8);
+       t->ht[8] = t->ht[9] = mt76x02_rate_power_val(val);
+       t->ht[10] = t->ht[11] = mt76x02_rate_power_val(val >> 8);
+
+       val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS12);
+       t->ht[12] = t->ht[13] = mt76x02_rate_power_val(val);
+       t->ht[14] = t->ht[15] = mt76x02_rate_power_val(val >> 8);
+
+       val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS0);
+       t->vht[0] = t->vht[1] = mt76x02_rate_power_val(val);
+       t->vht[2] = t->vht[3] = mt76x02_rate_power_val(val >> 8);
+
+       val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS4);
+       t->vht[4] = t->vht[5] = mt76x02_rate_power_val(val);
+       t->vht[6] = t->vht[7] = mt76x02_rate_power_val(val >> 8);
+
+       val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS8);
+       if (!is_5ghz)
+               val >>= 8;
+       t->vht[8] = t->vht[9] = mt76x02_rate_power_val(val >> 8);
+
+       memcpy(t->stbc, t->ht, sizeof(t->stbc[0]) * 8);
+       t->stbc[8] = t->vht[8];
+       t->stbc[9] = t->vht[9];
+}
+EXPORT_SYMBOL_GPL(mt76x2_get_rate_power);
+
+static void
+mt76x2_get_power_info_2g(struct mt76x02_dev *dev,
+                        struct mt76x2_tx_power_info *t,
+                        struct ieee80211_channel *chan,
+                        int chain, int offset)
+{
+       int channel = chan->hw_value;
+       int delta_idx;
+       u8 data[6];
+       u16 val;
+
+       if (channel < 6)
+               delta_idx = 3;
+       else if (channel < 11)
+               delta_idx = 4;
+       else
+               delta_idx = 5;
+
+       mt76x2_eeprom_copy(dev, offset, data, sizeof(data));
+
+       t->chain[chain].tssi_slope = data[0];
+       t->chain[chain].tssi_offset = data[1];
+       t->chain[chain].target_power = data[2];
+       t->chain[chain].delta = mt76x02_sign_extend_optional(data[delta_idx], 7);
+
+       val = mt76x02_eeprom_get(&dev->mt76, MT_EE_RF_2G_TSSI_OFF_TXPOWER);
+       t->target_power = val >> 8;
+}
+
+static void
+mt76x2_get_power_info_5g(struct mt76x02_dev *dev,
+                        struct mt76x2_tx_power_info *t,
+                        struct ieee80211_channel *chan,
+                        int chain, int offset)
+{
+       int channel = chan->hw_value;
+       enum mt76x2_cal_channel_group group;
+       int delta_idx;
+       u16 val;
+       u8 data[5];
+
+       group = mt76x2_get_cal_channel_group(channel);
+       offset += group * MT_TX_POWER_GROUP_SIZE_5G;
+
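+       /* the delta byte sits at a sub-band dependent position in the group */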
+       if (channel >= 192)
+               delta_idx = 4;
+       else if (channel >= 184)
+               delta_idx = 3;
+       else if (channel < 44)
+               delta_idx = 3;
+       else if (channel < 52)
+               delta_idx = 4;
+       else if (channel < 58)
+               delta_idx = 3;
+       else if (channel < 98)
+               delta_idx = 4;
+       else if (channel < 106)
+               delta_idx = 3;
+       else if (channel < 116)
+               delta_idx = 4;
+       else if (channel < 130)
+               delta_idx = 3;
+       else if (channel < 149)
+               delta_idx = 4;
+       else if (channel < 157)
+               delta_idx = 3;
+       else
+               delta_idx = 4;
+
+       mt76x2_eeprom_copy(dev, offset, data, sizeof(data));
+
+       t->chain[chain].tssi_slope = data[0];
+       t->chain[chain].tssi_offset = data[1];
+       t->chain[chain].target_power = data[2];
+       t->chain[chain].delta = mt76x02_sign_extend_optional(data[delta_idx], 7);
+
+       val = mt76x02_eeprom_get(&dev->mt76, MT_EE_RF_2G_RX_HIGH_GAIN);
+       t->target_power = val & 0xff;
+}
+
+void mt76x2_get_power_info(struct mt76x02_dev *dev,
+                          struct mt76x2_tx_power_info *t,
+                          struct ieee80211_channel *chan)
+{
+       u16 bw40, bw80;
+
+       memset(t, 0, sizeof(*t));
+
+       bw40 = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_DELTA_BW40);
+       bw80 = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_DELTA_BW80);
+
+       if (chan->band == NL80211_BAND_5GHZ) {
+               bw40 >>= 8;
+               mt76x2_get_power_info_5g(dev, t, chan, 0,
+                                        MT_EE_TX_POWER_0_START_5G);
+               mt76x2_get_power_info_5g(dev, t, chan, 1,
+                                        MT_EE_TX_POWER_1_START_5G);
+       } else {
+               mt76x2_get_power_info_2g(dev, t, chan, 0,
+                                        MT_EE_TX_POWER_0_START_2G);
+               mt76x2_get_power_info_2g(dev, t, chan, 1,
+                                        MT_EE_TX_POWER_1_START_2G);
+       }
+
+       if (mt76x02_tssi_enabled(&dev->mt76) ||
+           !mt76x02_field_valid(t->target_power))
+               t->target_power = t->chain[0].target_power;
+
+       t->delta_bw40 = mt76x02_rate_power_val(bw40);
+       t->delta_bw80 = mt76x02_rate_power_val(bw80);
+}
+EXPORT_SYMBOL_GPL(mt76x2_get_power_info);
+
+int mt76x2_get_temp_comp(struct mt76x02_dev *dev, struct mt76x2_temp_comp *t)
+{
+       enum nl80211_band band = dev->mt76.chandef.chan->band;
+       u16 val, slope;
+       u8 bounds;
+
+       memset(t, 0, sizeof(*t));
+
+       if (!mt76x02_temp_tx_alc_enabled(&dev->mt76))
+               return -EINVAL;
+
+       if (!mt76x02_ext_pa_enabled(&dev->mt76, band))
+               return -EINVAL;
+
+       val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_EXT_PA_5G) >> 8;
+       t->temp_25_ref = val & 0x7f;
+       if (band == NL80211_BAND_5GHZ) {
+               slope = mt76x02_eeprom_get(&dev->mt76,
+                                          MT_EE_RF_TEMP_COMP_SLOPE_5G);
+               bounds = mt76x02_eeprom_get(&dev->mt76,
+                                           MT_EE_TX_POWER_EXT_PA_5G);
+       } else {
+               slope = mt76x02_eeprom_get(&dev->mt76,
+                                          MT_EE_RF_TEMP_COMP_SLOPE_2G);
+               bounds = mt76x02_eeprom_get(&dev->mt76,
+                                           MT_EE_TX_POWER_DELTA_BW80) >> 8;
+       }
+
+       t->high_slope = slope & 0xff;
+       t->low_slope = slope >> 8;
+       t->lower_bound = 0 - (bounds & 0xf);
+       t->upper_bound = (bounds >> 4) & 0xf;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_get_temp_comp);
+
+int mt76x2_eeprom_init(struct mt76x02_dev *dev)
+{
+       int ret;
+
+       ret = mt76x2_eeprom_load(dev);
+       if (ret)
+               return ret;
+
+       mt76x02_eeprom_parse_hw_cap(&dev->mt76);
+       mt76x2_eeprom_get_macaddr(dev);
+       mt76_eeprom_override(&dev->mt76);
+       dev->mt76.macaddr[0] &= ~BIT(1);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_eeprom_init);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
new file mode 100644 (file)
index 0000000..c97b31c
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_EEPROM_H
+#define __MT76x2_EEPROM_H
+
+#include "../mt76x02_eeprom.h"
+
+enum mt76x2_cal_channel_group {
+       MT_CH_5G_JAPAN,
+       MT_CH_5G_UNII_1,
+       MT_CH_5G_UNII_2,
+       MT_CH_5G_UNII_2E_1,
+       MT_CH_5G_UNII_2E_2,
+       MT_CH_5G_UNII_3,
+       __MT_CH_MAX
+};
+
+struct mt76x2_tx_power_info {
+       u8 target_power;
+
+       s8 delta_bw40;
+       s8 delta_bw80;
+
+       struct {
+               s8 tssi_slope;
+               s8 tssi_offset;
+               s8 target_power;
+               s8 delta;
+       } chain[MT_MAX_CHAINS];
+};
+
+struct mt76x2_temp_comp {
+       u8 temp_25_ref;
+       int lower_bound; /* J */
+       int upper_bound; /* J */
+       unsigned int high_slope; /* J / dB */
+       unsigned int low_slope; /* J / dB */
+};
+
+void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t,
+                          struct ieee80211_channel *chan);
+void mt76x2_get_power_info(struct mt76x02_dev *dev,
+                          struct mt76x2_tx_power_info *t,
+                          struct ieee80211_channel *chan);
+int mt76x2_get_temp_comp(struct mt76x02_dev *dev, struct mt76x2_temp_comp *t);
+void mt76x2_read_rx_gain(struct mt76x02_dev *dev);
+
+static inline bool
+mt76x2_has_ext_lna(struct mt76x02_dev *dev)
+{
+       u32 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_1);
+
+       if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
+               return val & MT_EE_NIC_CONF_1_LNA_EXT_2G;
+       else
+               return val & MT_EE_NIC_CONF_1_LNA_EXT_5G;
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
new file mode 100644 (file)
index 0000000..ccd9bc9
--- /dev/null
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "eeprom.h"
+#include "../mt76x02_phy.h"
+
+static void
+mt76x2_set_wlan_state(struct mt76x02_dev *dev, bool enable)
+{
+       u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+       if (enable)
+               val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+                       MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+       else
+               val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN |
+                        MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+
+       mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+       udelay(20);
+}
+
+void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable)
+{
+       u32 val;
+
+       if (!enable)
+               goto out;
+
+       val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+       val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
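+       /* if the WLAN core is already up, pulse the RF reset first */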
+       if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+               val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+               mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+               udelay(20);
+
+               val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+       }
+
+       mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+       udelay(20);
+
+out:
+       mt76x2_set_wlan_state(dev, enable);
+}
+EXPORT_SYMBOL_GPL(mt76x2_reset_wlan);
+
+void mt76_write_mac_initvals(struct mt76x02_dev *dev)
+{
+#define DEFAULT_PROT_CFG_CCK                           \
+       (FIELD_PREP(MT_PROT_CFG_RATE, 0x3) |            \
+        FIELD_PREP(MT_PROT_CFG_NAV, 1) |               \
+        FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) |     \
+        MT_PROT_CFG_RTS_THRESH)
+
+#define DEFAULT_PROT_CFG_OFDM                          \
+       (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) |         \
+        FIELD_PREP(MT_PROT_CFG_NAV, 1) |                       \
+        FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) |     \
+        MT_PROT_CFG_RTS_THRESH)
+
+#define DEFAULT_PROT_CFG_20                            \
+       (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) |         \
+        FIELD_PREP(MT_PROT_CFG_CTRL, 1) |              \
+        FIELD_PREP(MT_PROT_CFG_NAV, 1) |                       \
+        FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17))
+
+#define DEFAULT_PROT_CFG_40                            \
+       (FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) |         \
+        FIELD_PREP(MT_PROT_CFG_CTRL, 1) |              \
+        FIELD_PREP(MT_PROT_CFG_NAV, 1) |                       \
+        FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f))
+
+       static const struct mt76_reg_pair vals[] = {
+               /* Copied from MediaTek reference source */
+               { MT_PBF_SYS_CTRL,              0x00080c00 },
+               { MT_PBF_CFG,                   0x1efebcff },
+               { MT_FCE_PSE_CTRL,              0x00000001 },
+               { MT_MAC_SYS_CTRL,              0x0000000c },
+               { MT_MAX_LEN_CFG,               0x003e3f00 },
+               { MT_AMPDU_MAX_LEN_20M1S,       0xaaa99887 },
+               { MT_AMPDU_MAX_LEN_20M2S,       0x000000aa },
+               { MT_XIFS_TIME_CFG,             0x33a40d0a },
+               { MT_BKOFF_SLOT_CFG,            0x00000209 },
+               { MT_TBTT_SYNC_CFG,             0x00422010 },
+               { MT_PWR_PIN_CFG,               0x00000000 },
+               { 0x1238,                       0x001700c8 },
+               { MT_TX_SW_CFG0,                0x00101001 },
+               { MT_TX_SW_CFG1,                0x00010000 },
+               { MT_TX_SW_CFG2,                0x00000000 },
+               { MT_TXOP_CTRL_CFG,             0x0400583f },
+               { MT_TX_RTS_CFG,                0x00100020 },
+               { MT_TX_TIMEOUT_CFG,            0x000a2290 },
+               { MT_TX_RETRY_CFG,              0x47f01f0f },
+               { MT_EXP_ACK_TIME,              0x002c00dc },
+               { MT_TX_PROT_CFG6,              0xe3f42004 },
+               { MT_TX_PROT_CFG7,              0xe3f42084 },
+               { MT_TX_PROT_CFG8,              0xe3f42104 },
+               { MT_PIFS_TX_CFG,               0x00060fff },
+               { MT_RX_FILTR_CFG,              0x00015f97 },
+               { MT_LEGACY_BASIC_RATE,         0x0000017f },
+               { MT_HT_BASIC_RATE,             0x00004003 },
+               { MT_PN_PAD_MODE,               0x00000003 },
+               { MT_TXOP_HLDR_ET,              0x00000002 },
+               { 0xa44,                        0x00000000 },
+               { MT_HEADER_TRANS_CTRL_REG,     0x00000000 },
+               { MT_TSO_CTRL,                  0x00000000 },
+               { MT_AUX_CLK_CFG,               0x00000000 },
+               { MT_DACCLK_EN_DLY_CFG,         0x00000000 },
+               { MT_TX_ALC_CFG_4,              0x00000000 },
+               { MT_TX_ALC_VGA3,               0x00000000 },
+               { MT_TX_PWR_CFG_0,              0x3a3a3a3a },
+               { MT_TX_PWR_CFG_1,              0x3a3a3a3a },
+               { MT_TX_PWR_CFG_2,              0x3a3a3a3a },
+               { MT_TX_PWR_CFG_3,              0x3a3a3a3a },
+               { MT_TX_PWR_CFG_4,              0x3a3a3a3a },
+               { MT_TX_PWR_CFG_7,              0x3a3a3a3a },
+               { MT_TX_PWR_CFG_8,              0x0000003a },
+               { MT_TX_PWR_CFG_9,              0x0000003a },
+               { MT_EFUSE_CTRL,                0x0000d000 },
+               { MT_PAUSE_ENABLE_CONTROL1,     0x0000000a },
+               { MT_FCE_WLAN_FLOW_CONTROL1,    0x60401c18 },
+               { MT_WPDMA_DELAY_INT_CFG,       0x94ff0000 },
+               { MT_TX_SW_CFG3,                0x00000004 },
+               { MT_HT_FBK_TO_LEGACY,          0x00001818 },
+               { MT_VHT_HT_FBK_CFG1,           0xedcba980 },
+               { MT_PROT_AUTO_TX_CFG,          0x00830083 },
+               { MT_HT_CTRL_CFG,               0x000001ff },
+       };
+       struct mt76_reg_pair prot_vals[] = {
+               { MT_CCK_PROT_CFG,              DEFAULT_PROT_CFG_CCK },
+               { MT_OFDM_PROT_CFG,             DEFAULT_PROT_CFG_OFDM },
+               { MT_MM20_PROT_CFG,             DEFAULT_PROT_CFG_20 },
+               { MT_MM40_PROT_CFG,             DEFAULT_PROT_CFG_40 },
+               { MT_GF20_PROT_CFG,             DEFAULT_PROT_CFG_20 },
+               { MT_GF40_PROT_CFG,             DEFAULT_PROT_CFG_40 },
+       };
+
+       mt76_wr_rp(dev, 0, vals, ARRAY_SIZE(vals));
+       mt76_wr_rp(dev, 0, prot_vals, ARRAY_SIZE(prot_vals));
+}
+EXPORT_SYMBOL_GPL(mt76_write_mac_initvals);
+
+void mt76x2_init_device(struct mt76x02_dev *dev)
+{
+       struct ieee80211_hw *hw = mt76_hw(dev);
+
+       hw->queues = 4;
+       hw->max_rates = 1;
+       hw->max_report_rates = 7;
+       hw->max_rate_tries = 1;
+       hw->extra_tx_headroom = 2;
+
+       hw->sta_data_size = sizeof(struct mt76x02_sta);
+       hw->vif_data_size = sizeof(struct mt76x02_vif);
+
+       ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+       ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+
+       dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+       dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+       dev->mt76.chainmask = 0x202;
+       dev->mt76.global_wcid.idx = 255;
+       dev->mt76.global_wcid.hw_key_idx = -1;
+       dev->slottime = 9;
+
+       /* init antenna configuration */
+       dev->mt76.antenna_mask = 3;
+}
+EXPORT_SYMBOL_GPL(mt76x2_init_device);
+
+void mt76x2_init_txpower(struct mt76x02_dev *dev,
+                        struct ieee80211_supported_band *sband)
+{
+       struct ieee80211_channel *chan;
+       struct mt76x2_tx_power_info txp;
+       struct mt76_rate_power t = {};
+       int target_power;
+       int i;
+
+       for (i = 0; i < sband->n_channels; i++) {
+               chan = &sband->channels[i];
+
+               mt76x2_get_power_info(dev, &txp, chan);
+
+               target_power = max_t(int, (txp.chain[0].target_power +
+                                          txp.chain[0].delta),
+                                         (txp.chain[1].target_power +
+                                          txp.chain[1].delta));
+
+               mt76x2_get_rate_power(dev, &t, chan);
+
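+               /* power values are stored in .5 dB units, hence the division */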
+               chan->max_power = mt76x02_get_max_rate_power(&t) +
+                                 target_power;
+               chan->max_power /= 2;
+
+               /* convert to combined output power on 2x2 devices */
+               chan->max_power += 3;
+       }
+}
+EXPORT_SYMBOL_GPL(mt76x2_init_txpower);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c
new file mode 100644 (file)
index 0000000..e25905c
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force)
+{
+       bool stopped = false;
+       u32 rts_cfg;
+       int i;
+
+       mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+       rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+       mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
+
+       /* Wait for MAC to become idle */
+       for (i = 0; i < 300; i++) {
+               if ((mt76_rr(dev, MT_MAC_STATUS) &
+                    (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) ||
+                   mt76_rr(dev, MT_BBP(IBI, 12))) {
+                       udelay(1);
+                       continue;
+               }
+
+               stopped = true;
+               break;
+       }
+
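+       /* the MAC never went idle: pulse the BBP CORE 4 bits to force it to stop */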
+       if (force && !stopped) {
+               mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+               mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+               mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+               mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+       }
+
+       mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_stop);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
new file mode 100644 (file)
index 0000000..a31bd49
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_MAC_H
+#define __MT76x2_MAC_H
+
+#include "mt76x2.h"
+
+struct mt76x02_dev;
+struct mt76x2_sta;
+struct mt76x02_vif;
+
+int mt76x2_mac_start(struct mt76x02_dev *dev);
+void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force);
+void mt76x2_mac_resume(struct mt76x02_dev *dev);
+void mt76x2_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr);
+
+int mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
+                         struct sk_buff *skb);
+void mt76x2_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx, bool val);
+
+void mt76x2_mac_work(struct work_struct *work);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
new file mode 100644 (file)
index 0000000..134037a
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+
+#include "mt76x2.h"
+#include "mcu.h"
+#include "eeprom.h"
+
+int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw,
+                          u8 bw_index, bool scan)
+{
+       struct sk_buff *skb;
+       struct {
+               u8 idx;
+               u8 scan;
+               u8 bw;
+               u8 _pad0;
+
+               __le16 chainmask;
+               u8 ext_chan;
+               u8 _pad1;
+
+       } __packed __aligned(4) msg = {
+               .idx = channel,
+               .scan = scan,
+               .bw = bw,
+               .chainmask = cpu_to_le16(dev->mt76.chainmask),
+       };
+
+       /* first set the channel without the extension channel info */
+       skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+       mt76_mcu_send_msg(dev, skb, CMD_SWITCH_CHANNEL_OP, true);
+
+       usleep_range(5000, 10000);
+
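+       /* then send the command again with the extension channel configured */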
+       msg.ext_chan = 0xe0 + bw_index;
+       skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+       return mt76_mcu_send_msg(dev, skb, CMD_SWITCH_CHANNEL_OP, true);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mcu_set_channel);
+
+int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
+                      u8 channel)
+{
+       struct mt76_dev *mdev = &dev->mt76;
+       struct sk_buff *skb;
+       struct {
+               u8 cr_mode;
+               u8 temp;
+               u8 ch;
+               u8 _pad0;
+
+               __le32 cfg;
+       } __packed __aligned(4) msg = {
+               .cr_mode = type,
+               .temp = temp_level,
+               .ch = channel,
+       };
+       u32 val;
+
+       val = BIT(31);
+       val |= (mt76x02_eeprom_get(mdev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
+       val |= (mt76x02_eeprom_get(mdev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
+       msg.cfg = cpu_to_le32(val);
+
+       /* ask the MCU to load the CR values for this channel and temperature */
+       skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+       return mt76_mcu_send_msg(dev, skb, CMD_LOAD_CR, true);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mcu_load_cr);
+
+int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain,
+                        bool force)
+{
+       struct sk_buff *skb;
+       struct {
+               __le32 channel;
+               __le32 gain_val;
+       } __packed __aligned(4) msg = {
+               .channel = cpu_to_le32(channel),
+               .gain_val = cpu_to_le32(gain),
+       };
+
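+       /* bit 31 of the channel word requests a forced gain update */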
+       if (force)
+               msg.channel |= cpu_to_le32(BIT(31));
+
+       skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+       return mt76_mcu_send_msg(dev, skb, CMD_INIT_GAIN_OP, true);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mcu_init_gain);
+
+int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev,
+                        struct mt76x2_tssi_comp *tssi_data)
+{
+       struct sk_buff *skb;
+       struct {
+               __le32 id;
+               struct mt76x2_tssi_comp data;
+       } __packed __aligned(4) msg = {
+               .id = cpu_to_le32(MCU_CAL_TSSI_COMP),
+               .data = *tssi_data,
+       };
+
+       skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+       return mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mcu_tssi_comp);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h
new file mode 100644 (file)
index 0000000..acfa2b5
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_MCU_H
+#define __MT76x2_MCU_H
+
+#include "../mt76x02_mcu.h"
+
+/* Register definitions */
+#define MT_MCU_CPU_CTL                 0x0704
+#define MT_MCU_CLOCK_CTL               0x0708
+#define MT_MCU_PCIE_REMAP_BASE1                0x0740
+#define MT_MCU_PCIE_REMAP_BASE2                0x0744
+#define MT_MCU_PCIE_REMAP_BASE3                0x0748
+
+#define MT_LED_CTRL                    0x0770
+#define MT_LED_CTRL_REPLAY(_n)         BIT(0 + (8 * (_n)))
+#define MT_LED_CTRL_POLARITY(_n)       BIT(1 + (8 * (_n)))
+#define MT_LED_CTRL_TX_BLINK_MODE(_n)  BIT(2 + (8 * (_n)))
+#define MT_LED_CTRL_KICK(_n)           BIT(7 + (8 * (_n)))
+
+#define MT_LED_TX_BLINK_0              0x0774
+#define MT_LED_TX_BLINK_1              0x0778
+
+#define MT_LED_S0_BASE                 0x077C
+#define MT_LED_S0(_n)                  (MT_LED_S0_BASE + 8 * (_n))
+#define MT_LED_S1_BASE                 0x0780
+#define MT_LED_S1(_n)                  (MT_LED_S1_BASE + 8 * (_n))
+#define MT_LED_STATUS_OFF_MASK         GENMASK(31, 24)
+#define MT_LED_STATUS_OFF(_v)          (((_v) << __ffs(MT_LED_STATUS_OFF_MASK)) & \
+                                        MT_LED_STATUS_OFF_MASK)
+#define MT_LED_STATUS_ON_MASK          GENMASK(23, 16)
+#define MT_LED_STATUS_ON(_v)           (((_v) << __ffs(MT_LED_STATUS_ON_MASK)) & \
+                                        MT_LED_STATUS_ON_MASK)
+#define MT_LED_STATUS_DURATION_MASK    GENMASK(15, 8)
+#define MT_LED_STATUS_DURATION(_v)     (((_v) << __ffs(MT_LED_STATUS_DURATION_MASK)) & \
+                                        MT_LED_STATUS_DURATION_MASK)
+
+#define MT_MCU_ROM_PATCH_OFFSET                0x80000
+#define MT_MCU_ROM_PATCH_ADDR          0x90000
+
+#define MT_MCU_ILM_OFFSET              0x80000
+
+#define MT_MCU_DLM_OFFSET              0x100000
+#define MT_MCU_DLM_ADDR                        0x90000
+#define MT_MCU_DLM_ADDR_E3             0x90800
+
+enum mcu_calibration {
+       MCU_CAL_R = 1,
+       MCU_CAL_TEMP_SENSOR,
+       MCU_CAL_RXDCOC,
+       MCU_CAL_RC,
+       MCU_CAL_SX_LOGEN,
+       MCU_CAL_LC,
+       MCU_CAL_TX_LOFT,
+       MCU_CAL_TXIQ,
+       MCU_CAL_TSSI,
+       MCU_CAL_TSSI_COMP,
+       MCU_CAL_DPD,
+       MCU_CAL_RXIQC_FI,
+       MCU_CAL_RXIQC_FD,
+       MCU_CAL_PWRON,
+       MCU_CAL_TX_SHAPING,
+};
+
+enum mt76x2_mcu_cr_mode {
+       MT_RF_CR,
+       MT_BBP_CR,
+       MT_RF_BBP_CR,
+       MT_HL_TEMP_CR_UPDATE,
+};
+
+struct mt76x2_tssi_comp {
+       u8 pa_mode;
+       u8 cal_mode;
+       u16 pad;
+
+       u8 slope0;
+       u8 slope1;
+       u8 offset0;
+       u8 offset1;
+} __packed __aligned(4);
+
+int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev,
+                        struct mt76x2_tssi_comp *tssi_data);
+int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain,
+                        bool force);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
new file mode 100644 (file)
index 0000000..cbec8c6
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_H
+#define __MT76x2_H
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+
+#define MT7662_FIRMWARE                "mt7662.bin"
+#define MT7662_ROM_PATCH       "mt7662_rom_patch.bin"
+#define MT7662_EEPROM_SIZE     512
+
+#define MT7662U_FIRMWARE       "mediatek/mt7662u.bin"
+#define MT7662U_ROM_PATCH      "mediatek/mt7662u_rom_patch.bin"
+
+#define MT_CALIBRATE_INTERVAL  HZ
+
+#include "../mt76x02.h"
+#include "mac.h"
+#include "dfs.h"
+
+static inline bool is_mt7612(struct mt76x02_dev *dev)
+{
+       return mt76_chip(&dev->mt76) == 0x7612;
+}
+
+static inline bool mt76x2_channel_silent(struct mt76x02_dev *dev)
+{
+       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+
+       return ((chan->flags & IEEE80211_CHAN_RADAR) &&
+               chan->dfs_state != NL80211_DFS_AVAILABLE);
+}
+
+extern const struct ieee80211_ops mt76x2_ops;
+
+struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev);
+int mt76x2_register_device(struct mt76x02_dev *dev);
+void mt76x2_init_debugfs(struct mt76x02_dev *dev);
+void mt76x2_init_device(struct mt76x02_dev *dev);
+
+void mt76x2_phy_power_on(struct mt76x02_dev *dev);
+int mt76x2_init_hardware(struct mt76x02_dev *dev);
+void mt76x2_stop_hardware(struct mt76x02_dev *dev);
+int mt76x2_eeprom_init(struct mt76x02_dev *dev);
+int mt76x2_apply_calibration_data(struct mt76x02_dev *dev, int channel);
+void mt76x2_set_tx_ackto(struct mt76x02_dev *dev);
+
+void mt76x2_phy_set_antenna(struct mt76x02_dev *dev);
+int mt76x2_phy_start(struct mt76x02_dev *dev);
+int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
+                          struct cfg80211_chan_def *chandef);
+void mt76x2_phy_calibrate(struct work_struct *work);
+void mt76x2_phy_set_txpower(struct mt76x02_dev *dev);
+
+int mt76x2_mcu_init(struct mt76x02_dev *dev);
+int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw,
+                          u8 bw_index, bool scan);
+int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
+                      u8 channel);
+
+void mt76x2_cleanup(struct mt76x02_dev *dev);
+
+void mt76x2_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val);
+
+void mt76x2_pre_tbtt_tasklet(unsigned long arg);
+
+void mt76x2_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
+
+void mt76x2_update_channel(struct mt76_dev *mdev);
+
+void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable);
+void mt76x2_init_txpower(struct mt76x02_dev *dev,
+                        struct ieee80211_supported_band *sband);
+void mt76_write_mac_initvals(struct mt76x02_dev *dev);
+
+void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait);
+void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
+                                enum nl80211_band band);
+void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
+                              enum nl80211_band band, u8 bw);
+void mt76x2_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl);
+void mt76x2_phy_set_band(struct mt76x02_dev *dev, int band, bool primary_upper);
+void mt76x2_apply_gain_adj(struct mt76x02_dev *dev);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
new file mode 100644 (file)
index 0000000..6e932b5
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2U_H
+#define __MT76x2U_H
+
+#include <linux/device.h>
+
+#include "mt76x2.h"
+#include "mcu.h"
+
+#define MT7612U_EEPROM_SIZE            512
+
+#define MT_USB_AGGR_SIZE_LIMIT         21 /* 1024B unit */
+#define MT_USB_AGGR_TIMEOUT            0x80 /* 33ns unit */
+
+extern const struct ieee80211_ops mt76x2u_ops;
+
+struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev);
+int mt76x2u_register_device(struct mt76x02_dev *dev);
+int mt76x2u_init_hardware(struct mt76x02_dev *dev);
+void mt76x2u_cleanup(struct mt76x02_dev *dev);
+void mt76x2u_stop_hw(struct mt76x02_dev *dev);
+
+int mt76x2u_mac_reset(struct mt76x02_dev *dev);
+void mt76x2u_mac_resume(struct mt76x02_dev *dev);
+int mt76x2u_mac_start(struct mt76x02_dev *dev);
+int mt76x2u_mac_stop(struct mt76x02_dev *dev);
+
+int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
+                           struct cfg80211_chan_def *chandef);
+void mt76x2u_phy_calibrate(struct work_struct *work);
+void mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev);
+
+void mt76x2u_mcu_complete_urb(struct urb *urb);
+int mt76x2u_mcu_set_dynamic_vga(struct mt76x02_dev *dev, u8 channel, bool ap,
+                               bool ext, int rssi, u32 false_cca);
+int mt76x2u_mcu_init(struct mt76x02_dev *dev);
+int mt76x2u_mcu_fw_init(struct mt76x02_dev *dev);
+
+int mt76x2u_alloc_queues(struct mt76x02_dev *dev);
+void mt76x2u_queues_deinit(struct mt76x02_dev *dev);
+void mt76x2u_stop_queues(struct mt76x02_dev *dev);
+int mt76x2u_skb_dma_info(struct sk_buff *skb, enum dma_msg_port port,
+                        u32 flags);
+
+#endif /* __MT76x2U_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
new file mode 100644 (file)
index 0000000..92432fe
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt76x2.h"
+
+static const struct pci_device_id mt76pci_device_table[] = {
+       { PCI_DEVICE(0x14c3, 0x7662) },
+       { PCI_DEVICE(0x14c3, 0x7612) },
+       { PCI_DEVICE(0x14c3, 0x7602) },
+       { },
+};
+
+static int
+mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct mt76x02_dev *dev;
+       int ret;
+
+       ret = pcim_enable_device(pdev);
+       if (ret)
+               return ret;
+
+       ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+       if (ret)
+               return ret;
+
+       pci_set_master(pdev);
+
+       ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
+
+       dev = mt76x2_alloc_device(&pdev->dev);
+       if (!dev)
+               return -ENOMEM;
+
+       mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
+       mt76x2_reset_wlan(dev, false);
+
+       dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
+       dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
+
+       ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x02_irq_handler,
+                              IRQF_SHARED, KBUILD_MODNAME, dev);
+       if (ret)
+               goto error;
+
+       ret = mt76x2_register_device(dev);
+       if (ret)
+               goto error;
+
+       /* Fix up ASPM configuration */
+
+       /* RG_SSUSB_G1_CDR_BIR_LTR = 0x9 */
+       mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
+
+       /* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
+       mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
+
+       /* RG_SSUSB_CDR_BR_PE1D = 0x3 */
+       mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
+
+       return 0;
+
+error:
+       ieee80211_free_hw(mt76_hw(dev));
+       return ret;
+}
+
+static void
+mt76pci_remove(struct pci_dev *pdev)
+{
+       struct mt76_dev *mdev = pci_get_drvdata(pdev);
+       struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+
+       mt76_unregister_device(mdev);
+       mt76x2_cleanup(dev);
+       ieee80211_free_hw(mdev->hw);
+}
+
+MODULE_DEVICE_TABLE(pci, mt76pci_device_table);
+MODULE_FIRMWARE(MT7662_FIRMWARE);
+MODULE_FIRMWARE(MT7662_ROM_PATCH);
+MODULE_LICENSE("Dual BSD/GPL");
+
+static struct pci_driver mt76pci_driver = {
+       .name           = KBUILD_MODNAME,
+       .id_table       = mt76pci_device_table,
+       .probe          = mt76pci_probe,
+       .remove         = mt76pci_remove,
+};
+
+module_pci_driver(mt76pci_driver);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c
new file mode 100644 (file)
index 0000000..b56feba
--- /dev/null
@@ -0,0 +1,878 @@
+/*
+ * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+#define RADAR_SPEC(m, len, el, eh, wl, wh,             \
+                  w_tolerance, tl, th, t_tolerance,    \
+                  bl, bh, event_exp, power_jmp)        \
+{                                                      \
+       .mode = m,                                      \
+       .avg_len = len,                                 \
+       .e_low = el,                                    \
+       .e_high = eh,                                   \
+       .w_low = wl,                                    \
+       .w_high = wh,                                   \
+       .w_margin = w_tolerance,                        \
+       .t_low = tl,                                    \
+       .t_high = th,                                   \
+       .t_margin = t_tolerance,                        \
+       .b_low = bl,                                    \
+       .b_high = bh,                                   \
+       .event_expiration = event_exp,                  \
+       .pwr_jmp = power_jmp                            \
+}
+
+static const struct mt76x02_radar_specs etsi_radar_specs[] = {
+       /* 20MHz */
+       RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+                  0x7fffffff, 0x155cc0, 0x19cc),
+       RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+                  0x7fffffff, 0x155cc0, 0x19cc),
+       RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+                  0x7fffffff, 0x155cc0, 0x19dd),
+       RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+                  0x7fffffff, 0x2191c0, 0x15cc),
+       /* 40MHz */
+       RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+                  0x7fffffff, 0x155cc0, 0x19cc),
+       RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+                  0x7fffffff, 0x155cc0, 0x19cc),
+       RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+                  0x7fffffff, 0x155cc0, 0x19dd),
+       RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+                  0x7fffffff, 0x2191c0, 0x15cc),
+       /* 80MHz */
+       RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+                  0x7fffffff, 0x155cc0, 0x19cc),
+       RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+                  0x7fffffff, 0x155cc0, 0x19cc),
+       RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+                  0x7fffffff, 0x155cc0, 0x19dd),
+       RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+                  0x7fffffff, 0x2191c0, 0x15cc)
+};
+
+static const struct mt76x02_radar_specs fcc_radar_specs[] = {
+       /* 20MHz */
+       RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
+                  0x7fffffff, 0xfe808, 0x13dc),
+       RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+                  0x7fffffff, 0xfe808, 0x19dd),
+       RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+                  0x7fffffff, 0xfe808, 0x12cc),
+       RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+                  0x3938700, 0x57bcf00, 0x1289),
+       /* 40MHz */
+       RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
+                  0x7fffffff, 0xfe808, 0x13dc),
+       RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+                  0x7fffffff, 0xfe808, 0x19dd),
+       RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+                  0x7fffffff, 0xfe808, 0x12cc),
+       RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+                  0x3938700, 0x57bcf00, 0x1289),
+       /* 80MHz */
+       RADAR_SPEC(0, 8, 2, 14, 106, 150, 15, 2900, 80100, 15, 0,
+                  0x7fffffff, 0xfe808, 0x16cc),
+       RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+                  0x7fffffff, 0xfe808, 0x19dd),
+       RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+                  0x7fffffff, 0xfe808, 0x12cc),
+       RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+                  0x3938700, 0x57bcf00, 0x1289)
+};
+
+static const struct mt76x02_radar_specs jp_w56_radar_specs[] = {
+       /* 20MHz */
+       RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
+                  0x7fffffff, 0x14c080, 0x13dc),
+       RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+                  0x7fffffff, 0x14c080, 0x19dd),
+       RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+                  0x7fffffff, 0x14c080, 0x12cc),
+       RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+                  0x3938700, 0x57bcf00, 0x1289),
+       /* 40MHz */
+       RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
+                  0x7fffffff, 0x14c080, 0x13dc),
+       RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+                  0x7fffffff, 0x14c080, 0x19dd),
+       RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+                  0x7fffffff, 0x14c080, 0x12cc),
+       RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+                  0x3938700, 0x57bcf00, 0x1289),
+       /* 80MHz */
+       RADAR_SPEC(0, 8, 2, 9, 106, 150, 15, 2900, 80100, 15, 0,
+                  0x7fffffff, 0x14c080, 0x16cc),
+       RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+                  0x7fffffff, 0x14c080, 0x19dd),
+       RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+                  0x7fffffff, 0x14c080, 0x12cc),
+       RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+                  0x3938700, 0x57bcf00, 0x1289)
+};
+
+static const struct mt76x02_radar_specs jp_w53_radar_specs[] = {
+       /* 20MHz */
+       RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+                  0x7fffffff, 0x14c080, 0x16cc),
+       { 0 },
+       RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+                  0x7fffffff, 0x14c080, 0x16cc),
+       { 0 },
+       /* 40MHz */
+       RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+                  0x7fffffff, 0x14c080, 0x16cc),
+       { 0 },
+       RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+                  0x7fffffff, 0x14c080, 0x16cc),
+       { 0 },
+       /* 80MHz */
+       RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+                  0x7fffffff, 0x14c080, 0x16cc),
+       { 0 },
+       RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+                  0x7fffffff, 0x14c080, 0x16cc),
+       { 0 }
+};
+
+static void
+mt76x2_dfs_set_capture_mode_ctrl(struct mt76x02_dev *dev,
+                                u8 enable)
+{
+       u32 data;
+
+       data = (1 << 1) | enable;
+       mt76_wr(dev, MT_BBP(DFS, 36), data);
+}
+
+static void mt76x2_dfs_seq_pool_put(struct mt76x02_dev *dev,
+                                   struct mt76x02_dfs_sequence *seq)
+{
+       struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+       list_add(&seq->head, &dfs_pd->seq_pool);
+
+       dfs_pd->seq_stats.seq_pool_len++;
+       dfs_pd->seq_stats.seq_len--;
+}
+
+static struct mt76x02_dfs_sequence *
+mt76x2_dfs_seq_pool_get(struct mt76x02_dev *dev)
+{
+       struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+       struct mt76x02_dfs_sequence *seq;
+
+       if (list_empty(&dfs_pd->seq_pool)) {
+               seq = devm_kzalloc(dev->mt76.dev, sizeof(*seq), GFP_ATOMIC);
+       } else {
+               seq = list_first_entry(&dfs_pd->seq_pool,
+                                      struct mt76x02_dfs_sequence,
+                                      head);
+               list_del(&seq->head);
+               dfs_pd->seq_stats.seq_pool_len--;
+       }
+       if (seq)
+               dfs_pd->seq_stats.seq_len++;
+
+       return seq;
+}
+
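+/* Return the integer factor such that val ~= factor * frac within the
+ * given margin, or 0 if val is not close to a multiple of frac.
+ */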
+static int mt76x2_dfs_get_multiple(int val, int frac, int margin)
+{
+       int remainder, factor;
+
+       if (!frac)
+               return 0;
+
+       if (abs(val - frac) <= margin)
+               return 1;
+
+       factor = val / frac;
+       remainder = val % frac;
+
+       if (remainder > margin) {
+               if ((frac - remainder) <= margin)
+                       factor++;
+               else
+                       factor = 0;
+       }
+       return factor;
+}
+
+static void mt76x2_dfs_detector_reset(struct mt76x02_dev *dev)
+{
+       struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+       struct mt76x02_dfs_sequence *seq, *tmp_seq;
+       int i;
+
+       /* reset hw detector */
+       mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+
+       /* reset sw detector */
+       for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) {
+               dfs_pd->event_rb[i].h_rb = 0;
+               dfs_pd->event_rb[i].t_rb = 0;
+       }
+
+       list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
+               list_del_init(&seq->head);
+               mt76x2_dfs_seq_pool_put(dev, seq);
+       }
+}
+
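+/* Count chirp pulses: pulses arriving within 12 s of the previous one
+ * (the hw timestamp appears to tick at ~1 MHz, hence 12 << 20 ticks)
+ * accumulate, and more than eight in a row is treated as a chirp
+ * radar pattern.
+ */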
+static bool mt76x2_dfs_check_chirp(struct mt76x02_dev *dev)
+{
+       bool ret = false;
+       u32 current_ts, delta_ts;
+       struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+       current_ts = mt76_rr(dev, MT_PBF_LIFE_TIMER);
+       delta_ts = current_ts - dfs_pd->chirp_pulse_ts;
+       dfs_pd->chirp_pulse_ts = current_ts;
+
+       /* 12 sec */
+       if (delta_ts <= (12 * (1 << 20))) {
+               if (++dfs_pd->chirp_pulse_cnt > 8)
+                       ret = true;
+       } else {
+               dfs_pd->chirp_pulse_cnt = 1;
+       }
+
+       return ret;
+}
+
+static void mt76x2_dfs_get_hw_pulse(struct mt76x02_dev *dev,
+                                   struct mt76x02_dfs_hw_pulse *pulse)
+{
+       u32 data;
+
+       /* select channel */
+       data = (MT_DFS_CH_EN << 16) | pulse->engine;
+       mt76_wr(dev, MT_BBP(DFS, 0), data);
+
+       /* reported period */
+       pulse->period = mt76_rr(dev, MT_BBP(DFS, 19));
+
+       /* reported width */
+       pulse->w1 = mt76_rr(dev, MT_BBP(DFS, 20));
+       pulse->w2 = mt76_rr(dev, MT_BBP(DFS, 23));
+
+       /* reported burst number */
+       pulse->burst = mt76_rr(dev, MT_BBP(DFS, 22));
+}
+
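+/* Validate a hw-detected pulse against the per-region width/period
+ * limits; the numeric bounds are vendor-provided thresholds, presumably
+ * expressed in hw timer ticks.
+ */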
+static bool mt76x2_dfs_check_hw_pulse(struct mt76x02_dev *dev,
+                                     struct mt76x02_dfs_hw_pulse *pulse)
+{
+       bool ret = false;
+
+       if (!pulse->period || !pulse->w1)
+               return false;
+
+       switch (dev->dfs_pd.region) {
+       case NL80211_DFS_FCC:
+               if (pulse->engine > 3)
+                       break;
+
+               if (pulse->engine == 3) {
+                       ret = mt76x2_dfs_check_chirp(dev);
+                       break;
+               }
+
+               /* check short pulse */
+               if (pulse->w1 < 120)
+                       ret = (pulse->period >= 2900 &&
+                              (pulse->period <= 4700 ||
+                               pulse->period >= 6400) &&
+                              (pulse->period <= 6800 ||
+                               pulse->period >= 10200) &&
+                              pulse->period <= 61600);
+               else if (pulse->w1 < 130) /* 120 - 130 */
+                       ret = (pulse->period >= 2900 &&
+                              pulse->period <= 61600);
+               else
+                       ret = (pulse->period >= 3500 &&
+                              pulse->period <= 10100);
+               break;
+       case NL80211_DFS_ETSI:
+               if (pulse->engine >= 3)
+                       break;
+
+               ret = (pulse->period >= 4900 &&
+                      (pulse->period <= 10200 ||
+                       pulse->period >= 12400) &&
+                      pulse->period <= 100100);
+               break;
+       case NL80211_DFS_JP:
+               if (dev->mt76.chandef.chan->center_freq >= 5250 &&
+                   dev->mt76.chandef.chan->center_freq <= 5350) {
+                       /* JPW53 */
+                       if (pulse->w1 <= 130)
+                               ret = (pulse->period >= 28360 &&
+                                      (pulse->period <= 28700 ||
+                                       pulse->period >= 76900) &&
+                                      pulse->period <= 76940);
+                       break;
+               }
+
+               if (pulse->engine > 3)
+                       break;
+
+               if (pulse->engine == 3) {
+                       ret = mt76x2_dfs_check_chirp(dev);
+                       break;
+               }
+
+               /* check short pulse */
+               if (pulse->w1 < 120)
+                       ret = (pulse->period >= 2900 &&
+                              (pulse->period <= 4700 ||
+                               pulse->period >= 6400) &&
+                              (pulse->period <= 6800 ||
+                               pulse->period >= 27560) &&
+                              (pulse->period <= 27960 ||
+                               pulse->period >= 28360) &&
+                              (pulse->period <= 28700 ||
+                               pulse->period >= 79900) &&
+                              pulse->period <= 80100);
+               else if (pulse->w1 < 130) /* 120 - 130 */
+                       ret = (pulse->period >= 2900 &&
+                              (pulse->period <= 10100 ||
+                               pulse->period >= 27560) &&
+                              (pulse->period <= 27960 ||
+                               pulse->period >= 28360) &&
+                              (pulse->period <= 28700 ||
+                               pulse->period >= 79900) &&
+                              pulse->period <= 80100);
+               else
+                       ret = (pulse->period >= 3900 &&
+                              pulse->period <= 10100);
+               break;
+       case NL80211_DFS_UNSET:
+       default:
+               return false;
+       }
+
+       return ret;
+}
+
+static bool mt76x2_dfs_fetch_event(struct mt76x02_dev *dev,
+                                  struct mt76x02_dfs_event *event)
+{
+       u32 data;
+
+       /* each event is read out as four consecutive reads of DFS_R37:
+        * 1st: [31]: 0 (engine 0) - 1 (engine 2)
+        * 2nd: [21:0]: pulse time
+        * 3rd: [11:0]: pulse width, [25:16]: phase
+        * 4th: [12:0]: current pwr, [21:16]: pwr stable counter
+        *
+        * a first read of 0xffffffff means no event was detected
+        */
+       data = mt76_rr(dev, MT_BBP(DFS, 37));
+       if (!MT_DFS_CHECK_EVENT(data))
+               return false;
+
+       event->engine = MT_DFS_EVENT_ENGINE(data);
+       data = mt76_rr(dev, MT_BBP(DFS, 37));
+       event->ts = MT_DFS_EVENT_TIMESTAMP(data);
+       data = mt76_rr(dev, MT_BBP(DFS, 37));
+       event->width = MT_DFS_EVENT_WIDTH(data);
+
+       return true;
+}
+
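+/* Filter out engine 2 events that follow a wide (>= 200) event within
+ * the time margin, as these are likely echoes of the same pulse.
+ */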
+static bool mt76x2_dfs_check_event(struct mt76x02_dev *dev,
+                                  struct mt76x02_dfs_event *event)
+{
+       if (event->engine == 2) {
+               struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+               struct mt76x02_dfs_event_rb *event_buff = &dfs_pd->event_rb[1];
+               u16 last_event_idx;
+               u32 delta_ts;
+
+               last_event_idx = mt76_decr(event_buff->t_rb,
+                                          MT_DFS_EVENT_BUFLEN);
+               delta_ts = event->ts - event_buff->data[last_event_idx].ts;
+               if (delta_ts < MT_DFS_EVENT_TIME_MARGIN &&
+                   event_buff->data[last_event_idx].width >= 200)
+                       return false;
+       }
+       return true;
+}
+
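+/* Append the event to the per-engine ring buffer, overwriting the
+ * oldest entry (advancing the head) when the buffer is full.
+ */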
+static void mt76x2_dfs_queue_event(struct mt76x02_dev *dev,
+                                  struct mt76x02_dfs_event *event)
+{
+       struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+       struct mt76x02_dfs_event_rb *event_buff;
+
+       /* add radar event to ring buffer */
+       event_buff = event->engine == 2 ? &dfs_pd->event_rb[1]
+                                       : &dfs_pd->event_rb[0];
+       event_buff->data[event_buff->t_rb] = *event;
+       event_buff->data[event_buff->t_rb].fetch_ts = jiffies;
+
+       event_buff->t_rb = mt76_incr(event_buff->t_rb, MT_DFS_EVENT_BUFLEN);
+       if (event_buff->t_rb == event_buff->h_rb)
+               event_buff->h_rb = mt76_incr(event_buff->h_rb,
+                                            MT_DFS_EVENT_BUFLEN);
+}
+
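+/* Walk the event ring buffer backwards from the new event and try to
+ * build a pulse sequence (candidate PRI) longer than cur_len; newly
+ * found sequences are added to the active sequence list.
+ */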
+static int mt76x2_dfs_create_sequence(struct mt76x02_dev *dev,
+                                     struct mt76x02_dfs_event *event,
+                                     u16 cur_len)
+{
+       struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+       struct mt76x02_dfs_sw_detector_params *sw_params;
+       u32 width_delta, width_sum, factor, cur_pri;
+       struct mt76x02_dfs_sequence seq, *seq_p;
+       struct mt76x02_dfs_event_rb *event_rb;
+       struct mt76x02_dfs_event *cur_event;
+       int i, j, end, pri;
+
+       event_rb = event->engine == 2 ? &dfs_pd->event_rb[1]
+                                     : &dfs_pd->event_rb[0];
+
+       i = mt76_decr(event_rb->t_rb, MT_DFS_EVENT_BUFLEN);
+       end = mt76_decr(event_rb->h_rb, MT_DFS_EVENT_BUFLEN);
+
+       while (i != end) {
+               cur_event = &event_rb->data[i];
+               width_sum = event->width + cur_event->width;
+
+               sw_params = &dfs_pd->sw_dpd_params;
+               switch (dev->dfs_pd.region) {
+               case NL80211_DFS_FCC:
+               case NL80211_DFS_JP:
+                       if (width_sum < 600)
+                               width_delta = 8;
+                       else
+                               width_delta = width_sum >> 3;
+                       break;
+               case NL80211_DFS_ETSI:
+                       if (event->engine == 2)
+                               width_delta = width_sum >> 6;
+                       else if (width_sum < 620)
+                               width_delta = 24;
+                       else
+                               width_delta = 8;
+                       break;
+               case NL80211_DFS_UNSET:
+               default:
+                       return -EINVAL;
+               }
+
+               pri = event->ts - cur_event->ts;
+               if (abs(event->width - cur_event->width) > width_delta ||
+                   pri < sw_params->min_pri)
+                       goto next;
+
+               if (pri > sw_params->max_pri)
+                       break;
+
+               seq.pri = event->ts - cur_event->ts;
+               seq.first_ts = cur_event->ts;
+               seq.last_ts = event->ts;
+               seq.engine = event->engine;
+               seq.count = 2;
+
+               j = mt76_decr(i, MT_DFS_EVENT_BUFLEN);
+               while (j != end) {
+                       cur_event = &event_rb->data[j];
+                       cur_pri = event->ts - cur_event->ts;
+                       factor = mt76x2_dfs_get_multiple(cur_pri, seq.pri,
+                                               sw_params->pri_margin);
+                       if (factor > 0) {
+                               seq.first_ts = cur_event->ts;
+                               seq.count++;
+                       }
+
+                       j = mt76_decr(j, MT_DFS_EVENT_BUFLEN);
+               }
+               if (seq.count <= cur_len)
+                       goto next;
+
+               seq_p = mt76x2_dfs_seq_pool_get(dev);
+               if (!seq_p)
+                       return -ENOMEM;
+
+               *seq_p = seq;
+               INIT_LIST_HEAD(&seq_p->head);
+               list_add(&seq_p->head, &dfs_pd->sequences);
+next:
+               i = mt76_decr(i, MT_DFS_EVENT_BUFLEN);
+       }
+       return 0;
+}
+
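+/* Fold the new event into every active sequence whose PRI matches,
+ * dropping sequences that fell outside the observation window.
+ * Returns the longest matching sequence length.
+ */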
+static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x02_dev *dev,
+                                           struct mt76x02_dfs_event *event)
+{
+       struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+       struct mt76x02_dfs_sw_detector_params *sw_params;
+       struct mt76x02_dfs_sequence *seq, *tmp_seq;
+       u16 max_seq_len = 0;
+       u32 factor, pri;
+
+       sw_params = &dfs_pd->sw_dpd_params;
+       list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
+               if (event->ts > seq->first_ts + MT_DFS_SEQUENCE_WINDOW) {
+                       list_del_init(&seq->head);
+                       mt76x2_dfs_seq_pool_put(dev, seq);
+                       continue;
+               }
+
+               if (event->engine != seq->engine)
+                       continue;
+
+               pri = event->ts - seq->last_ts;
+               factor = mt76x2_dfs_get_multiple(pri, seq->pri,
+                                                sw_params->pri_margin);
+               if (factor > 0) {
+                       seq->last_ts = event->ts;
+                       seq->count++;
+                       max_seq_len = max_t(u16, max_seq_len, seq->count);
+               }
+       }
+       return max_seq_len;
+}
+
+static bool mt76x2_dfs_check_detection(struct mt76x02_dev *dev)
+{
+       struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+       struct mt76x02_dfs_sequence *seq;
+
+       if (list_empty(&dfs_pd->sequences))
+               return false;
+
+       list_for_each_entry(seq, &dfs_pd->sequences, head) {
+               if (seq->count > MT_DFS_SEQUENCE_TH) {
+                       dfs_pd->stats[seq->engine].sw_pattern++;
+                       return true;
+               }
+       }
+       return false;
+}
+
+static void mt76x2_dfs_add_events(struct mt76x02_dev *dev)
+{
+       struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+       struct mt76x02_dfs_event event;
+       int i, seq_len;
+
+       /* disable debug mode */
+       mt76x2_dfs_set_capture_mode_ctrl(dev, false);
+       for (i = 0; i < MT_DFS_EVENT_LOOP; i++) {
+               if (!mt76x2_dfs_fetch_event(dev, &event))
+                       break;
+
+               if (dfs_pd->last_event_ts > event.ts)
+                       mt76x2_dfs_detector_reset(dev);
+               dfs_pd->last_event_ts = event.ts;
+
+               if (!mt76x2_dfs_check_event(dev, &event))
+                       continue;
+
+               seq_len = mt76x2_dfs_add_event_to_sequence(dev, &event);
+               mt76x2_dfs_create_sequence(dev, &event, seq_len);
+
+               mt76x2_dfs_queue_event(dev, &event);
+       }
+       mt76x2_dfs_set_capture_mode_ctrl(dev, true);
+}
+
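+/* Drop ring buffer entries that have aged out of the sw detection
+ * window.
+ */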
+static void mt76x2_dfs_check_event_window(struct mt76x02_dev *dev)
+{
+       struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+       struct mt76x02_dfs_event_rb *event_buff;
+       struct mt76x02_dfs_event *event;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) {
+               event_buff = &dfs_pd->event_rb[i];
+
+               while (event_buff->h_rb != event_buff->t_rb) {
+                       event = &event_buff->data[event_buff->h_rb];
+
+                       /* sorted list */
+                       if (time_is_after_jiffies(event->fetch_ts +
+                                                 MT_DFS_EVENT_WINDOW))
+                               break;
+                       event_buff->h_rb = mt76_incr(event_buff->h_rb,
+                                                    MT_DFS_EVENT_BUFLEN);
+               }
+       }
+}
+
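+/* GP timer bottom half: periodically run the sw pattern detector over
+ * the queued events, then poll the hw detector engines; either path
+ * reports radar to mac80211 and resets the detector on a match.
+ */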
+static void mt76x2_dfs_tasklet(unsigned long arg)
+{
+       struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
+       struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+       u32 engine_mask;
+       int i;
+
+       if (test_bit(MT76_SCANNING, &dev->mt76.state))
+               goto out;
+
+       if (time_is_before_jiffies(dfs_pd->last_sw_check +
+                                  MT_DFS_SW_TIMEOUT)) {
+               bool radar_detected;
+
+               dfs_pd->last_sw_check = jiffies;
+
+               mt76x2_dfs_add_events(dev);
+               radar_detected = mt76x2_dfs_check_detection(dev);
+               if (radar_detected) {
+                       /* sw detector found a radar pattern */
+                       ieee80211_radar_detected(dev->mt76.hw);
+                       mt76x2_dfs_detector_reset(dev);
+
+                       return;
+               }
+               mt76x2_dfs_check_event_window(dev);
+       }
+
+       engine_mask = mt76_rr(dev, MT_BBP(DFS, 1));
+       if (!(engine_mask & 0xf))
+               goto out;
+
+       for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+               struct mt76x02_dfs_hw_pulse pulse;
+
+               if (!(engine_mask & (1 << i)))
+                       continue;
+
+               pulse.engine = i;
+               mt76x2_dfs_get_hw_pulse(dev, &pulse);
+
+               if (!mt76x2_dfs_check_hw_pulse(dev, &pulse)) {
+                       dfs_pd->stats[i].hw_pulse_discarded++;
+                       continue;
+               }
+
+               /* hw detector found a radar pattern */
+               dfs_pd->stats[i].hw_pattern++;
+               ieee80211_radar_detected(dev->mt76.hw);
+               mt76x2_dfs_detector_reset(dev);
+
+               return;
+       }
+
+       /* reset hw detector */
+       mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+
+out:
+       mt76x02_irq_enable(dev, MT_INT_GPTIMER);
+}
+
+static void mt76x2_dfs_init_sw_detector(struct mt76x02_dev *dev)
+{
+       struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+       switch (dev->dfs_pd.region) {
+       case NL80211_DFS_FCC:
+               dfs_pd->sw_dpd_params.max_pri = MT_DFS_FCC_MAX_PRI;
+               dfs_pd->sw_dpd_params.min_pri = MT_DFS_FCC_MIN_PRI;
+               dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN;
+               break;
+       case NL80211_DFS_ETSI:
+               dfs_pd->sw_dpd_params.max_pri = MT_DFS_ETSI_MAX_PRI;
+               dfs_pd->sw_dpd_params.min_pri = MT_DFS_ETSI_MIN_PRI;
+               dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN << 2;
+               break;
+       case NL80211_DFS_JP:
+               dfs_pd->sw_dpd_params.max_pri = MT_DFS_JP_MAX_PRI;
+               dfs_pd->sw_dpd_params.min_pri = MT_DFS_JP_MIN_PRI;
+               dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN;
+               break;
+       case NL80211_DFS_UNSET:
+       default:
+               break;
+       }
+}
+
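+/* Program the hw detector engines with the region-specific radar
+ * parameters; each table holds one block of MT_DFS_NUM_ENGINES entries
+ * per channel width (20/40/80 MHz), selected via 'shift'.
+ */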
+static void mt76x2_dfs_set_bbp_params(struct mt76x02_dev *dev)
+{
+       const struct mt76x02_radar_specs *radar_specs;
+       u8 i, shift;
+       u32 data;
+
+       switch (dev->mt76.chandef.width) {
+       case NL80211_CHAN_WIDTH_40:
+               shift = MT_DFS_NUM_ENGINES;
+               break;
+       case NL80211_CHAN_WIDTH_80:
+               shift = 2 * MT_DFS_NUM_ENGINES;
+               break;
+       default:
+               shift = 0;
+               break;
+       }
+
+       switch (dev->dfs_pd.region) {
+       case NL80211_DFS_FCC:
+               radar_specs = &fcc_radar_specs[shift];
+               break;
+       case NL80211_DFS_ETSI:
+               radar_specs = &etsi_radar_specs[shift];
+               break;
+       case NL80211_DFS_JP:
+               if (dev->mt76.chandef.chan->center_freq >= 5250 &&
+                   dev->mt76.chandef.chan->center_freq <= 5350)
+                       radar_specs = &jp_w53_radar_specs[shift];
+               else
+                       radar_specs = &jp_w56_radar_specs[shift];
+               break;
+       case NL80211_DFS_UNSET:
+       default:
+               return;
+       }
+
+       data = (MT_DFS_VGA_MASK << 16) |
+              (MT_DFS_PWR_GAIN_OFFSET << 12) |
+              (MT_DFS_PWR_DOWN_TIME << 8) |
+              (MT_DFS_SYM_ROUND << 4) |
+              (MT_DFS_DELTA_DELAY & 0xf);
+       mt76_wr(dev, MT_BBP(DFS, 2), data);
+
+       data = (MT_DFS_RX_PE_MASK << 16) | MT_DFS_PKT_END_MASK;
+       mt76_wr(dev, MT_BBP(DFS, 3), data);
+
+       for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+               /* configure engine */
+               mt76_wr(dev, MT_BBP(DFS, 0), i);
+
+               /* detection mode + avg_len */
+               data = ((radar_specs[i].avg_len & 0x1ff) << 16) |
+                      (radar_specs[i].mode & 0xf);
+               mt76_wr(dev, MT_BBP(DFS, 4), data);
+
+               /* dfs energy */
+               data = ((radar_specs[i].e_high & 0x0fff) << 16) |
+                      (radar_specs[i].e_low & 0x0fff);
+               mt76_wr(dev, MT_BBP(DFS, 5), data);
+
+               /* dfs period */
+               mt76_wr(dev, MT_BBP(DFS, 7), radar_specs[i].t_low);
+               mt76_wr(dev, MT_BBP(DFS, 9), radar_specs[i].t_high);
+
+               /* dfs burst */
+               mt76_wr(dev, MT_BBP(DFS, 11), radar_specs[i].b_low);
+               mt76_wr(dev, MT_BBP(DFS, 13), radar_specs[i].b_high);
+
+               /* dfs width */
+               data = ((radar_specs[i].w_high & 0x0fff) << 16) |
+                      (radar_specs[i].w_low & 0x0fff);
+               mt76_wr(dev, MT_BBP(DFS, 14), data);
+
+               /* dfs margins */
+               data = (radar_specs[i].w_margin << 16) |
+                      radar_specs[i].t_margin;
+               mt76_wr(dev, MT_BBP(DFS, 15), data);
+
+               /* dfs event expiration */
+               mt76_wr(dev, MT_BBP(DFS, 17), radar_specs[i].event_expiration);
+
+               /* dfs pwr adj */
+               mt76_wr(dev, MT_BBP(DFS, 30), radar_specs[i].pwr_jmp);
+       }
+
+       /* reset status */
+       mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+       mt76_wr(dev, MT_BBP(DFS, 36), 0x3);
+
+       /* enable detection */
+       mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16);
+       mt76_wr(dev, 0x212c, 0x0c350001);
+}
+
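+/* Align the DFS detector gain with the current AGC configuration; the
+ * raw register fields and constants below appear to come from vendor
+ * calibration data.
+ */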
+void mt76x2_dfs_adjust_agc(struct mt76x02_dev *dev)
+{
+       u32 agc_r8, agc_r4, val_r8, val_r4, dfs_r31;
+
+       agc_r8 = mt76_rr(dev, MT_BBP(AGC, 8));
+       agc_r4 = mt76_rr(dev, MT_BBP(AGC, 4));
+
+       val_r8 = (agc_r8 & 0x00007e00) >> 9;
+       val_r4 = agc_r4 & ~0x1f000000;
+       val_r4 += (((val_r8 + 1) >> 1) << 24);
+       mt76_wr(dev, MT_BBP(AGC, 4), val_r4);
+
+       dfs_r31 = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, val_r4);
+       dfs_r31 += val_r8;
+       dfs_r31 -= (agc_r8 & 0x00000038) >> 3;
+       dfs_r31 = (dfs_r31 << 16) | 0x00000307;
+       mt76_wr(dev, MT_BBP(DFS, 31), dfs_r31);
+
+       mt76_wr(dev, MT_BBP(DFS, 32), 0x00040071);
+}
+
+void mt76x2_dfs_init_params(struct mt76x02_dev *dev)
+{
+       struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+
+       if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
+           dev->dfs_pd.region != NL80211_DFS_UNSET) {
+               mt76x2_dfs_init_sw_detector(dev);
+               mt76x2_dfs_set_bbp_params(dev);
+               /* enable debug mode */
+               mt76x2_dfs_set_capture_mode_ctrl(dev, true);
+
+               mt76x02_irq_enable(dev, MT_INT_GPTIMER);
+               mt76_rmw_field(dev, MT_INT_TIMER_EN,
+                              MT_INT_TIMER_EN_GP_TIMER_EN, 1);
+       } else {
+               /* disable hw detector */
+               mt76_wr(dev, MT_BBP(DFS, 0), 0);
+               /* clear detector status */
+               mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+               mt76_wr(dev, 0x212c, 0);
+
+               mt76x02_irq_disable(dev, MT_INT_GPTIMER);
+               mt76_rmw_field(dev, MT_INT_TIMER_EN,
+                              MT_INT_TIMER_EN_GP_TIMER_EN, 0);
+       }
+}
+
+void mt76x2_dfs_init_detector(struct mt76x02_dev *dev)
+{
+       struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+       INIT_LIST_HEAD(&dfs_pd->sequences);
+       INIT_LIST_HEAD(&dfs_pd->seq_pool);
+       dfs_pd->region = NL80211_DFS_UNSET;
+       dfs_pd->last_sw_check = jiffies;
+       tasklet_init(&dfs_pd->dfs_tasklet, mt76x2_dfs_tasklet,
+                    (unsigned long)dev);
+}
+
+void mt76x2_dfs_set_domain(struct mt76x02_dev *dev,
+                          enum nl80211_dfs_regions region)
+{
+       struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+       if (dfs_pd->region != region) {
+               tasklet_disable(&dfs_pd->dfs_tasklet);
+               dfs_pd->region = region;
+               mt76x2_dfs_init_params(dev);
+               tasklet_enable(&dfs_pd->dfs_tasklet);
+       }
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
new file mode 100644 (file)
index 0000000..f229c6e
--- /dev/null
@@ -0,0 +1,529 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "eeprom.h"
+#include "mcu.h"
+
+static void
+mt76x2_mac_pbf_init(struct mt76x02_dev *dev)
+{
+       u32 val;
+
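+       /* pulse every packet-buffer-related reset line, then restore the
+        * TX/RX maximum packet count thresholds
+        */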
+       val = MT_PBF_SYS_CTRL_MCU_RESET |
+             MT_PBF_SYS_CTRL_DMA_RESET |
+             MT_PBF_SYS_CTRL_MAC_RESET |
+             MT_PBF_SYS_CTRL_PBF_RESET |
+             MT_PBF_SYS_CTRL_ASY_RESET;
+
+       mt76_set(dev, MT_PBF_SYS_CTRL, val);
+       mt76_clear(dev, MT_PBF_SYS_CTRL, val);
+
+       mt76_wr(dev, MT_PBF_TX_MAX_PCNT, 0xefef3f1f);
+       mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf);
+}
+
+static void
+mt76x2_fixup_xtal(struct mt76x02_dev *dev)
+{
+       u16 eep_val;
+       s8 offset = 0;
+
+       eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_2);
+
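+       /* the low byte holds a sign-magnitude trim offset: bit 7 is the
+        * sign, 0xff means the field is not programmed
+        */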
+       offset = eep_val & 0x7f;
+       if ((eep_val & 0xff) == 0xff)
+               offset = 0;
+       else if (eep_val & 0x80)
+               offset = 0 - offset;
+
+       eep_val >>= 8;
+       if (eep_val == 0x00 || eep_val == 0xff) {
+               eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_1);
+               eep_val &= 0xff;
+
+               if (eep_val == 0x00 || eep_val == 0xff)
+                       eep_val = 0x14;
+       }
+
+       eep_val &= 0x7f;
+       mt76_rmw_field(dev, MT_XO_CTRL5, MT_XO_CTRL5_C2_VAL, eep_val + offset);
+       mt76_set(dev, MT_XO_CTRL6, MT_XO_CTRL6_C2_CTRL);
+
+       eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2);
+       switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
+       case 0:
+               mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
+               break;
+       case 1:
+               mt76_wr(dev, MT_XO_CTRL7, 0x5c1feed0);
+               break;
+       default:
+               break;
+       }
+}
+
+static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
+{
+       static const u8 null_addr[ETH_ALEN] = {};
+       const u8 *macaddr = dev->mt76.macaddr;
+       u32 val;
+       int i, k;
+
+       if (!mt76x02_wait_for_mac(&dev->mt76))
+               return -ETIMEDOUT;
+
+       val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
+
+       val &= ~(MT_WPDMA_GLO_CFG_TX_DMA_EN |
+                MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+                MT_WPDMA_GLO_CFG_RX_DMA_EN |
+                MT_WPDMA_GLO_CFG_RX_DMA_BUSY |
+                MT_WPDMA_GLO_CFG_DMA_BURST_SIZE);
+       val |= FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3);
+
+       mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
+
+       mt76x2_mac_pbf_init(dev);
+       mt76_write_mac_initvals(dev);
+       mt76x2_fixup_xtal(dev);
+
+       mt76_clear(dev, MT_MAC_SYS_CTRL,
+                  MT_MAC_SYS_CTRL_RESET_CSR |
+                  MT_MAC_SYS_CTRL_RESET_BBP);
+
+       if (is_mt7612(dev))
+               mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN);
+
+       mt76_set(dev, MT_EXT_CCA_CFG, 0x0000f000);
+       mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31));
+
+       mt76_wr(dev, MT_RF_BYPASS_0, 0x06000000);
+       mt76_wr(dev, MT_RF_SETTING_0, 0x08800000);
+       usleep_range(5000, 10000);
+       mt76_wr(dev, MT_RF_BYPASS_0, 0x00000000);
+
+       mt76_wr(dev, MT_MCU_CLOCK_CTL, 0x1401);
+       mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+       mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(macaddr));
+       mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(macaddr + 4));
+
+       mt76_wr(dev, MT_MAC_BSSID_DW0, get_unaligned_le32(macaddr));
+       mt76_wr(dev, MT_MAC_BSSID_DW1, get_unaligned_le16(macaddr + 4) |
+               FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 beacons */
+               MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
+
+       /* Fire a pre-TBTT interrupt 8 ms before TBTT */
+       mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
+                      8 << 4);
+       mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
+                      MT_DFS_GP_INTERVAL);
+       mt76_wr(dev, MT_INT_TIMER_EN, 0);
+
+       mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
+       if (!hard)
+               return 0;
+
+       for (i = 0; i < 256 / 32; i++)
+               mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0);
+
+       for (i = 0; i < 256; i++)
+               mt76x02_mac_wcid_setup(&dev->mt76, i, 0, NULL);
+
+       for (i = 0; i < MT_MAX_VIFS; i++)
+               mt76x02_mac_wcid_setup(&dev->mt76, MT_VIF_WCID(i), i, NULL);
+
+       for (i = 0; i < 16; i++)
+               for (k = 0; k < 4; k++)
+                       mt76x02_mac_shared_key_setup(&dev->mt76, i, k, NULL);
+
+       for (i = 0; i < 8; i++) {
+               mt76x2_mac_set_bssid(dev, i, null_addr);
+               mt76x2_mac_set_beacon(dev, i, NULL);
+       }
+
+       for (i = 0; i < 16; i++)
+               mt76_rr(dev, MT_TX_STAT_FIFO);
+
+       mt76_wr(dev, MT_CH_TIME_CFG,
+               MT_CH_TIME_CFG_TIMER_EN |
+               MT_CH_TIME_CFG_TX_AS_BUSY |
+               MT_CH_TIME_CFG_RX_AS_BUSY |
+               MT_CH_TIME_CFG_NAV_AS_BUSY |
+               MT_CH_TIME_CFG_EIFS_AS_BUSY |
+               FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
+
+       mt76x02_set_beacon_offsets(&dev->mt76);
+
+       mt76x2_set_tx_ackto(dev);
+
+       return 0;
+}
+
+int mt76x2_mac_start(struct mt76x02_dev *dev)
+{
+       int i;
+
+       for (i = 0; i < 16; i++)
+               mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+       for (i = 0; i < 16; i++)
+               mt76_rr(dev, MT_TX_STAT_FIFO);
+
+       memset(dev->aggr_stats, 0, sizeof(dev->aggr_stats));
+       mt76x02_mac_start(dev);
+
+       return 0;
+}
+
+void mt76x2_mac_resume(struct mt76x02_dev *dev)
+{
+       mt76_wr(dev, MT_MAC_SYS_CTRL,
+               MT_MAC_SYS_CTRL_ENABLE_TX |
+               MT_MAC_SYS_CTRL_ENABLE_RX);
+}
+
+static void
+mt76x2_power_on_rf_patch(struct mt76x02_dev *dev)
+{
+       mt76_set(dev, 0x10130, BIT(0) | BIT(16));
+       udelay(1);
+
+       mt76_clear(dev, 0x1001c, 0xff);
+       mt76_set(dev, 0x1001c, 0x30);
+
+       mt76_wr(dev, 0x10014, 0x484f);
+       udelay(1);
+
+       mt76_set(dev, 0x10130, BIT(17));
+       udelay(125);
+
+       mt76_clear(dev, 0x10130, BIT(16));
+       udelay(50);
+
+       mt76_set(dev, 0x1014c, BIT(19) | BIT(20));
+}
+
+static void
+mt76x2_power_on_rf(struct mt76x02_dev *dev, int unit)
+{
+       int shift = unit ? 8 : 0;
+
+       /* Enable RF BG */
+       mt76_set(dev, 0x10130, BIT(0) << shift);
+       udelay(10);
+
+       /* Enable RFDIG LDO/AFE/ABB/ADDA */
+       mt76_set(dev, 0x10130, (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift);
+       udelay(10);
+
+       /* Switch RFDIG power to internal LDO */
+       mt76_clear(dev, 0x10130, BIT(2) << shift);
+       udelay(10);
+
+       mt76x2_power_on_rf_patch(dev);
+
+       mt76_set(dev, 0x530, 0xf);
+}
+
+static void
+mt76x2_power_on(struct mt76x02_dev *dev)
+{
+       u32 val;
+
+       /* Turn on WL MTCMOS */
+       mt76_set(dev, MT_WLAN_MTC_CTRL, MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP);
+
+       val = MT_WLAN_MTC_CTRL_STATE_UP |
+             MT_WLAN_MTC_CTRL_PWR_ACK |
+             MT_WLAN_MTC_CTRL_PWR_ACK_S;
+
+       mt76_poll(dev, MT_WLAN_MTC_CTRL, val, val, 1000);
+
+       mt76_clear(dev, MT_WLAN_MTC_CTRL, 0x7f << 16);
+       udelay(10);
+
+       mt76_clear(dev, MT_WLAN_MTC_CTRL, 0xf << 24);
+       udelay(10);
+
+       mt76_set(dev, MT_WLAN_MTC_CTRL, 0xf << 24);
+       mt76_clear(dev, MT_WLAN_MTC_CTRL, 0xfff);
+
+       /* Clear AD/DA power down (power the AD/DA blocks back up) */
+       mt76_clear(dev, 0x11204, BIT(3));
+
+       /* WLAN function enable */
+       mt76_set(dev, 0x10080, BIT(0));
+
+       /* Release BBP software reset */
+       mt76_clear(dev, 0x10064, BIT(18));
+
+       mt76x2_power_on_rf(dev, 0);
+       mt76x2_power_on_rf(dev, 1);
+}
+
+void mt76x2_set_tx_ackto(struct mt76x02_dev *dev)
+{
+       u8 ackto, sifs, slottime = dev->slottime;
+
+       /* As defined by IEEE 802.11-2007 17.3.8.6 */
+       slottime += 3 * dev->coverage_class;
+       mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
+                      MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
+
+       sifs = mt76_get_field(dev, MT_XIFS_TIME_CFG,
+                             MT_XIFS_TIME_CFG_OFDM_SIFS);
+
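+       /* ACK timeout is one SIFS plus the coverage-class adjusted slot time */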
+       ackto = slottime + sifs;
+       mt76_rmw_field(dev, MT_TX_TIMEOUT_CFG,
+                      MT_TX_TIMEOUT_CFG_ACKTO, ackto);
+}
+
+int mt76x2_init_hardware(struct mt76x02_dev *dev)
+{
+       int ret;
+
+       tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet,
+                    (unsigned long)dev);
+
+       mt76x02_dma_disable(dev);
+       mt76x2_reset_wlan(dev, true);
+       mt76x2_power_on(dev);
+
+       ret = mt76x2_eeprom_init(dev);
+       if (ret)
+               return ret;
+
+       ret = mt76x2_mac_reset(dev, true);
+       if (ret)
+               return ret;
+
+       dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
+
+       ret = mt76x02_dma_init(dev);
+       if (ret)
+               return ret;
+
+       set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+       ret = mt76x2_mac_start(dev);
+       if (ret)
+               return ret;
+
+       ret = mt76x2_mcu_init(dev);
+       if (ret)
+               return ret;
+
+       mt76x2_mac_stop(dev, false);
+
+       return 0;
+}
+
+void mt76x2_stop_hardware(struct mt76x02_dev *dev)
+{
+       cancel_delayed_work_sync(&dev->cal_work);
+       cancel_delayed_work_sync(&dev->mac_work);
+       mt76x02_mcu_set_radio_state(&dev->mt76, false, true);
+       mt76x2_mac_stop(dev, false);
+}
+
+void mt76x2_cleanup(struct mt76x02_dev *dev)
+{
+       tasklet_disable(&dev->dfs_pd.dfs_tasklet);
+       tasklet_disable(&dev->pre_tbtt_tasklet);
+       mt76x2_stop_hardware(dev);
+       mt76x02_dma_cleanup(dev);
+       mt76x02_mcu_cleanup(&dev->mt76);
+}
+
+struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev)
+{
+       static const struct mt76_driver_ops drv_ops = {
+               .txwi_size = sizeof(struct mt76x02_txwi),
+               .update_survey = mt76x2_update_channel,
+               .tx_prepare_skb = mt76x02_tx_prepare_skb,
+               .tx_complete_skb = mt76x02_tx_complete_skb,
+               .rx_skb = mt76x02_queue_rx_skb,
+               .rx_poll_complete = mt76x02_rx_poll_complete,
+               .sta_ps = mt76x2_sta_ps,
+       };
+       struct mt76x02_dev *dev;
+       struct mt76_dev *mdev;
+
+       mdev = mt76_alloc_device(sizeof(*dev), &mt76x2_ops);
+       if (!mdev)
+               return NULL;
+
+       dev = container_of(mdev, struct mt76x02_dev, mt76);
+       mdev->dev = pdev;
+       mdev->drv = &drv_ops;
+
+       return dev;
+}
+
+static void mt76x2_regd_notifier(struct wiphy *wiphy,
+                                struct regulatory_request *request)
+{
+       struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+       struct mt76x02_dev *dev = hw->priv;
+
+       mt76x2_dfs_set_domain(dev, request->dfs_region);
+}
+
+static const struct ieee80211_iface_limit if_limits[] = {
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_ADHOC)
+       }, {
+               .max = 8,
+               .types = BIT(NL80211_IFTYPE_STATION) |
+#ifdef CONFIG_MAC80211_MESH
+                        BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+                        BIT(NL80211_IFTYPE_AP)
+        },
+};
+
+static const struct ieee80211_iface_combination if_comb[] = {
+       {
+               .limits = if_limits,
+               .n_limits = ARRAY_SIZE(if_limits),
+               .max_interfaces = 8,
+               .num_different_channels = 1,
+               .beacon_int_infra_match = true,
+               .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+                                      BIT(NL80211_CHAN_WIDTH_20) |
+                                      BIT(NL80211_CHAN_WIDTH_40) |
+                                      BIT(NL80211_CHAN_WIDTH_80),
+       }
+};
+
+static void mt76x2_led_set_config(struct mt76_dev *mt76, u8 delay_on,
+                                 u8 delay_off)
+{
+       struct mt76x02_dev *dev = container_of(mt76, struct mt76x02_dev,
+                                              mt76);
+       u32 val;
+
+       val = MT_LED_STATUS_DURATION(0xff) |
+             MT_LED_STATUS_OFF(delay_off) |
+             MT_LED_STATUS_ON(delay_on);
+
+       mt76_wr(dev, MT_LED_S0(mt76->led_pin), val);
+       mt76_wr(dev, MT_LED_S1(mt76->led_pin), val);
+
+       val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
+             MT_LED_CTRL_KICK(mt76->led_pin);
+       if (mt76->led_al)
+               val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
+       mt76_wr(dev, MT_LED_CTRL, val);
+}
+
+static int mt76x2_led_set_blink(struct led_classdev *led_cdev,
+                               unsigned long *delay_on,
+                               unsigned long *delay_off)
+{
+       struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
+                                            led_cdev);
+       u8 delta_on, delta_off;
+
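+       /* delay_on/delay_off are in ms; the hardware on/off durations
+        * appear to be programmed in ~10 ms units, so scale down and
+        * clamp to at least one unit.
+        */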
+       delta_off = max_t(u8, *delay_off / 10, 1);
+       delta_on = max_t(u8, *delay_on / 10, 1);
+
+       mt76x2_led_set_config(mt76, delta_on, delta_off);
+       return 0;
+}
+
+static void mt76x2_led_set_brightness(struct led_classdev *led_cdev,
+                                     enum led_brightness brightness)
+{
+       struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
+                                            led_cdev);
+
+       if (!brightness)
+               mt76x2_led_set_config(mt76, 0, 0xff);
+       else
+               mt76x2_led_set_config(mt76, 0xff, 0);
+}
+
+int mt76x2_register_device(struct mt76x02_dev *dev)
+{
+       struct ieee80211_hw *hw = mt76_hw(dev);
+       struct wiphy *wiphy = hw->wiphy;
+       int i, ret;
+
+       INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
+       INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);
+
+       mt76x2_init_device(dev);
+
+       ret = mt76x2_init_hardware(dev);
+       if (ret)
+               return ret;
+
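+       /* Derive the extra addresses from the base MAC: set the locally
+        * administered bit and encode the list index in bits 4:2 of the
+        * first octet.
+        */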
+       for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
+               u8 *addr = dev->macaddr_list[i].addr;
+
+               memcpy(addr, dev->mt76.macaddr, ETH_ALEN);
+
+               if (!i)
+                       continue;
+
+               addr[0] |= BIT(1);
+               addr[0] ^= ((i - 1) << 2);
+       }
+       wiphy->addresses = dev->macaddr_list;
+       wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list);
+
+       wiphy->iface_combinations = if_comb;
+       wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+
+       wiphy->reg_notifier = mt76x2_regd_notifier;
+
+       wiphy->interface_modes =
+               BIT(NL80211_IFTYPE_STATION) |
+               BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+               BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+               BIT(NL80211_IFTYPE_ADHOC);
+
+       wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
+       mt76x2_dfs_init_detector(dev);
+
+       /* init led callbacks */
+       dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
+       dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
+
+       ret = mt76_register_device(&dev->mt76, true, mt76x02_rates,
+                                  ARRAY_SIZE(mt76x02_rates));
+       if (ret)
+               goto fail;
+
+       mt76x2_init_debugfs(dev);
+       mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
+       mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
+
+       return 0;
+
+fail:
+       mt76x2_stop_hardware(dev);
+       return ret;
+}
+
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c
new file mode 100644
index 0000000..08366c5
--- /dev/null
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mcu.h"
+#include "eeprom.h"
+
+void mt76x2_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
+{
+       idx &= 7;
+       mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
+       mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
+                      get_unaligned_le16(addr + 4));
+}
+
+static int
+mt76_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
+{
+       int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0];
+       struct mt76x02_txwi txwi;
+
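+       /* Each beacon slot in beacon SRAM holds one txwi followed by the
+        * frame; the slot size is the distance between two consecutive
+        * beacon offsets.
+        */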
+       if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
+               return -ENOSPC;
+
+       mt76x02_mac_write_txwi(&dev->mt76, &txwi, skb, NULL, NULL, skb->len);
+
+       mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
+       offset += sizeof(txwi);
+
+       mt76_wr_copy(dev, offset, skb->data, skb->len);
+       return 0;
+}
+
+static int
+__mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 bcn_idx, struct sk_buff *skb)
+{
+       int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0];
+       int beacon_addr = mt76x02_beacon_offsets[bcn_idx];
+       int ret = 0;
+       int i;
+
+       /* Prevent corrupt transmissions during update */
+       mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));
+
+       if (skb) {
+               ret = mt76_write_beacon(dev, beacon_addr, skb);
+               if (!ret)
+                       dev->beacon_data_mask |= BIT(bcn_idx);
+       } else {
+               dev->beacon_data_mask &= ~BIT(bcn_idx);
+               for (i = 0; i < beacon_len; i += 4)
+                       mt76_wr(dev, beacon_addr + i, 0);
+       }
+
+       mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);
+
+       return ret;
+}
+
+int mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
+                         struct sk_buff *skb)
+{
+       bool force_update = false;
+       int bcn_idx = 0;
+       int i;
+
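+       /* Hardware beacon slots are kept compact: every vif that has a
+        * beacon gets the next free slot, so adding or removing one
+        * beacon may require rewriting the slots of the vifs after it.
+        */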
+       for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
+               if (vif_idx == i) {
+                       force_update = !!dev->beacons[i] ^ !!skb;
+
+                       if (dev->beacons[i])
+                               dev_kfree_skb(dev->beacons[i]);
+
+                       dev->beacons[i] = skb;
+                       __mt76x2_mac_set_beacon(dev, bcn_idx, skb);
+               } else if (force_update && dev->beacons[i]) {
+                       __mt76x2_mac_set_beacon(dev, bcn_idx, dev->beacons[i]);
+               }
+
+               bcn_idx += !!dev->beacons[i];
+       }
+
+       for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
+               if (!(dev->beacon_data_mask & BIT(i)))
+                       break;
+
+               __mt76x2_mac_set_beacon(dev, i, NULL);
+       }
+
+       mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
+                      bcn_idx - 1);
+       return 0;
+}
+
+void mt76x2_mac_set_beacon_enable(struct mt76x02_dev *dev,
+                                 u8 vif_idx, bool val)
+{
+       u8 old_mask = dev->beacon_mask;
+       bool en;
+       u32 reg;
+
+       if (val) {
+               dev->beacon_mask |= BIT(vif_idx);
+       } else {
+               dev->beacon_mask &= ~BIT(vif_idx);
+               mt76x2_mac_set_beacon(dev, vif_idx, NULL);
+       }
+
+       if (!!old_mask == !!dev->beacon_mask)
+               return;
+
+       en = dev->beacon_mask;
+
+       mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
+       reg = MT_BEACON_TIME_CFG_BEACON_TX |
+             MT_BEACON_TIME_CFG_TBTT_EN |
+             MT_BEACON_TIME_CFG_TIMER_EN;
+       mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);
+
+       if (en)
+               mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
+       else
+               mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
+}
+
+void mt76x2_update_channel(struct mt76_dev *mdev)
+{
+       struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+       struct mt76_channel_state *state;
+       u32 active, busy;
+
+       state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
+
+       busy = mt76_rr(dev, MT_CH_BUSY);
+       active = busy + mt76_rr(dev, MT_CH_IDLE);
+
+       spin_lock_bh(&dev->mt76.cc_lock);
+       state->cc_busy += busy;
+       state->cc_active += active;
+       spin_unlock_bh(&dev->mt76.cc_lock);
+}
+
+void mt76x2_mac_work(struct work_struct *work)
+{
+       struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
+                                              mac_work.work);
+       int i, idx;
+
+       mt76x2_update_channel(&dev->mt76);
+       for (i = 0, idx = 0; i < 16; i++) {
+               u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+               dev->aggr_stats[idx++] += val & 0xffff;
+               dev->aggr_stats[idx++] += val >> 16;
+       }
+
+       ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+                                    MT_CALIBRATE_INTERVAL);
+}
+
+void mt76x2_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val)
+{
+       u32 data = 0;
+
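+       /* A threshold of ~0 disables RTS/CTS; any other value enables
+        * protection with that threshold on all PHY modes below.
+        */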
+       if (val != ~0)
+               data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
+                      MT_PROT_CFG_RTS_THRESH;
+
+       mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);
+
+       mt76_rmw(dev, MT_CCK_PROT_CFG,
+                MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+       mt76_rmw(dev, MT_OFDM_PROT_CFG,
+                MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+       mt76_rmw(dev, MT_MM20_PROT_CFG,
+                MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+       mt76_rmw(dev, MT_MM40_PROT_CFG,
+                MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+       mt76_rmw(dev, MT_GF20_PROT_CFG,
+                MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+       mt76_rmw(dev, MT_GF40_PROT_CFG,
+                MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+       mt76_rmw(dev, MT_TX_PROT_CFG6,
+                MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+       mt76_rmw(dev, MT_TX_PROT_CFG7,
+                MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+       mt76_rmw(dev, MT_TX_PROT_CFG8,
+                MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
new file mode 100644
index 0000000..65fef08
--- /dev/null
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+static int
+mt76x2_start(struct ieee80211_hw *hw)
+{
+       struct mt76x02_dev *dev = hw->priv;
+       int ret;
+
+       mutex_lock(&dev->mt76.mutex);
+
+       ret = mt76x2_mac_start(dev);
+       if (ret)
+               goto out;
+
+       ret = mt76x2_phy_start(dev);
+       if (ret)
+               goto out;
+
+       ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+                                    MT_CALIBRATE_INTERVAL);
+
+       set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+out:
+       mutex_unlock(&dev->mt76.mutex);
+       return ret;
+}
+
+static void
+mt76x2_stop(struct ieee80211_hw *hw)
+{
+       struct mt76x02_dev *dev = hw->priv;
+
+       mutex_lock(&dev->mt76.mutex);
+       clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+       mt76x2_stop_hardware(dev);
+       mutex_unlock(&dev->mt76.mutex);
+}
+
+static int
+mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
+{
+       int ret;
+
+       cancel_delayed_work_sync(&dev->cal_work);
+
+       set_bit(MT76_RESET, &dev->mt76.state);
+
+       mt76_set_channel(&dev->mt76);
+
+       tasklet_disable(&dev->pre_tbtt_tasklet);
+       tasklet_disable(&dev->dfs_pd.dfs_tasklet);
+
+       mt76x2_mac_stop(dev, true);
+       ret = mt76x2_phy_set_channel(dev, chandef);
+
+       /* channel cycle counters read-and-clear */
+       mt76_rr(dev, MT_CH_IDLE);
+       mt76_rr(dev, MT_CH_BUSY);
+
+       mt76x2_dfs_init_params(dev);
+
+       mt76x2_mac_resume(dev);
+       tasklet_enable(&dev->dfs_pd.dfs_tasklet);
+       tasklet_enable(&dev->pre_tbtt_tasklet);
+
+       clear_bit(MT76_RESET, &dev->mt76.state);
+
+       mt76_txq_schedule_all(&dev->mt76);
+
+       return ret;
+}
+
+static int
+mt76x2_config(struct ieee80211_hw *hw, u32 changed)
+{
+       struct mt76x02_dev *dev = hw->priv;
+       int ret = 0;
+
+       mutex_lock(&dev->mt76.mutex);
+
+       if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+               if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+                       dev->mt76.rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+               else
+                       dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+
+               mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
+       }
+
+       if (changed & IEEE80211_CONF_CHANGE_POWER) {
+               dev->mt76.txpower_conf = hw->conf.power_level * 2;
+
+               /* convert to per-chain power for 2x2 devices:
+                * -3 dB, i.e. 6 units of .5 dB
+                */
+               dev->mt76.txpower_conf -= 6;
+
+               if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) {
+                       mt76x2_phy_set_txpower(dev);
+                       mt76x02_tx_set_txpwr_auto(dev, dev->mt76.txpower_conf);
+               }
+       }
+
+       if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+               ieee80211_stop_queues(hw);
+               ret = mt76x2_set_channel(dev, &hw->conf.chandef);
+               ieee80211_wake_queues(hw);
+       }
+
+       mutex_unlock(&dev->mt76.mutex);
+
+       return ret;
+}
+
+static void
+mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                       struct ieee80211_bss_conf *info, u32 changed)
+{
+       struct mt76x02_dev *dev = hw->priv;
+       struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
+
+       mutex_lock(&dev->mt76.mutex);
+
+       if (changed & BSS_CHANGED_BSSID)
+               mt76x2_mac_set_bssid(dev, mvif->idx, info->bssid);
+
+       if (changed & BSS_CHANGED_BEACON_INT) {
+               mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
+                              MT_BEACON_TIME_CFG_INTVAL,
+                              info->beacon_int << 4);
+               dev->beacon_int = info->beacon_int;
+               dev->tbtt_count = 0;
+       }
+
+       if (changed & BSS_CHANGED_BEACON_ENABLED) {
+               tasklet_disable(&dev->pre_tbtt_tasklet);
+               mt76x2_mac_set_beacon_enable(dev, mvif->idx,
+                                            info->enable_beacon);
+               tasklet_enable(&dev->pre_tbtt_tasklet);
+       }
+
+       if (changed & BSS_CHANGED_ERP_SLOT) {
+               int slottime = info->use_short_slot ? 9 : 20;
+
+               dev->slottime = slottime;
+               mt76x2_set_tx_ackto(dev);
+       }
+
+       mutex_unlock(&dev->mt76.mutex);
+}
+
+void
+mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
+{
+       struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
+       struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+       int idx = msta->wcid.idx;
+
+       mt76_stop_tx_queues(&dev->mt76, sta, true);
+       mt76x02_mac_wcid_set_drop(&dev->mt76, idx, ps);
+}
+
+static void
+mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+              const u8 *mac)
+{
+       struct mt76x02_dev *dev = hw->priv;
+
+       tasklet_disable(&dev->pre_tbtt_tasklet);
+       set_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+static void
+mt76x2_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+       struct mt76x02_dev *dev = hw->priv;
+
+       clear_bit(MT76_SCANNING, &dev->mt76.state);
+       tasklet_enable(&dev->pre_tbtt_tasklet);
+}
+
+static void
+mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+            u32 queues, bool drop)
+{
+}
+
+static int
+mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm)
+{
+       struct mt76x02_dev *dev = hw->priv;
+
+       *dbm = dev->mt76.txpower_cur / 2;
+
+       /* convert from per-chain power to combined output on 2x2 devices */
+       *dbm += 3;
+
+       return 0;
+}
+
+static void mt76x2_set_coverage_class(struct ieee80211_hw *hw,
+                                     s16 coverage_class)
+{
+       struct mt76x02_dev *dev = hw->priv;
+
+       mutex_lock(&dev->mt76.mutex);
+       dev->coverage_class = coverage_class;
+       mt76x2_set_tx_ackto(dev);
+       mutex_unlock(&dev->mt76.mutex);
+}
+
+static int
+mt76x2_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
+{
+       return 0;
+}
+
+static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
+                             u32 rx_ant)
+{
+       struct mt76x02_dev *dev = hw->priv;
+
+       if (!tx_ant || tx_ant > 3 || tx_ant != rx_ant)
+               return -EINVAL;
+
+       mutex_lock(&dev->mt76.mutex);
+
+       dev->mt76.chainmask = (tx_ant == 3) ? 0x202 : 0x101;
+       dev->mt76.antenna_mask = tx_ant;
+
+       mt76_set_stream_caps(&dev->mt76, true);
+       mt76x2_phy_set_antenna(dev);
+
+       mutex_unlock(&dev->mt76.mutex);
+
+       return 0;
+}
+
+static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
+                             u32 *rx_ant)
+{
+       struct mt76x02_dev *dev = hw->priv;
+
+       mutex_lock(&dev->mt76.mutex);
+       *tx_ant = dev->mt76.antenna_mask;
+       *rx_ant = dev->mt76.antenna_mask;
+       mutex_unlock(&dev->mt76.mutex);
+
+       return 0;
+}
+
+static int
+mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+{
+       struct mt76x02_dev *dev = hw->priv;
+
+       if (val != ~0 && val > 0xffff)
+               return -EINVAL;
+
+       mutex_lock(&dev->mt76.mutex);
+       mt76x2_mac_set_tx_protection(dev, val);
+       mutex_unlock(&dev->mt76.mutex);
+
+       return 0;
+}
+
+const struct ieee80211_ops mt76x2_ops = {
+       .tx = mt76x02_tx,
+       .start = mt76x2_start,
+       .stop = mt76x2_stop,
+       .add_interface = mt76x02_add_interface,
+       .remove_interface = mt76x02_remove_interface,
+       .config = mt76x2_config,
+       .configure_filter = mt76x02_configure_filter,
+       .bss_info_changed = mt76x2_bss_info_changed,
+       .sta_add = mt76x02_sta_add,
+       .sta_remove = mt76x02_sta_remove,
+       .set_key = mt76x02_set_key,
+       .conf_tx = mt76x02_conf_tx,
+       .sw_scan_start = mt76x2_sw_scan,
+       .sw_scan_complete = mt76x2_sw_scan_complete,
+       .flush = mt76x2_flush,
+       .ampdu_action = mt76x02_ampdu_action,
+       .get_txpower = mt76x2_get_txpower,
+       .wake_tx_queue = mt76_wake_tx_queue,
+       .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
+       .release_buffered_frames = mt76_release_buffered_frames,
+       .set_coverage_class = mt76x2_set_coverage_class,
+       .get_survey = mt76_get_survey,
+       .set_tim = mt76x2_set_tim,
+       .set_antenna = mt76x2_set_antenna,
+       .get_antenna = mt76x2_get_antenna,
+       .set_rts_threshold = mt76x2_set_rts_threshold,
+};
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
new file mode 100644
index 0000000..898aa22
--- /dev/null
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+
+#include "mt76x2.h"
+#include "mcu.h"
+#include "eeprom.h"
+
+static int
+mt76pci_load_rom_patch(struct mt76x02_dev *dev)
+{
+       const struct firmware *fw = NULL;
+       struct mt76x02_patch_header *hdr;
+       bool rom_protect = !is_mt7612(dev);
+       int len, ret = 0;
+       __le32 *cur;
+       u32 patch_mask, patch_reg;
+
+       if (rom_protect && !mt76_poll(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
+               dev_err(dev->mt76.dev,
+                       "Could not get hardware semaphore for ROM PATCH\n");
+               return -ETIMEDOUT;
+       }
+
+       if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
+               patch_mask = BIT(0);
+               patch_reg = MT_MCU_CLOCK_CTL;
+       } else {
+               patch_mask = BIT(1);
+               patch_reg = MT_MCU_COM_REG0;
+       }
+
+       if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
+               dev_info(dev->mt76.dev, "ROM patch already applied\n");
+               goto out;
+       }
+
+       ret = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev);
+       if (ret)
+               goto out;
+
+       if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
+               ret = -EIO;
+               dev_err(dev->mt76.dev, "Failed to load firmware\n");
+               goto out;
+       }
+
+       hdr = (struct mt76x02_patch_header *)fw->data;
+       dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
+
+       mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ROM_PATCH_OFFSET);
+
+       cur = (__le32 *) (fw->data + sizeof(*hdr));
+       len = fw->size - sizeof(*hdr);
+       mt76_wr_copy(dev, MT_MCU_ROM_PATCH_ADDR, cur, len);
+
+       mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+
+       /* Trigger ROM */
+       mt76_wr(dev, MT_MCU_INT_LEVEL, 4);
+
+       if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 2000)) {
+               dev_err(dev->mt76.dev, "Failed to load ROM patch\n");
+               ret = -ETIMEDOUT;
+       }
+
+out:
+       /* release semaphore */
+       if (rom_protect)
+               mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
+       release_firmware(fw);
+       return ret;
+}
+
+static int
+mt76pci_load_firmware(struct mt76x02_dev *dev)
+{
+       const struct firmware *fw;
+       const struct mt76x02_fw_header *hdr;
+       int len, ret;
+       __le32 *cur;
+       u32 offset, val;
+
+       ret = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev);
+       if (ret)
+               return ret;
+
+       if (!fw || !fw->data || fw->size < sizeof(*hdr))
+               goto error;
+
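+       /* Firmware image layout: header, then the ILM section, then the
+        * DLM section; the lengths from the header must add up to the
+        * file size.
+        */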
+       hdr = (const struct mt76x02_fw_header *)fw->data;
+
+       len = sizeof(*hdr);
+       len += le32_to_cpu(hdr->ilm_len);
+       len += le32_to_cpu(hdr->dlm_len);
+
+       if (fw->size != len)
+               goto error;
+
+       val = le16_to_cpu(hdr->fw_ver);
+       dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
+                (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
+
+       val = le16_to_cpu(hdr->build_ver);
+       dev_info(dev->mt76.dev, "Build: %x\n", val);
+       dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
+
+       cur = (__le32 *) (fw->data + sizeof(*hdr));
+       len = le32_to_cpu(hdr->ilm_len);
+
+       mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ILM_OFFSET);
+       mt76_wr_copy(dev, MT_MCU_ILM_ADDR, cur, len);
+
+       cur += len / sizeof(*cur);
+       len = le32_to_cpu(hdr->dlm_len);
+
+       if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+               offset = MT_MCU_DLM_ADDR_E3;
+       else
+               offset = MT_MCU_DLM_ADDR;
+
+       mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET);
+       mt76_wr_copy(dev, offset, cur, len);
+
+       mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+
+       val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2);
+       if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1)
+               mt76_set(dev, MT_MCU_COM_REG0, BIT(30));
+
+       /* trigger firmware */
+       mt76_wr(dev, MT_MCU_INT_LEVEL, 2);
+       if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 200)) {
+               dev_err(dev->mt76.dev, "Firmware failed to start\n");
+               release_firmware(fw);
+               return -ETIMEDOUT;
+       }
+
+       dev_info(dev->mt76.dev, "Firmware running!\n");
+       mt76x02_set_ethtool_fwver(&dev->mt76, hdr);
+
+       release_firmware(fw);
+
+       return ret;
+
+error:
+       dev_err(dev->mt76.dev, "Invalid firmware\n");
+       release_firmware(fw);
+       return -ENOENT;
+}
+
+int mt76x2_mcu_init(struct mt76x02_dev *dev)
+{
+       static const struct mt76_mcu_ops mt76x2_mcu_ops = {
+               .mcu_msg_alloc = mt76x02_mcu_msg_alloc,
+               .mcu_send_msg = mt76x02_mcu_msg_send,
+       };
+       int ret;
+
+       dev->mt76.mcu_ops = &mt76x2_mcu_ops;
+
+       ret = mt76pci_load_rom_patch(dev);
+       if (ret)
+               return ret;
+
+       ret = mt76pci_load_firmware(dev);
+       if (ret)
+               return ret;
+
+       mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 1, true);
+       return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
new file mode 100644
index 0000000..40ea5f7
--- /dev/null
@@ -0,0 +1,459 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mcu.h"
+#include "eeprom.h"
+#include "../mt76x02_phy.h"
+
+static bool
+mt76x2_phy_tssi_init_cal(struct mt76x02_dev *dev)
+{
+       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+       u32 flag = 0;
+
+       if (!mt76x02_tssi_enabled(&dev->mt76))
+               return false;
+
+       if (mt76x2_channel_silent(dev))
+               return false;
+
+       if (chan->band == NL80211_BAND_5GHZ)
+               flag |= BIT(0);
+
+       if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band))
+               flag |= BIT(8);
+
+       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TSSI, flag, true);
+       dev->cal.tssi_cal_done = true;
+       return true;
+}
+
+static void
+mt76x2_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
+{
+       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+       bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
+
+       if (dev->cal.channel_cal_done)
+               return;
+
+       if (mt76x2_channel_silent(dev))
+               return;
+
+       if (!dev->cal.tssi_cal_done)
+               mt76x2_phy_tssi_init_cal(dev);
+
+       if (!mac_stopped)
+               mt76x2_mac_stop(dev, false);
+
+       if (is_5ghz)
+               mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, 0, true);
+
+       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_LOFT, is_5ghz, true);
+       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, true);
+       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQC_FI, is_5ghz, true);
+       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TEMP_SENSOR, 0, true);
+       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_SHAPING, 0, true);
+
+       if (!mac_stopped)
+               mt76x2_mac_resume(dev);
+
+       mt76x2_apply_gain_adj(dev);
+
+       dev->cal.channel_cal_done = true;
+}
+
+void mt76x2_phy_set_antenna(struct mt76x02_dev *dev)
+{
+       u32 val;
+
+       val = mt76_rr(dev, MT_BBP(AGC, 0));
+       val &= ~(BIT(4) | BIT(1));
+       switch (dev->mt76.antenna_mask) {
+       case 1:
+               /* disable mac DAC control */
+               mt76_clear(dev, MT_BBP(IBI, 9), BIT(11));
+               mt76_clear(dev, MT_BBP(TXBE, 5), 3);
+               mt76_rmw_field(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT, 0x3);
+               mt76_rmw_field(dev, MT_BBP(CORE, 32), GENMASK(21, 20), 2);
+               /* disable DAC 1 */
+               mt76_rmw_field(dev, MT_BBP(CORE, 33), GENMASK(12, 9), 4);
+
+               val &= ~(BIT(3) | BIT(0));
+               break;
+       case 2:
+               /* disable mac DAC control */
+               mt76_clear(dev, MT_BBP(IBI, 9), BIT(11));
+               mt76_rmw_field(dev, MT_BBP(TXBE, 5), 3, 1);
+               mt76_rmw_field(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT, 0xc);
+               mt76_rmw_field(dev, MT_BBP(CORE, 32), GENMASK(21, 20), 1);
+               /* disable DAC 0 */
+               mt76_rmw_field(dev, MT_BBP(CORE, 33), GENMASK(12, 9), 1);
+
+               val &= ~BIT(3);
+               val |= BIT(0);
+               break;
+       case 3:
+       default:
+               /* enable mac DAC control */
+               mt76_set(dev, MT_BBP(IBI, 9), BIT(11));
+               mt76_set(dev, MT_BBP(TXBE, 5), 3);
+               mt76_rmw_field(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT, 0xf);
+               mt76_clear(dev, MT_BBP(CORE, 32), GENMASK(21, 20));
+               mt76_clear(dev, MT_BBP(CORE, 33), GENMASK(12, 9));
+
+               val &= ~BIT(0);
+               val |= BIT(3);
+               break;
+       }
+       mt76_wr(dev, MT_BBP(AGC, 0), val);
+}
+
+static void
+mt76x2_get_agc_gain(struct mt76x02_dev *dev, u8 *dest)
+{
+       dest[0] = mt76_get_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN);
+       dest[1] = mt76_get_field(dev, MT_BBP(AGC, 9), MT_BBP_AGC_GAIN);
+}
+
+static int
+mt76x2_get_rssi_gain_thresh(struct mt76x02_dev *dev)
+{
+       switch (dev->mt76.chandef.width) {
+       case NL80211_CHAN_WIDTH_80:
+               return -62;
+       case NL80211_CHAN_WIDTH_40:
+               return -65;
+       default:
+               return -68;
+       }
+}
+
+static int
+mt76x2_get_low_rssi_gain_thresh(struct mt76x02_dev *dev)
+{
+       switch (dev->mt76.chandef.width) {
+       case NL80211_CHAN_WIDTH_80:
+               return -76;
+       case NL80211_CHAN_WIDTH_40:
+               return -79;
+       default:
+               return -82;
+       }
+}
+
+static void
+mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
+{
+       u32 val;
+       u8 gain_val[2];
+
+       gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
+       gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
+
+       if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
+               val = 0x1e42 << 16;
+       else
+               val = 0x1836 << 16;
+
+       val |= 0xf8;
+
+       mt76_wr(dev, MT_BBP(AGC, 8),
+               val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[0]));
+       mt76_wr(dev, MT_BBP(AGC, 9),
+               val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[1]));
+
+       if (dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR)
+               mt76x2_dfs_adjust_agc(dev);
+}
+
+static void
+mt76x2_phy_adjust_vga_gain(struct mt76x02_dev *dev)
+{
+       u32 false_cca;
+       u8 limit = dev->cal.low_gain > 0 ? 16 : 4;
+
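+       /* Use the false CCA rate as feedback: too many false detections
+        * mean the gain is too high, (almost) none means there is
+        * headroom to raise it again.
+        */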
+       false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS,
+                             mt76_rr(dev, MT_RX_STAT_1));
+       dev->cal.false_cca = false_cca;
+       if (false_cca > 800 && dev->cal.agc_gain_adjust < limit)
+               dev->cal.agc_gain_adjust += 2;
+       else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) ||
+                (dev->cal.agc_gain_adjust >= limit && false_cca < 500))
+               dev->cal.agc_gain_adjust -= 2;
+       else
+               return;
+
+       mt76x2_phy_set_gain_val(dev);
+}
+
+static void
+mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
+{
+       u8 *gain = dev->cal.agc_gain_init;
+       u8 low_gain_delta, gain_delta;
+       bool gain_change;
+       int low_gain;
+       u32 val;
+
+       dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(&dev->mt76);
+
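+       /* low_gain: 0 = weak signal, 1 = medium, 2 = both RSSI thresholds
+        * exceeded (strong signal, front-end gain can be reduced).
+        */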
+       low_gain = (dev->cal.avg_rssi_all > mt76x2_get_rssi_gain_thresh(dev)) +
+                  (dev->cal.avg_rssi_all > mt76x2_get_low_rssi_gain_thresh(dev));
+
+       gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2);
+       dev->cal.low_gain = low_gain;
+
+       if (!gain_change) {
+               mt76x2_phy_adjust_vga_gain(dev);
+               return;
+       }
+
+       if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) {
+               mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211);
+               val = mt76_rr(dev, MT_BBP(AGC, 26)) & ~0xf;
+               if (low_gain == 2)
+                       val |= 0x3;
+               else
+                       val |= 0x5;
+               mt76_wr(dev, MT_BBP(AGC, 26), val);
+       } else {
+               mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423);
+       }
+
+       if (mt76x2_has_ext_lna(dev))
+               low_gain_delta = 10;
+       else
+               low_gain_delta = 14;
+
+       if (low_gain == 2) {
+               mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
+               mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
+               mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808);
+               gain_delta = low_gain_delta;
+               dev->cal.agc_gain_adjust = 0;
+       } else {
+               mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
+               if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+                       mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
+               else
+                       mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
+               mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
+               gain_delta = 0;
+               dev->cal.agc_gain_adjust = low_gain_delta;
+       }
+
+       dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
+       dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
+       mt76x2_phy_set_gain_val(dev);
+
+       /* clear false CCA counters */
+       mt76_rr(dev, MT_RX_STAT_1);
+}
+
+int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
+                          struct cfg80211_chan_def *chandef)
+{
+       struct ieee80211_channel *chan = chandef->chan;
+       bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+       enum nl80211_band band = chan->band;
+       u8 channel;
+
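+       /* Ext CCA routing, indexed by the position of the primary channel
+        * within the channel group.
+        */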
+       u32 ext_cca_chan[4] = {
+               [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+               [1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+               [2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+               [3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+       };
+       int ch_group_index;
+       u8 bw, bw_index;
+       int freq, freq1;
+       int ret;
+
+       dev->cal.channel_cal_done = false;
+       freq = chandef->chan->center_freq;
+       freq1 = chandef->center_freq1;
+       channel = chan->hw_value;
+
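+       /* hw_value is the control channel; convert it to the center
+        * channel of the configured bandwidth (+/-2 channels for 40 MHz,
+        * up to +/-6 for 80 MHz).
+        */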
+       switch (chandef->width) {
+       case NL80211_CHAN_WIDTH_40:
+               bw = 1;
+               if (freq1 > freq) {
+                       bw_index = 1;
+                       ch_group_index = 0;
+               } else {
+                       bw_index = 3;
+                       ch_group_index = 1;
+               }
+               channel += 2 - ch_group_index * 4;
+               break;
+       case NL80211_CHAN_WIDTH_80:
+               ch_group_index = (freq - freq1 + 30) / 20;
+               if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+                       ch_group_index = 0;
+               bw = 2;
+               bw_index = ch_group_index;
+               channel += 6 - ch_group_index * 4;
+               break;
+       default:
+               bw = 0;
+               bw_index = 0;
+               ch_group_index = 0;
+               break;
+       }
+
+       mt76x2_read_rx_gain(dev);
+       mt76x2_phy_set_txpower_regs(dev, band);
+       mt76x2_configure_tx_delay(dev, band, bw);
+       mt76x2_phy_set_txpower(dev);
+
+       mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
+       mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
+
+       mt76_rmw(dev, MT_EXT_CCA_CFG,
+                (MT_EXT_CCA_CFG_CCA0 |
+                 MT_EXT_CCA_CFG_CCA1 |
+                 MT_EXT_CCA_CFG_CCA2 |
+                 MT_EXT_CCA_CFG_CCA3 |
+                 MT_EXT_CCA_CFG_CCA_MASK),
+                ext_cca_chan[ch_group_index]);
+
+       ret = mt76x2_mcu_set_channel(dev, channel, bw, bw_index, scan);
+       if (ret)
+               return ret;
+
+       mt76x2_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);
+
+       mt76x2_phy_set_antenna(dev);
+
+       /* Enable LDPC Rx */
+       if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+               mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
+
+       if (!dev->cal.init_cal_done) {
+               u8 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_BT_RCAL_RESULT);
+
+               if (val != 0xff)
+                       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R, 0, true);
+       }
+
+       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, channel, true);
+
+       /* Rx LPF calibration */
+       if (!dev->cal.init_cal_done)
+               mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RC, 0, true);
+
+       dev->cal.init_cal_done = true;
+
+       mt76_wr(dev, MT_BBP(AGC, 61), 0xFF64A4E2);
+       mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010);
+       mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404);
+       mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
+       mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101B3F);
+
+       if (scan)
+               return 0;
+
+       dev->cal.low_gain = -1;
+       mt76x2_phy_channel_calibrate(dev, true);
+       mt76x2_get_agc_gain(dev, dev->cal.agc_gain_init);
+       memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
+              sizeof(dev->cal.agc_gain_cur));
+
+       /* init default values for temp compensation */
+       if (mt76x02_tssi_enabled(&dev->mt76)) {
+               mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+                              0x38);
+               mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
+                              0x38);
+       }
+
+       ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+                                    MT_CALIBRATE_INTERVAL);
+
+       return 0;
+}
+
+static void
+mt76x2_phy_temp_compensate(struct mt76x02_dev *dev)
+{
+       struct mt76x2_temp_comp t;
+       int temp, db_diff;
+
+       if (mt76x2_get_temp_comp(dev, &t))
+               return;
+
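+       /* Convert the raw sensor reading to degrees Celsius (~1.789
+        * degrees per step) relative to the 25 degree calibration
+        * reference.
+        */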
+       temp = mt76_get_field(dev, MT_TEMP_SENSOR, MT_TEMP_SENSOR_VAL);
+       temp -= t.temp_25_ref;
+       temp = (temp * 1789) / 1000 + 25;
+       dev->cal.temp = temp;
+
+       if (temp > 25)
+               db_diff = (temp - 25) / t.high_slope;
+       else
+               db_diff = (25 - temp) / t.low_slope;
+
+       db_diff = min(db_diff, t.upper_bound);
+       db_diff = max(db_diff, t.lower_bound);
+
+       mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+                      db_diff * 2);
+       mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
+                      db_diff * 2);
+}
+
+void mt76x2_phy_calibrate(struct work_struct *work)
+{
+       struct mt76x02_dev *dev;
+
+       dev = container_of(work, struct mt76x02_dev, cal_work.work);
+       mt76x2_phy_channel_calibrate(dev, false);
+       mt76x2_phy_tssi_compensate(dev, true);
+       mt76x2_phy_temp_compensate(dev);
+       mt76x2_phy_update_channel_gain(dev);
+       ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+                                    MT_CALIBRATE_INTERVAL);
+}
+
+int mt76x2_phy_start(struct mt76x02_dev *dev)
+{
+       int ret;
+
+       ret = mt76x02_mcu_set_radio_state(&dev->mt76, true, true);
+       if (ret)
+               return ret;
+
+       mt76x2_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
+
+       return ret;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_tx.c
new file mode 100644
index 0000000..3a2ec86
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+struct beacon_bc_data {
+       struct mt76x02_dev *dev;
+       struct sk_buff_head q;
+       struct sk_buff *tail[8];
+};
+
+static void
+mt76x2_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct mt76x02_dev *dev = (struct mt76x02_dev *) priv;
+       struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
+       struct sk_buff *skb = NULL;
+
+       if (!(dev->beacon_mask & BIT(mvif->idx)))
+               return;
+
+       skb = ieee80211_beacon_get(mt76_hw(dev), vif);
+       if (!skb)
+               return;
+
+       mt76x2_mac_set_beacon(dev, mvif->idx, skb);
+}
+
+static void
+mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct beacon_bc_data *data = priv;
+       struct mt76x02_dev *dev = data->dev;
+       struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
+       struct ieee80211_tx_info *info;
+       struct sk_buff *skb;
+
+       if (!(dev->beacon_mask & BIT(mvif->idx)))
+               return;
+
+       skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
+       if (!skb)
+               return;
+
+       info = IEEE80211_SKB_CB(skb);
+       info->control.vif = vif;
+       info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
+       mt76_skb_set_moredata(skb, true);
+       __skb_queue_tail(&data->q, skb);
+       data->tail[mvif->idx] = skb;
+}
+
+static void
+mt76x2_resync_beacon_timer(struct mt76x02_dev *dev)
+{
+       u32 timer_val = dev->beacon_int << 4;
+
+       dev->tbtt_count++;
+
+       /*
+        * The beacon timer drifts by 1us every tick, and the timer is
+        * configured in 1/16 TU (64us) units, so the drift adds up to one
+        * full unit every 64 beacons. Compensate by shortening a single
+        * interval by one unit once per 64-beacon cycle.
+        */
+       if (dev->tbtt_count < 62)
+               return;
+
+       if (dev->tbtt_count >= 64) {
+               dev->tbtt_count = 0;
+               return;
+       }
+
+       /*
+        * The updated beacon interval takes effect two TBTTs later, because
+        * at this point the original interval has already been loaded into
+        * the next TBTT_TIMER value.
+        */
+       if (dev->tbtt_count == 62)
+               timer_val -= 1;
+
+       mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
+                      MT_BEACON_TIME_CFG_INTVAL, timer_val);
+}
+
+void mt76x2_pre_tbtt_tasklet(unsigned long arg)
+{
+       struct mt76x02_dev *dev = (struct mt76x02_dev *) arg;
+       struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
+       struct beacon_bc_data data = {};
+       struct sk_buff *skb;
+       int i, nframes;
+
+       mt76x2_resync_beacon_timer(dev);
+
+       data.dev = dev;
+       __skb_queue_head_init(&data.q);
+
+       ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+               IEEE80211_IFACE_ITER_RESUME_ALL,
+               mt76x2_update_beacon_iter, dev);
+
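+       /* Keep iterating until no interface adds any more buffered
+        * multicast/broadcast frames to the queue.
+        */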
+       do {
+               nframes = skb_queue_len(&data.q);
+               ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+                       IEEE80211_IFACE_ITER_RESUME_ALL,
+                       mt76x2_add_buffered_bc, &data);
+       } while (nframes != skb_queue_len(&data.q));
+
+       if (!nframes)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
+               if (!data.tail[i])
+                       continue;
+
+               mt76_skb_set_moredata(data.tail[i], false);
+       }
+
+       spin_lock_bh(&q->lock);
+       while ((skb = __skb_dequeue(&data.q)) != NULL) {
+               struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+               struct ieee80211_vif *vif = info->control.vif;
+               struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
+
+               mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
+                                     NULL);
+       }
+       spin_unlock_bh(&q->lock);
+}
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
new file mode 100644
index 0000000..f00aed9
--- /dev/null
@@ -0,0 +1,297 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "eeprom.h"
+#include "mcu.h"
+#include "../mt76x02_phy.h"
+
+static void
+mt76x2_adjust_high_lna_gain(struct mt76x02_dev *dev, int reg, s8 offset)
+{
+       s8 gain;
+
+       gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+       gain -= offset / 2;
+       mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
+}
+
+static void
+mt76x2_adjust_agc_gain(struct mt76x02_dev *dev, int reg, s8 offset)
+{
+       s8 gain;
+
+       gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+       gain += offset;
+       mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
+}
+
+void mt76x2_apply_gain_adj(struct mt76x02_dev *dev)
+{
+       s8 *gain_adj = dev->cal.rx.high_gain;
+
+       mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
+       mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
+
+       mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
+       mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
+}
+EXPORT_SYMBOL_GPL(mt76x2_apply_gain_adj);
+
+void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
+                                enum nl80211_band band)
+{
+       u32 pa_mode[2];
+       u32 pa_mode_adj;
+
+       if (band == NL80211_BAND_2GHZ) {
+               pa_mode[0] = 0x010055ff;
+               pa_mode[1] = 0x00550055;
+
+               mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
+               mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
+
+               if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
+                       mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
+                       mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
+               } else {
+                       mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
+                       mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
+               }
+       } else {
+               pa_mode[0] = 0x0000ffff;
+               pa_mode[1] = 0x00ff00ff;
+
+               if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
+                       mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
+                       mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
+               } else {
+                       mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
+                       mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
+               }
+
+               if (mt76x02_ext_pa_enabled(&dev->mt76, band))
+                       pa_mode_adj = 0x04000000;
+               else
+                       pa_mode_adj = 0;
+
+               mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
+               mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
+       }
+
+       mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
+       mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
+       mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
+       mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
+
+       if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
+               u32 val;
+
+               if (band == NL80211_BAND_2GHZ)
+                       val = 0x3c3c023c;
+               else
+                       val = 0x363c023c;
+
+               mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+               mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+               mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
+       } else {
+               if (band == NL80211_BAND_2GHZ) {
+                       u32 val = 0x0f3c3c3c;
+
+                       mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+                       mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+                       mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
+               } else {
+                       mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
+                       mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
+                       mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower_regs);
+
+static int
+mt76x2_get_min_rate_power(struct mt76_rate_power *r)
+{
+       int i;
+       s8 ret = 0;
+
+       for (i = 0; i < sizeof(r->all); i++) {
+               if (!r->all[i])
+                       continue;
+
+               if (ret)
+                       ret = min(ret, r->all[i]);
+               else
+                       ret = r->all[i];
+       }
+
+       return ret;
+}
+
+void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
+{
+       enum nl80211_chan_width width = dev->mt76.chandef.width;
+       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+       struct mt76x2_tx_power_info txp;
+       int txp_0, txp_1, delta = 0;
+       struct mt76_rate_power t = {};
+       int base_power, gain;
+
+       mt76x2_get_power_info(dev, &txp, chan);
+
+       if (width == NL80211_CHAN_WIDTH_40)
+               delta = txp.delta_bw40;
+       else if (width == NL80211_CHAN_WIDTH_80)
+               delta = txp.delta_bw80;
+
+       mt76x2_get_rate_power(dev, &t, chan);
+       mt76x02_add_rate_power_offset(&t, txp.chain[0].target_power);
+       mt76x02_limit_rate_power(&t, dev->mt76.txpower_conf);
+       dev->mt76.txpower_cur = mt76x02_get_max_rate_power(&t);
+
+       base_power = mt76x2_get_min_rate_power(&t);
+       delta += base_power - txp.chain[0].target_power;
+       txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta;
+       txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta;
+
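+       /* Clamp the per-chain power to the 0..0x2f register range and
+        * fold any excess or deficit into the per-rate power offsets.
+        */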
+       gain = min(txp_0, txp_1);
+       if (gain < 0) {
+               base_power -= gain;
+               txp_0 -= gain;
+               txp_1 -= gain;
+       } else if (gain > 0x2f) {
+               base_power -= gain - 0x2f;
+               txp_0 = 0x2f;
+               txp_1 = 0x2f;
+       }
+
+       mt76x02_add_rate_power_offset(&t, -base_power);
+       dev->target_power = txp.chain[0].target_power;
+       dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
+       dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
+       dev->mt76.rate_power = t;
+
+       mt76x02_phy_set_txpower(&dev->mt76, txp_0, txp_1);
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower);
+
+void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
+                              enum nl80211_band band, u8 bw)
+{
+       u32 cfg0, cfg1;
+
+       if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
+               cfg0 = bw ? 0x000b0c01 : 0x00101101;
+               cfg1 = 0x00011414;
+       } else {
+               cfg0 = bw ? 0x000b0b01 : 0x00101001;
+               cfg1 = 0x00021414;
+       }
+       mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
+       mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
+
+       mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15);
+}
+EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay);
+
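+/* Map the channel width onto the BBP bandwidth codes (80 MHz -> core 3 /
+ * AGC 7, 40 MHz -> core 2 / AGC 3, 20 MHz otherwise) and program the
+ * control-channel index into the AGC and TXBE blocks.
+ */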
+void mt76x2_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl)
+{
+       int core_val, agc_val;
+
+       switch (width) {
+       case NL80211_CHAN_WIDTH_80:
+               core_val = 3;
+               agc_val = 7;
+               break;
+       case NL80211_CHAN_WIDTH_40:
+               core_val = 2;
+               agc_val = 3;
+               break;
+       default:
+               core_val = 0;
+               agc_val = 1;
+               break;
+       }
+
+       mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
+       mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
+       mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
+       mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_bw);
+
+void mt76x2_phy_set_band(struct mt76x02_dev *dev, int band, bool primary_upper)
+{
+       switch (band) {
+       case NL80211_BAND_2GHZ:
+               mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+               mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+               break;
+       case NL80211_BAND_5GHZ:
+               mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+               mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+               break;
+       }
+
+       mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
+                      primary_upper);
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_band);
+
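+/* Two-step TSSI compensation driven from the calibration work: the first
+ * pass triggers a TSSI measurement (cal_mode BIT(0)); once the BBP flags
+ * completion, the next pass programs the per-chain slope/offset (cal_mode
+ * BIT(1)) and, on internal-PA channels, kicks a one-shot DPD calibration.
+ */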
+void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait)
+{
+       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+       struct mt76x2_tx_power_info txp;
+       struct mt76x2_tssi_comp t = {};
+
+       if (!dev->cal.tssi_cal_done)
+               return;
+
+       if (!dev->cal.tssi_comp_pending) {
+               /* TSSI trigger */
+               t.cal_mode = BIT(0);
+               mt76x2_mcu_tssi_comp(dev, &t);
+               dev->cal.tssi_comp_pending = true;
+       } else {
+               if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4))
+                       return;
+
+               dev->cal.tssi_comp_pending = false;
+               mt76x2_get_power_info(dev, &txp, chan);
+
+               if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band))
+                       t.pa_mode = 1;
+
+               t.cal_mode = BIT(1);
+               t.slope0 = txp.chain[0].tssi_slope;
+               t.offset0 = txp.chain[0].tssi_offset;
+               t.slope1 = txp.chain[1].tssi_slope;
+               t.offset1 = txp.chain[1].tssi_offset;
+               mt76x2_mcu_tssi_comp(dev, &t);
+
+               if (t.pa_mode || dev->cal.dpd_cal_done)
+                       return;
+
+               usleep_range(10000, 20000);
+               mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_DPD,
+                                     chan->hw_value, wait);
+               dev->cal.dpd_cal_done = true;
+       }
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_tssi_compensate);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
new file mode 100644 (file)
index 0000000..57baf8d
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "../mt76x02_usb.h"
+#include "mt76x2u.h"
+
+static const struct usb_device_id mt76x2u_device_table[] = {
+       { USB_DEVICE(0x0e8d, 0x7612) }, /* Alfa AWUS036ACM, Aukey USB-AC1200 */
+       { USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */
+       { USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */
+       { USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */
+       { USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */
+       { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
+       { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
+       { USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */
+       { },
+};
+
+static int mt76x2u_probe(struct usb_interface *intf,
+                        const struct usb_device_id *id)
+{
+       struct usb_device *udev = interface_to_usbdev(intf);
+       struct mt76x02_dev *dev;
+       int err;
+
+       dev = mt76x2u_alloc_device(&intf->dev);
+       if (!dev)
+               return -ENOMEM;
+
+       udev = usb_get_dev(udev);
+       usb_reset_device(udev);
+
+       mt76x02u_init_mcu(&dev->mt76);
+       err = mt76u_init(&dev->mt76, intf);
+       if (err < 0)
+               goto err;
+
+       dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
+       dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
+
+       err = mt76x2u_register_device(dev);
+       if (err < 0)
+               goto err;
+
+       return 0;
+
+err:
+       ieee80211_free_hw(mt76_hw(dev));
+       usb_set_intfdata(intf, NULL);
+       usb_put_dev(udev);
+
+       return err;
+}
+
+static void mt76x2u_disconnect(struct usb_interface *intf)
+{
+       struct usb_device *udev = interface_to_usbdev(intf);
+       struct mt76x02_dev *dev = usb_get_intfdata(intf);
+       struct ieee80211_hw *hw = mt76_hw(dev);
+
+       set_bit(MT76_REMOVED, &dev->mt76.state);
+       ieee80211_unregister_hw(hw);
+       mt76x2u_cleanup(dev);
+
+       ieee80211_free_hw(hw);
+       usb_set_intfdata(intf, NULL);
+       usb_put_dev(udev);
+}
+
+static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf,
+                                         pm_message_t state)
+{
+       struct mt76x02_dev *dev = usb_get_intfdata(intf);
+       struct mt76_usb *usb = &dev->mt76.usb;
+
+       mt76u_stop_queues(&dev->mt76);
+       mt76x2u_stop_hw(dev);
+       usb_kill_urb(usb->mcu.res.urb);
+
+       return 0;
+}
+
+static int __maybe_unused mt76x2u_resume(struct usb_interface *intf)
+{
+       struct mt76x02_dev *dev = usb_get_intfdata(intf);
+       struct mt76_usb *usb = &dev->mt76.usb;
+       int err;
+
+       reinit_completion(&usb->mcu.cmpl);
+       err = mt76u_submit_buf(&dev->mt76, USB_DIR_IN,
+                              MT_EP_IN_CMD_RESP,
+                              &usb->mcu.res, GFP_KERNEL,
+                              mt76u_mcu_complete_urb,
+                              &usb->mcu.cmpl);
+       if (err < 0)
+               goto err;
+
+       err = mt76u_submit_rx_buffers(&dev->mt76);
+       if (err < 0)
+               goto err;
+
+       tasklet_enable(&usb->rx_tasklet);
+       tasklet_enable(&usb->tx_tasklet);
+
+       err = mt76x2u_init_hardware(dev);
+       if (err < 0)
+               goto err;
+
+       return 0;
+
+err:
+       mt76x2u_cleanup(dev);
+       return err;
+}
+
+MODULE_DEVICE_TABLE(usb, mt76x2u_device_table);
+MODULE_FIRMWARE(MT7662U_FIRMWARE);
+MODULE_FIRMWARE(MT7662U_ROM_PATCH);
+
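+/* soft_unbind keeps the USB core from killing URBs and disabling endpoints
+ * before disconnect() runs; hub-initiated LPM is disabled since the
+ * firmware does not appear to cope with link power transitions.
+ */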
+static struct usb_driver mt76x2u_driver = {
+       .name           = KBUILD_MODNAME,
+       .id_table       = mt76x2u_device_table,
+       .probe          = mt76x2u_probe,
+       .disconnect     = mt76x2u_disconnect,
+#ifdef CONFIG_PM
+       .suspend        = mt76x2u_suspend,
+       .resume         = mt76x2u_resume,
+       .reset_resume   = mt76x2u_resume,
+#endif /* CONFIG_PM */
+       .soft_unbind    = 1,
+       .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt76x2u_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
new file mode 100644 (file)
index 0000000..c82f16e
--- /dev/null
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+
+#include "mt76x2u.h"
+#include "eeprom.h"
+#include "../mt76x02_phy.h"
+#include "../mt76x02_usb.h"
+
+static void mt76x2u_init_dma(struct mt76x02_dev *dev)
+{
+       u32 val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+
+       val |= MT_USB_DMA_CFG_RX_DROP_OR_PAD |
+              MT_USB_DMA_CFG_RX_BULK_EN |
+              MT_USB_DMA_CFG_TX_BULK_EN;
+
+       /* disable AGGR_BULK_RX in order to receive one
+        * frame in each rx urb and avoid copies
+        */
+       val &= ~MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+       mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+}
+
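+/* Undocumented RF power-up patch sequence poking vendor CFG registers,
+ * apparently carried over verbatim from the reference driver.
+ */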
+static void mt76x2u_power_on_rf_patch(struct mt76x02_dev *dev)
+{
+       mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) | BIT(16));
+       udelay(1);
+
+       mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1c), 0xff);
+       mt76_set(dev, MT_VEND_ADDR(CFG, 0x1c), 0x30);
+
+       mt76_wr(dev, MT_VEND_ADDR(CFG, 0x14), 0x484f);
+       udelay(1);
+
+       mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(17));
+       usleep_range(150, 200);
+
+       mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(16));
+       usleep_range(50, 100);
+
+       mt76_set(dev, MT_VEND_ADDR(CFG, 0x14c), BIT(19) | BIT(20));
+}
+
+static void mt76x2u_power_on_rf(struct mt76x02_dev *dev, int unit)
+{
+       int shift = unit ? 8 : 0;
+       u32 val = (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift;
+
+       /* Enable RF BG */
+       mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) << shift);
+       usleep_range(10, 20);
+
+       /* Enable RFDIG LDO/AFE/ABB/ADDA */
+       mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), val);
+       usleep_range(10, 20);
+
+       /* Switch RFDIG power to internal LDO */
+       mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(2) << shift);
+       usleep_range(10, 20);
+
+       mt76x2u_power_on_rf_patch(dev);
+
+       mt76_set(dev, 0x530, 0xf);
+}
+
+static void mt76x2u_power_on(struct mt76x02_dev *dev)
+{
+       u32 val;
+
+       /* Turn on WL MTCMOS */
+       mt76_set(dev, MT_VEND_ADDR(CFG, 0x148),
+                MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP);
+
+       val = MT_WLAN_MTC_CTRL_STATE_UP |
+             MT_WLAN_MTC_CTRL_PWR_ACK |
+             MT_WLAN_MTC_CTRL_PWR_ACK_S;
+
+       mt76_poll(dev, MT_VEND_ADDR(CFG, 0x148), val, val, 1000);
+
+       mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0x7f << 16);
+       usleep_range(10, 20);
+
+       mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
+       usleep_range(10, 20);
+
+       mt76_set(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
+       mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xfff);
+
+       /* Release AD/DA power-down */
+       mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1204), BIT(3));
+
+       /* WLAN function enable */
+       mt76_set(dev, MT_VEND_ADDR(CFG, 0x80), BIT(0));
+
+       /* Release BBP software reset */
+       mt76_clear(dev, MT_VEND_ADDR(CFG, 0x64), BIT(18));
+
+       mt76x2u_power_on_rf(dev, 0);
+       mt76x2u_power_on_rf(dev, 1);
+}
+
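+/* The USB parts expose the EEPROM content through a vendor register
+ * window; snapshot it into host memory one 32-bit word at a time.
+ */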
+static int mt76x2u_init_eeprom(struct mt76x02_dev *dev)
+{
+       u32 val, i;
+
+       dev->mt76.eeprom.data = devm_kzalloc(dev->mt76.dev,
+                                            MT7612U_EEPROM_SIZE,
+                                            GFP_KERNEL);
+       dev->mt76.eeprom.size = MT7612U_EEPROM_SIZE;
+       if (!dev->mt76.eeprom.data)
+               return -ENOMEM;
+
+       for (i = 0; i + 4 <= MT7612U_EEPROM_SIZE; i += 4) {
+               val = mt76_rr(dev, MT_VEND_ADDR(EEPROM, i));
+               put_unaligned_le32(val, dev->mt76.eeprom.data + i);
+       }
+
+       mt76x02_eeprom_parse_hw_cap(&dev->mt76);
+       return 0;
+}
+
+struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev)
+{
+       static const struct mt76_driver_ops drv_ops = {
+               .tx_prepare_skb = mt76x02u_tx_prepare_skb,
+               .tx_complete_skb = mt76x02u_tx_complete_skb,
+               .tx_status_data = mt76x02_tx_status_data,
+               .rx_skb = mt76x02_queue_rx_skb,
+       };
+       struct mt76x02_dev *dev;
+       struct mt76_dev *mdev;
+
+       mdev = mt76_alloc_device(sizeof(*dev), &mt76x2u_ops);
+       if (!mdev)
+               return NULL;
+
+       dev = container_of(mdev, struct mt76x02_dev, mt76);
+       mdev->dev = pdev;
+       mdev->drv = &drv_ops;
+
+       return dev;
+}
+
+static void mt76x2u_init_beacon_offsets(struct mt76x02_dev *dev)
+{
+       mt76_wr(dev, MT_BCN_OFFSET(0), 0x18100800);
+       mt76_wr(dev, MT_BCN_OFFSET(1), 0x38302820);
+       mt76_wr(dev, MT_BCN_OFFSET(2), 0x58504840);
+       mt76_wr(dev, MT_BCN_OFFSET(3), 0x78706860);
+}
+
+int mt76x2u_init_hardware(struct mt76x02_dev *dev)
+{
+       const struct mt76_wcid_addr addr = {
+               .macaddr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+               .ba_mask = 0,
+       };
+       int i, err;
+
+       mt76x2_reset_wlan(dev, true);
+       mt76x2u_power_on(dev);
+
+       if (!mt76x02_wait_for_mac(&dev->mt76))
+               return -ETIMEDOUT;
+
+       err = mt76x2u_mcu_fw_init(dev);
+       if (err < 0)
+               return err;
+
+       if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
+                           MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+                           MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100))
+               return -EIO;
+
+       /* wait for the ASIC to become ready after firmware load */
+       if (!mt76x02_wait_for_mac(&dev->mt76))
+               return -ETIMEDOUT;
+
+       mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0);
+       mt76_wr(dev, MT_TSO_CTRL, 0);
+
+       mt76x2u_init_dma(dev);
+
+       err = mt76x2u_mcu_init(dev);
+       if (err < 0)
+               return err;
+
+       err = mt76x2u_mac_reset(dev);
+       if (err < 0)
+               return err;
+
+       mt76x02_mac_setaddr(&dev->mt76,
+                           dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+       dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
+
+       mt76x2u_init_beacon_offsets(dev);
+
+       if (!mt76x02_wait_for_txrx_idle(&dev->mt76))
+               return -ETIMEDOUT;
+
+       /* reset wcid table */
+       for (i = 0; i < 254; i++)
+               mt76_wr_copy(dev, MT_WCID_ADDR(i), &addr,
+                            sizeof(struct mt76_wcid_addr));
+
+       /* reset shared key table and pairwise key table */
+       for (i = 0; i < 4; i++)
+               mt76_wr(dev, MT_SKEY_MODE_BASE_0 + 4 * i, 0);
+       for (i = 0; i < 256; i++)
+               mt76_wr(dev, MT_WCID_ATTR(i), 1);
+
+       mt76_clear(dev, MT_BEACON_TIME_CFG,
+                  MT_BEACON_TIME_CFG_TIMER_EN |
+                  MT_BEACON_TIME_CFG_SYNC_MODE |
+                  MT_BEACON_TIME_CFG_TBTT_EN |
+                  MT_BEACON_TIME_CFG_BEACON_TX);
+
+       mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+       mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f);
+
+       err = mt76x2_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
+       if (err < 0)
+               return err;
+
+       mt76x02_phy_set_rxpath(&dev->mt76);
+       mt76x02_phy_set_txdac(&dev->mt76);
+
+       return mt76x2u_mac_stop(dev);
+}
+
+int mt76x2u_register_device(struct mt76x02_dev *dev)
+{
+       struct ieee80211_hw *hw = mt76_hw(dev);
+       struct wiphy *wiphy = hw->wiphy;
+       int err;
+
+       INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate);
+       mt76x2_init_device(dev);
+
+       err = mt76x2u_init_eeprom(dev);
+       if (err < 0)
+               return err;
+
+       err = mt76u_alloc_queues(&dev->mt76);
+       if (err < 0)
+               goto fail;
+
+       err = mt76u_mcu_init_rx(&dev->mt76);
+       if (err < 0)
+               goto fail;
+
+       err = mt76x2u_init_hardware(dev);
+       if (err < 0)
+               goto fail;
+
+       wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
+       err = mt76_register_device(&dev->mt76, true, mt76x02_rates,
+                                  ARRAY_SIZE(mt76x02_rates));
+       if (err)
+               goto fail;
+
+       /* check hw sg support in order to enable AMSDU */
+       if (mt76u_check_sg(&dev->mt76))
+               hw->max_tx_fragments = MT_SG_MAX_SIZE;
+       else
+               hw->max_tx_fragments = 1;
+
+       set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+       mt76x2_init_debugfs(dev);
+       mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
+       mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
+
+       return 0;
+
+fail:
+       mt76x2u_cleanup(dev);
+       return err;
+}
+
+void mt76x2u_stop_hw(struct mt76x02_dev *dev)
+{
+       mt76u_stop_stat_wk(&dev->mt76);
+       cancel_delayed_work_sync(&dev->cal_work);
+       mt76x2u_mac_stop(dev);
+}
+
+void mt76x2u_cleanup(struct mt76x02_dev *dev)
+{
+       mt76x02_mcu_set_radio_state(&dev->mt76, false, false);
+       mt76x2u_stop_hw(dev);
+       mt76u_queues_deinit(&dev->mt76);
+       mt76u_mcu_deinit(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
new file mode 100644 (file)
index 0000000..dbd635a
--- /dev/null
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+#include "eeprom.h"
+
+static void mt76x2u_mac_reset_counters(struct mt76x02_dev *dev)
+{
+       mt76_rr(dev, MT_RX_STAT_0);
+       mt76_rr(dev, MT_RX_STAT_1);
+       mt76_rr(dev, MT_RX_STAT_2);
+       mt76_rr(dev, MT_TX_STA_0);
+       mt76_rr(dev, MT_TX_STA_1);
+       mt76_rr(dev, MT_TX_STA_2);
+}
+
+static void mt76x2u_mac_fixup_xtal(struct mt76x02_dev *dev)
+{
+       s8 offset = 0;
+       u16 eep_val;
+
+       eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_2);
+
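+       /* The XTAL trim is stored sign/magnitude: bit 7 holds the sign,
+        * the low seven bits the magnitude, and 0xff means uncalibrated.
+        */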
+       offset = eep_val & 0x7f;
+       if ((eep_val & 0xff) == 0xff)
+               offset = 0;
+       else if (eep_val & 0x80)
+               offset = 0 - offset;
+
+       eep_val >>= 8;
+       if (eep_val == 0x00 || eep_val == 0xff) {
+               eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_1);
+               eep_val &= 0xff;
+
+               if (eep_val == 0x00 || eep_val == 0xff)
+                       eep_val = 0x14;
+       }
+
+       eep_val &= 0x7f;
+       mt76_rmw_field(dev, MT_VEND_ADDR(CFG, MT_XO_CTRL5),
+                      MT_XO_CTRL5_C2_VAL, eep_val + offset);
+       mt76_set(dev, MT_VEND_ADDR(CFG, MT_XO_CTRL6), MT_XO_CTRL6_C2_CTRL);
+
+       mt76_wr(dev, 0x504, 0x06000000);
+       mt76_wr(dev, 0x50c, 0x08800000);
+       mdelay(5);
+       mt76_wr(dev, 0x504, 0x0);
+
+       /* decrease SIFS from 16us to 13us */
+       mt76_rmw_field(dev, MT_XIFS_TIME_CFG,
+                      MT_XIFS_TIME_CFG_OFDM_SIFS, 0xd);
+       mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, MT_BKOFF_SLOT_CFG_CC_DELAY, 1);
+
+       /* init fce */
+       mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+       eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2);
+       switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
+       case 0:
+               mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
+               break;
+       case 1:
+               mt76_wr(dev, MT_XO_CTRL7, 0x5c1feed0);
+               break;
+       default:
+               break;
+       }
+}
+
+int mt76x2u_mac_reset(struct mt76x02_dev *dev)
+{
+       mt76_wr(dev, MT_WPDMA_GLO_CFG, BIT(4) | BIT(5));
+
+       /* init pbf regs */
+       mt76_wr(dev, MT_PBF_TX_MAX_PCNT, 0xefef3f1f);
+       mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf);
+
+       mt76_write_mac_initvals(dev);
+
+       mt76_wr(dev, MT_TX_LINK_CFG, 0x1020);
+       mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13);
+       mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00);
+       mt76_wr(dev, MT_TX_RTS_CFG, 0x92b20);
+
+       mt76_wr(dev, MT_WMM_AIFSN, 0x2273);
+       mt76_wr(dev, MT_WMM_CWMIN, 0x2344);
+       mt76_wr(dev, MT_WMM_CWMAX, 0x34aa);
+
+       mt76_clear(dev, MT_MAC_SYS_CTRL,
+                  MT_MAC_SYS_CTRL_RESET_CSR |
+                  MT_MAC_SYS_CTRL_RESET_BBP);
+
+       if (is_mt7612(dev))
+               mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN);
+
+       mt76_set(dev, MT_EXT_CCA_CFG, 0xf000);
+       mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31));
+
+       mt76x2u_mac_fixup_xtal(dev);
+
+       return 0;
+}
+
+int mt76x2u_mac_start(struct mt76x02_dev *dev)
+{
+       mt76x2u_mac_reset_counters(dev);
+
+       mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+       mt76x02_wait_for_wpdma(&dev->mt76, 1000);
+       usleep_range(50, 100);
+
+       mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
+
+       mt76_wr(dev, MT_MAC_SYS_CTRL,
+               MT_MAC_SYS_CTRL_ENABLE_TX |
+               MT_MAC_SYS_CTRL_ENABLE_RX);
+
+       return 0;
+}
+
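+/* Quiesce the MAC in order: zero the RTS retry limit, stop TXOP handling,
+ * wait for TX DMA and the TX queues to drain, disable TX/RX, force a BBP
+ * reset if the MAC refuses to idle, then drain the RX path as well.
+ */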
+int mt76x2u_mac_stop(struct mt76x02_dev *dev)
+{
+       int i, count = 0, val;
+       bool stopped = false;
+       u32 rts_cfg;
+
+       if (test_bit(MT76_REMOVED, &dev->mt76.state))
+               return -EIO;
+
+       rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+       mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
+
+       mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20));
+       mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1));
+
+       /* wait for tx dma to stop */
+       for (i = 0; i < 2000; i++) {
+               val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+               if (!(val & MT_USB_DMA_CFG_TX_BUSY) && i > 10)
+                       break;
+               usleep_range(50, 100);
+       }
+
+       /* wait for the TxQ page counters to drain */
+       for (i = 0; i < 200; i++) {
+               if (!(mt76_rr(dev, 0x0438) & 0xffffffff) &&
+                   !(mt76_rr(dev, 0x0a30) & 0x000000ff) &&
+                   !(mt76_rr(dev, 0x0a34) & 0xff00ff00))
+                       break;
+               usleep_range(10, 20);
+       }
+
+       /* disable tx-rx */
+       mt76_clear(dev, MT_MAC_SYS_CTRL,
+                  MT_MAC_SYS_CTRL_ENABLE_RX |
+                  MT_MAC_SYS_CTRL_ENABLE_TX);
+
+       /* Wait for MAC to become idle */
+       for (i = 0; i < 1000; i++) {
+               if (!(mt76_rr(dev, MT_MAC_STATUS) & MT_MAC_STATUS_TX) &&
+                   !mt76_rr(dev, MT_BBP(IBI, 12))) {
+                       stopped = true;
+                       break;
+               }
+               usleep_range(10, 20);
+       }
+
+       if (!stopped) {
+               mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+               mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+               mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+               mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+       }
+
+       /* wait for the RxQ page counters to drain */
+       for (i = 0; i < 200; i++) {
+               if (!(mt76_rr(dev, 0x0430) & 0x00ff0000) &&
+                   !(mt76_rr(dev, 0x0a30) & 0xffffffff) &&
+                   !(mt76_rr(dev, 0x0a34) & 0xffffffff) &&
+                   ++count > 10)
+                       break;
+               msleep(50);
+       }
+
+       if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 2000))
+               dev_warn(dev->mt76.dev, "MAC RX failed to stop\n");
+
+       /* wait for rx dma to stop */
+       for (i = 0; i < 2000; i++) {
+               val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+               if (!(val & MT_USB_DMA_CFG_RX_BUSY) && i > 10)
+                       break;
+               usleep_range(50, 100);
+       }
+
+       mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
+
+       return 0;
+}
+
+void mt76x2u_mac_resume(struct mt76x02_dev *dev)
+{
+       mt76_wr(dev, MT_MAC_SYS_CTRL,
+               MT_MAC_SYS_CTRL_ENABLE_TX |
+               MT_MAC_SYS_CTRL_ENABLE_RX);
+       mt76_set(dev, MT_TXOP_CTRL_CFG, BIT(20));
+       mt76_set(dev, MT_TXOP_HLDR_ET, BIT(1));
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
new file mode 100644 (file)
index 0000000..224609d
--- /dev/null
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+
+static int mt76x2u_start(struct ieee80211_hw *hw)
+{
+       struct mt76x02_dev *dev = hw->priv;
+       int ret;
+
+       mutex_lock(&dev->mt76.mutex);
+
+       ret = mt76x2u_mac_start(dev);
+       if (ret)
+               goto out;
+
+       set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+out:
+       mutex_unlock(&dev->mt76.mutex);
+       return ret;
+}
+
+static void mt76x2u_stop(struct ieee80211_hw *hw)
+{
+       struct mt76x02_dev *dev = hw->priv;
+
+       mutex_lock(&dev->mt76.mutex);
+       clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+       mt76x2u_stop_hw(dev);
+       mutex_unlock(&dev->mt76.mutex);
+}
+
+static int mt76x2u_add_interface(struct ieee80211_hw *hw,
+                                struct ieee80211_vif *vif)
+{
+       struct mt76x02_dev *dev = hw->priv;
+
+       if (!ether_addr_equal(dev->mt76.macaddr, vif->addr))
+               mt76x02_mac_setaddr(&dev->mt76, vif->addr);
+
+       mt76x02_vif_init(&dev->mt76, vif, 0);
+       return 0;
+}
+
+static int
+mt76x2u_set_channel(struct mt76x02_dev *dev,
+                   struct cfg80211_chan_def *chandef)
+{
+       int err;
+
+       cancel_delayed_work_sync(&dev->cal_work);
+       set_bit(MT76_RESET, &dev->mt76.state);
+
+       mt76_set_channel(&dev->mt76);
+
+       mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20));
+       mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1));
+       mt76x2_mac_stop(dev, false);
+
+       err = mt76x2u_phy_set_channel(dev, chandef);
+
+       mt76x2u_mac_resume(dev);
+
+       clear_bit(MT76_RESET, &dev->mt76.state);
+       mt76_txq_schedule_all(&dev->mt76);
+
+       return err;
+}
+
+static void
+mt76x2u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                        struct ieee80211_bss_conf *info, u32 changed)
+{
+       struct mt76x02_dev *dev = hw->priv;
+
+       mutex_lock(&dev->mt76.mutex);
+
+       if (changed & BSS_CHANGED_ASSOC) {
+               mt76x2u_phy_channel_calibrate(dev);
+               mt76x2_apply_gain_adj(dev);
+       }
+
+       if (changed & BSS_CHANGED_BSSID) {
+               mt76_wr(dev, MT_MAC_BSSID_DW0,
+                       get_unaligned_le32(info->bssid));
+               mt76_wr(dev, MT_MAC_BSSID_DW1,
+                       get_unaligned_le16(info->bssid + 4));
+       }
+
+       mutex_unlock(&dev->mt76.mutex);
+}
+
+static int
+mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
+{
+       struct mt76x02_dev *dev = hw->priv;
+       int err = 0;
+
+       mutex_lock(&dev->mt76.mutex);
+
+       if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+               if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+                       dev->mt76.rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+               else
+                       dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+               mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
+       }
+
+       if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+               ieee80211_stop_queues(hw);
+               err = mt76x2u_set_channel(dev, &hw->conf.chandef);
+               ieee80211_wake_queues(hw);
+       }
+
+       if (changed & IEEE80211_CONF_CHANGE_POWER) {
+               dev->mt76.txpower_conf = hw->conf.power_level * 2;
+
+               /* convert to per-chain power for 2x2 devices */
+               dev->mt76.txpower_conf -= 6;
+
+               if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+                       mt76x2_phy_set_txpower(dev);
+       }
+
+       mutex_unlock(&dev->mt76.mutex);
+
+       return err;
+}
+
+static void
+mt76x2u_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+               const u8 *mac)
+{
+       struct mt76x02_dev *dev = hw->priv;
+
+       set_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+static void
+mt76x2u_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+       struct mt76x02_dev *dev = hw->priv;
+
+       clear_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+const struct ieee80211_ops mt76x2u_ops = {
+       .tx = mt76x02_tx,
+       .start = mt76x2u_start,
+       .stop = mt76x2u_stop,
+       .add_interface = mt76x2u_add_interface,
+       .remove_interface = mt76x02_remove_interface,
+       .sta_add = mt76x02_sta_add,
+       .sta_remove = mt76x02_sta_remove,
+       .set_key = mt76x02_set_key,
+       .ampdu_action = mt76x02_ampdu_action,
+       .config = mt76x2u_config,
+       .wake_tx_queue = mt76_wake_tx_queue,
+       .bss_info_changed = mt76x2u_bss_info_changed,
+       .configure_filter = mt76x02_configure_filter,
+       .conf_tx = mt76x02_conf_tx,
+       .sw_scan_start = mt76x2u_sw_scan,
+       .sw_scan_complete = mt76x2u_sw_scan_complete,
+       .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
new file mode 100644 (file)
index 0000000..259ceae
--- /dev/null
@@ -0,0 +1,292 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/firmware.h>
+
+#include "mt76x2u.h"
+#include "eeprom.h"
+#include "../mt76x02_usb.h"
+
+#define MT_CMD_HDR_LEN                 4
+
+#define MCU_FW_URB_MAX_PAYLOAD         0x3900
+#define MCU_ROM_PATCH_MAX_PAYLOAD      2048
+
+#define MT76U_MCU_ILM_OFFSET           0x80000
+#define MT76U_MCU_DLM_OFFSET           0x110000
+#define MT76U_MCU_ROM_PATCH_OFFSET     0x90000
+
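+/* Ask the MCU to retune the VGA for the current channel: bit 31 of the
+ * channel word flags AP mode and bit 30 an extension channel, while the
+ * latest average RSSI and false-CCA count serve as tuning inputs.
+ */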
+int mt76x2u_mcu_set_dynamic_vga(struct mt76x02_dev *dev, u8 channel, bool ap,
+                               bool ext, int rssi, u32 false_cca)
+{
+       struct {
+               __le32 channel;
+               __le32 rssi_val;
+               __le32 false_cca_val;
+       } __packed __aligned(4) msg = {
+               .rssi_val = cpu_to_le32(rssi),
+               .false_cca_val = cpu_to_le32(false_cca),
+       };
+       struct sk_buff *skb;
+       u32 val = channel;
+
+       if (ap)
+               val |= BIT(31);
+       if (ext)
+               val |= BIT(30);
+       msg.channel = cpu_to_le32(val);
+
+       skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+       return mt76_mcu_send_msg(dev, skb, CMD_DYNC_VGA_OP, true);
+}
+
+static void mt76x2u_mcu_load_ivb(struct mt76x02_dev *dev)
+{
+       mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+                            USB_DIR_OUT | USB_TYPE_VENDOR,
+                            0x12, 0, NULL, 0);
+}
+
+static void mt76x2u_mcu_enable_patch(struct mt76x02_dev *dev)
+{
+       struct mt76_usb *usb = &dev->mt76.usb;
+       const u8 data[] = {
+               0x6f, 0xfc, 0x08, 0x01,
+               0x20, 0x04, 0x00, 0x00,
+               0x00, 0x09, 0x00,
+       };
+
+       memcpy(usb->data, data, sizeof(data));
+       mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+                            USB_DIR_OUT | USB_TYPE_CLASS,
+                            0x12, 0, usb->data, sizeof(data));
+}
+
+static void mt76x2u_mcu_reset_wmt(struct mt76x02_dev *dev)
+{
+       struct mt76_usb *usb = &dev->mt76.usb;
+       u8 data[] = {
+               0x6f, 0xfc, 0x05, 0x01,
+               0x07, 0x01, 0x00, 0x04
+       };
+
+       memcpy(usb->data, data, sizeof(data));
+       mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+                            USB_DIR_OUT | USB_TYPE_CLASS,
+                            0x12, 0, usb->data, sizeof(data));
+}
+
+static int mt76x2u_mcu_load_rom_patch(struct mt76x02_dev *dev)
+{
+       bool rom_protect = !is_mt7612(dev);
+       struct mt76x02_patch_header *hdr;
+       u32 val, patch_mask, patch_reg;
+       const struct firmware *fw;
+       int err;
+
+       if (rom_protect &&
+           !mt76_poll_msec(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
+               dev_err(dev->mt76.dev,
+                       "could not get hardware semaphore for ROM PATCH\n");
+               return -ETIMEDOUT;
+       }
+
+       if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
+               patch_mask = BIT(0);
+               patch_reg = MT_MCU_CLOCK_CTL;
+       } else {
+               patch_mask = BIT(1);
+               patch_reg = MT_MCU_COM_REG0;
+       }
+
+       if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
+               dev_info(dev->mt76.dev, "ROM patch already applied\n");
+               return 0;
+       }
+
+       err = request_firmware(&fw, MT7662U_ROM_PATCH, dev->mt76.dev);
+       if (err < 0)
+               return err;
+
+       if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
+               dev_err(dev->mt76.dev, "failed to load firmware\n");
+               err = -EIO;
+               goto out;
+       }
+
+       hdr = (struct mt76x02_patch_header *)fw->data;
+       dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
+
+       /* enable USB_DMA_CFG */
+       val = MT_USB_DMA_CFG_RX_BULK_EN |
+             MT_USB_DMA_CFG_TX_BULK_EN |
+             FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
+       mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+
+       /* vendor reset */
+       mt76x02u_mcu_fw_reset(&dev->mt76);
+       usleep_range(5000, 10000);
+
+       /* enable FCE to send in-band cmd */
+       mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+       /* FCE tx_fs_base_ptr */
+       mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+       /* FCE tx_fs_max_cnt */
+       mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
+       /* FCE pdma enable */
+       mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+       /* FCE skip_fs_en */
+       mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
+
+       err = mt76x02u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
+                                       fw->size - sizeof(*hdr),
+                                       MCU_ROM_PATCH_MAX_PAYLOAD,
+                                       MT76U_MCU_ROM_PATCH_OFFSET);
+       if (err < 0) {
+               err = -EIO;
+               goto out;
+       }
+
+       mt76x2u_mcu_enable_patch(dev);
+       mt76x2u_mcu_reset_wmt(dev);
+       mdelay(20);
+
+       if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 100)) {
+               dev_err(dev->mt76.dev, "failed to load ROM patch\n");
+               err = -ETIMEDOUT;
+       }
+
+out:
+       if (rom_protect)
+               mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
+       release_firmware(fw);
+       return err;
+}
+
+static int mt76x2u_mcu_load_firmware(struct mt76x02_dev *dev)
+{
+       u32 val, dlm_offset = MT76U_MCU_DLM_OFFSET;
+       const struct mt76x02_fw_header *hdr;
+       int err, len, ilm_len, dlm_len;
+       const struct firmware *fw;
+
+       err = request_firmware(&fw, MT7662U_FIRMWARE, dev->mt76.dev);
+       if (err < 0)
+               return err;
+
+       if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       hdr = (const struct mt76x02_fw_header *)fw->data;
+       ilm_len = le32_to_cpu(hdr->ilm_len);
+       dlm_len = le32_to_cpu(hdr->dlm_len);
+       len = sizeof(*hdr) + ilm_len + dlm_len;
+       if (fw->size != len) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       val = le16_to_cpu(hdr->fw_ver);
+       dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
+                (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
+
+       val = le16_to_cpu(hdr->build_ver);
+       dev_info(dev->mt76.dev, "Build: %x\n", val);
+       dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
+
+       /* vendor reset */
+       mt76x02u_mcu_fw_reset(&dev->mt76);
+       usleep_range(5000, 10000);
+
+       /* enable USB_DMA_CFG */
+       val = MT_USB_DMA_CFG_RX_BULK_EN |
+             MT_USB_DMA_CFG_TX_BULK_EN |
+             FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
+       mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+       /* enable FCE to send in-band cmd */
+       mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+       /* FCE tx_fs_base_ptr */
+       mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+       /* FCE tx_fs_max_cnt */
+       mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
+       /* FCE pdma enable */
+       mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+       /* FCE skip_fs_en */
+       mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
+
+       /* load ILM */
+       err = mt76x02u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
+                                       ilm_len, MCU_FW_URB_MAX_PAYLOAD,
+                                       MT76U_MCU_ILM_OFFSET);
+       if (err < 0) {
+               err = -EIO;
+               goto out;
+       }
+
+       /* load DLM */
+       if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+               dlm_offset += 0x800;
+       err = mt76x02u_mcu_fw_send_data(&dev->mt76,
+                                       fw->data + sizeof(*hdr) + ilm_len,
+                                       dlm_len, MCU_FW_URB_MAX_PAYLOAD,
+                                       dlm_offset);
+       if (err < 0) {
+               err = -EIO;
+               goto out;
+       }
+
+       mt76x2u_mcu_load_ivb(dev);
+       if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 100)) {
+               dev_err(dev->mt76.dev, "firmware failed to start\n");
+               err = -ETIMEDOUT;
+               goto out;
+       }
+
+       mt76_set(dev, MT_MCU_COM_REG0, BIT(1));
+       /* enable FCE to send in-band cmd */
+       mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+       dev_dbg(dev->mt76.dev, "firmware running\n");
+       mt76x02_set_ethtool_fwver(&dev->mt76, hdr);
+
+out:
+       release_firmware(fw);
+       return err;
+}
+
+int mt76x2u_mcu_fw_init(struct mt76x02_dev *dev)
+{
+       int err;
+
+       err = mt76x2u_mcu_load_rom_patch(dev);
+       if (err < 0)
+               return err;
+
+       return mt76x2u_mcu_load_firmware(dev);
+}
+
+int mt76x2u_mcu_init(struct mt76x02_dev *dev)
+{
+       int err;
+
+       err = mt76x02_mcu_function_select(&dev->mt76, Q_SELECT,
+                                          1, false);
+       if (err < 0)
+               return err;
+
+       return mt76x02_mcu_set_radio_state(&dev->mt76, true, false);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
new file mode 100644 (file)
index 0000000..b11f8a6
--- /dev/null
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+#include "eeprom.h"
+#include "../mt76x02_phy.h"
+
+void mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev)
+{
+       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+       bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
+
+       if (mt76x2_channel_silent(dev))
+               return;
+
+       mt76x2u_mac_stop(dev);
+
+       if (is_5ghz)
+               mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, 0, false);
+
+       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_LOFT, is_5ghz, false);
+       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, false);
+       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQC_FI, is_5ghz, false);
+       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TEMP_SENSOR, 0, false);
+
+       mt76x2u_mac_resume(dev);
+}
+
+static void
+mt76x2u_phy_update_channel_gain(struct mt76x02_dev *dev)
+{
+       u8 channel = dev->mt76.chandef.chan->hw_value;
+       int freq, freq1;
+       u32 false_cca;
+
+       freq = dev->mt76.chandef.chan->center_freq;
+       freq1 = dev->mt76.chandef.center_freq1;
+
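+       /* Convert the primary channel number into the centre channel of
+        * the configured 40/80 MHz block before handing it to the MCU.
+        */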
+       switch (dev->mt76.chandef.width) {
+       case NL80211_CHAN_WIDTH_80: {
+               int ch_group_index;
+
+               ch_group_index = (freq - freq1 + 30) / 20;
+               if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+                       ch_group_index = 0;
+               channel += 6 - ch_group_index * 4;
+               break;
+       }
+       case NL80211_CHAN_WIDTH_40:
+               if (freq1 > freq)
+                       channel += 2;
+               else
+                       channel -= 2;
+               break;
+       default:
+               break;
+       }
+
+       dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(&dev->mt76);
+       false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS,
+                             mt76_rr(dev, MT_RX_STAT_1));
+
+       mt76x2u_mcu_set_dynamic_vga(dev, channel, false, false,
+                                   dev->cal.avg_rssi_all, false_cca);
+}
+
+void mt76x2u_phy_calibrate(struct work_struct *work)
+{
+       struct mt76x02_dev *dev;
+
+       dev = container_of(work, struct mt76x02_dev, cal_work.work);
+       mt76x2_phy_tssi_compensate(dev, false);
+       mt76x2u_phy_update_channel_gain(dev);
+
+       ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+                                    MT_CALIBRATE_INTERVAL);
+}
+
+int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
+                           struct cfg80211_chan_def *chandef)
+{
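+       /* EXT_CCA remap presets indexed by the primary channel's position
+        * within an 80 MHz block; the values appear to mirror those used
+        * by the mt76x2 PCIe driver.
+        */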
+       u32 ext_cca_chan[4] = {
+               [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+               [1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+               [2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+               [3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+       };
+       bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+       struct ieee80211_channel *chan = chandef->chan;
+       u8 channel = chan->hw_value, bw, bw_index;
+       int ch_group_index, freq, freq1, ret;
+
+       dev->cal.channel_cal_done = false;
+       freq = chandef->chan->center_freq;
+       freq1 = chandef->center_freq1;
+
+       switch (chandef->width) {
+       case NL80211_CHAN_WIDTH_40:
+               bw = 1;
+               if (freq1 > freq) {
+                       bw_index = 1;
+                       ch_group_index = 0;
+               } else {
+                       bw_index = 3;
+                       ch_group_index = 1;
+               }
+               channel += 2 - ch_group_index * 4;
+               break;
+       case NL80211_CHAN_WIDTH_80:
+               ch_group_index = (freq - freq1 + 30) / 20;
+               if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+                       ch_group_index = 0;
+               bw = 2;
+               bw_index = ch_group_index;
+               channel += 6 - ch_group_index * 4;
+               break;
+       default:
+               bw = 0;
+               bw_index = 0;
+               ch_group_index = 0;
+               break;
+       }
+
+       mt76x2_read_rx_gain(dev);
+       mt76x2_phy_set_txpower_regs(dev, chan->band);
+       mt76x2_configure_tx_delay(dev, chan->band, bw);
+       mt76x2_phy_set_txpower(dev);
+
+       mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
+       mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
+
+       mt76_rmw(dev, MT_EXT_CCA_CFG,
+                (MT_EXT_CCA_CFG_CCA0 |
+                 MT_EXT_CCA_CFG_CCA1 |
+                 MT_EXT_CCA_CFG_CCA2 |
+                 MT_EXT_CCA_CFG_CCA3 |
+                 MT_EXT_CCA_CFG_CCA_MASK),
+                ext_cca_chan[ch_group_index]);
+
+       ret = mt76x2_mcu_set_channel(dev, channel, bw, bw_index, scan);
+       if (ret)
+               return ret;
+
+       mt76x2_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);
+
+       /* Enable LDPC Rx */
+       if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+               mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
+
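+       /* R and Rx LPF calibration are one-shot, run only on the first
+        * channel programming after power-up; RXDCOC below is redone on
+        * every channel switch.
+        */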
+       if (!dev->cal.init_cal_done) {
+               u8 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_BT_RCAL_RESULT);
+
+               if (val != 0xff)
+                       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R,
+                                             0, false);
+       }
+
+       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, channel, false);
+
+       /* Rx LPF calibration */
+       if (!dev->cal.init_cal_done)
+               mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RC, 0, false);
+       dev->cal.init_cal_done = true;
+
+       mt76_wr(dev, MT_BBP(AGC, 61), 0xff64a4e2);
+       mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010);
+       mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404);
+       mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
+       mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101b3f);
+
+       mt76_set(dev, MT_BBP(TXO, 4), BIT(25));
+       mt76_set(dev, MT_BBP(RXO, 13), BIT(8));
+
+       if (scan)
+               return 0;
+
+       if (mt76x02_tssi_enabled(&dev->mt76)) {
+               /* init default values for temp compensation */
+               mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+                              0x38);
+               mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
+                              0x38);
+
+               /* init tssi calibration */
+               if (!mt76x2_channel_silent(dev)) {
+                       struct ieee80211_channel *chan;
+                       u32 flag = 0;
+
+                       chan = dev->mt76.chandef.chan;
+                       if (chan->band == NL80211_BAND_5GHZ)
+                               flag |= BIT(0);
+                       if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band))
+                               flag |= BIT(8);
+                       mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TSSI,
+                                             flag, false);
+                       dev->cal.tssi_cal_done = true;
+               }
+       }
+
+       ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+                                    MT_CALIBRATE_INTERVAL);
+       return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_common.c
deleted file mode 100644 (file)
index a2338ba..0000000
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "mt76x2.h"
-
-void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)
-{
-       struct mt76_txq *mtxq;
-
-       if (!txq)
-               return;
-
-       mtxq = (struct mt76_txq *) txq->drv_priv;
-       if (txq->sta) {
-               struct mt76x2_sta *sta;
-
-               sta = (struct mt76x2_sta *) txq->sta->drv_priv;
-               mtxq->wcid = &sta->wcid;
-       } else {
-               struct mt76x2_vif *mvif;
-
-               mvif = (struct mt76x2_vif *) txq->vif->drv_priv;
-               mtxq->wcid = &mvif->group_wcid;
-       }
-
-       mt76_txq_init(&dev->mt76, txq);
-}
-EXPORT_SYMBOL_GPL(mt76x2_txq_init);
-
-int mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                       struct ieee80211_ampdu_params *params)
-{
-       enum ieee80211_ampdu_mlme_action action = params->action;
-       struct ieee80211_sta *sta = params->sta;
-       struct mt76x2_dev *dev = hw->priv;
-       struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
-       struct ieee80211_txq *txq = sta->txq[params->tid];
-       u16 tid = params->tid;
-       u16 *ssn = &params->ssn;
-       struct mt76_txq *mtxq;
-
-       if (!txq)
-               return -EINVAL;
-
-       mtxq = (struct mt76_txq *)txq->drv_priv;
-
-       switch (action) {
-       case IEEE80211_AMPDU_RX_START:
-               mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, params->buf_size);
-               mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
-               break;
-       case IEEE80211_AMPDU_RX_STOP:
-               mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
-               mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
-                          BIT(16 + tid));
-               break;
-       case IEEE80211_AMPDU_TX_OPERATIONAL:
-               mtxq->aggr = true;
-               mtxq->send_bar = false;
-               ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
-               break;
-       case IEEE80211_AMPDU_TX_STOP_FLUSH:
-       case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
-               mtxq->aggr = false;
-               ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
-               break;
-       case IEEE80211_AMPDU_TX_START:
-               mtxq->agg_ssn = *ssn << 4;
-               ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-               break;
-       case IEEE80211_AMPDU_TX_STOP_CONT:
-               mtxq->aggr = false;
-               ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-               break;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mt76x2_ampdu_action);
-
-int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                  struct ieee80211_sta *sta)
-{
-       struct mt76x2_dev *dev = hw->priv;
-       struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
-       struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
-       int ret = 0;
-       int idx = 0;
-       int i;
-
-       mutex_lock(&dev->mutex);
-
-       idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
-       if (idx < 0) {
-               ret = -ENOSPC;
-               goto out;
-       }
-
-       msta->vif = mvif;
-       msta->wcid.sta = 1;
-       msta->wcid.idx = idx;
-       msta->wcid.hw_key_idx = -1;
-       mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
-       mt76x2_mac_wcid_set_drop(dev, idx, false);
-       for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
-               mt76x2_txq_init(dev, sta->txq[i]);
-
-       if (vif->type == NL80211_IFTYPE_AP)
-               set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
-
-       ewma_signal_init(&msta->rssi);
-
-       rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
-
-out:
-       mutex_unlock(&dev->mutex);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(mt76x2_sta_add);
-
-int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                     struct ieee80211_sta *sta)
-{
-       struct mt76x2_dev *dev = hw->priv;
-       struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
-       int idx = msta->wcid.idx;
-       int i;
-
-       mutex_lock(&dev->mutex);
-       rcu_assign_pointer(dev->wcid[idx], NULL);
-       for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
-               mt76_txq_remove(&dev->mt76, sta->txq[i]);
-       mt76x2_mac_wcid_set_drop(dev, idx, true);
-       mt76_wcid_free(dev->wcid_mask, idx);
-       mt76x2_mac_wcid_setup(dev, idx, 0, NULL);
-       mutex_unlock(&dev->mutex);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mt76x2_sta_remove);
-
-void mt76x2_remove_interface(struct ieee80211_hw *hw,
-                            struct ieee80211_vif *vif)
-{
-       struct mt76x2_dev *dev = hw->priv;
-
-       mt76_txq_remove(&dev->mt76, vif->txq);
-}
-EXPORT_SYMBOL_GPL(mt76x2_remove_interface);
-
-int mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
-                  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
-                  struct ieee80211_key_conf *key)
-{
-       struct mt76x2_dev *dev = hw->priv;
-       struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
-       struct mt76x2_sta *msta;
-       struct mt76_wcid *wcid;
-       int idx = key->keyidx;
-       int ret;
-
-       /* fall back to sw encryption for unsupported ciphers */
-       switch (key->cipher) {
-       case WLAN_CIPHER_SUITE_WEP40:
-       case WLAN_CIPHER_SUITE_WEP104:
-       case WLAN_CIPHER_SUITE_TKIP:
-       case WLAN_CIPHER_SUITE_CCMP:
-               break;
-       default:
-               return -EOPNOTSUPP;
-       }
-
-       /*
-        * The hardware does not support per-STA RX GTK; fall back
-        * to software mode for these.
-        */
-       if ((vif->type == NL80211_IFTYPE_ADHOC ||
-            vif->type == NL80211_IFTYPE_MESH_POINT) &&
-           (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
-            key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
-           !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
-               return -EOPNOTSUPP;
-
-       msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL;
-       wcid = msta ? &msta->wcid : &mvif->group_wcid;
-
-       if (cmd == SET_KEY) {
-               key->hw_key_idx = wcid->idx;
-               wcid->hw_key_idx = idx;
-               if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
-                       key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
-                       wcid->sw_iv = true;
-               }
-       } else {
-               if (idx == wcid->hw_key_idx) {
-                       wcid->hw_key_idx = -1;
-                       wcid->sw_iv = true;
-               }
-
-               key = NULL;
-       }
-       mt76_wcid_key_setup(&dev->mt76, wcid, key);
-
-       if (!msta) {
-               if (key || wcid->hw_key_idx == idx) {
-                       ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key);
-                       if (ret)
-                               return ret;
-               }
-
-               return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key);
-       }
-
-       return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key);
-}
-EXPORT_SYMBOL_GPL(mt76x2_set_key);
-
-int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                  u16 queue, const struct ieee80211_tx_queue_params *params)
-{
-       struct mt76x2_dev *dev = hw->priv;
-       u8 cw_min = 5, cw_max = 10, qid;
-       u32 val;
-
-       qid = dev->mt76.q_tx[queue].hw_idx;
-
-       if (params->cw_min)
-               cw_min = fls(params->cw_min);
-       if (params->cw_max)
-               cw_max = fls(params->cw_max);
-
-       val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
-             FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
-             FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
-             FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
-       mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
-
-       val = mt76_rr(dev, MT_WMM_TXOP(qid));
-       val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
-       val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
-       mt76_wr(dev, MT_WMM_TXOP(qid), val);
-
-       val = mt76_rr(dev, MT_WMM_AIFSN);
-       val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
-       val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
-       mt76_wr(dev, MT_WMM_AIFSN, val);
-
-       val = mt76_rr(dev, MT_WMM_CWMIN);
-       val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
-       val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
-       mt76_wr(dev, MT_WMM_CWMIN, val);
-
-       val = mt76_rr(dev, MT_WMM_CWMAX);
-       val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
-       val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
-       mt76_wr(dev, MT_WMM_CWMAX, val);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mt76x2_conf_tx);
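mt76x2_conf_tx() packs per-queue EDCA parameters two ways: a dedicated per-AC register filled via FIELD_PREP(), and shared registers where each queue owns a bit-field, updated read-modify-write. Note that cw_min/cw_max are stored as fls() of the requested value, suggesting the hardware takes the contention window as an exponent. A freestanding sketch of the shared-register update (the mask and shift values are illustrative, not the chip's):

#include <stdint.h>
#include <stdio.h>

#define FIELD_MASK	0xfu
#define FIELD_SHIFT(q)	((q) * 4)	/* four 4-bit fields per 32-bit register */

/* Replace queue q's 4-bit field in reg, leaving the other queues alone. */
static uint32_t rmw_field(uint32_t reg, int q, uint32_t val)
{
	reg &= ~(FIELD_MASK << FIELD_SHIFT(q));
	reg |= (val & FIELD_MASK) << FIELD_SHIFT(q);
	return reg;
}

int main(void)
{
	uint32_t reg = 0x3210;		/* queues 0..3 currently hold 0,1,2,3 */

	reg = rmw_field(reg, 2, 0x7);
	printf("%#x\n", reg);		/* 0x3710: only queue 2 changed */
	return 0;
}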
-
-void mt76x2_configure_filter(struct ieee80211_hw *hw,
-                            unsigned int changed_flags,
-                            unsigned int *total_flags, u64 multicast)
-{
-       struct mt76x2_dev *dev = hw->priv;
-       u32 flags = 0;
-
-#define MT76_FILTER(_flag, _hw) do { \
-               flags |= *total_flags & FIF_##_flag;                    \
-               dev->rxfilter &= ~(_hw);                                \
-               dev->rxfilter |= !(flags & FIF_##_flag) * (_hw);        \
-       } while (0)
-
-       mutex_lock(&dev->mutex);
-
-       dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
-
-       MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
-       MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
-       MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
-                            MT_RX_FILTR_CFG_CTS |
-                            MT_RX_FILTR_CFG_CFEND |
-                            MT_RX_FILTR_CFG_CFACK |
-                            MT_RX_FILTR_CFG_BA |
-                            MT_RX_FILTR_CFG_CTRL_RSV);
-       MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
-
-       *total_flags = flags;
-       mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
-
-       mutex_unlock(&dev->mutex);
-}
-EXPORT_SYMBOL_GPL(mt76x2_configure_filter);
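The MT76_FILTER() macro above avoids a branch by multiplying the hardware mask with a boolean: !(flags & FIF_x) evaluates to 0 or 1, so the filter bit ends up set exactly when the stack did not ask to see those frames. The same trick in isolation (flag and mask values are made up for the example):

#include <stdint.h>
#include <stdio.h>

#define FLAG_FCSFAIL	0x1u	/* illustrative stack-level flag */
#define HW_CRC_ERR	0x400u	/* illustrative hardware filter bit */

/* Filter CRC-error frames in hardware unless the stack wants them. */
static uint32_t apply_filter(uint32_t rxfilter, uint32_t flags)
{
	rxfilter &= ~HW_CRC_ERR;
	rxfilter |= !(flags & FLAG_FCSFAIL) * HW_CRC_ERR;
	return rxfilter;
}

int main(void)
{
	printf("%#x\n", apply_filter(0, 0));		/* 0x400: drop in hw */
	printf("%#x\n", apply_filter(0, FLAG_FCSFAIL));	/* 0: pass them up */
	return 0;
}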
-
-void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw,
-                               struct ieee80211_vif *vif,
-                               struct ieee80211_sta *sta)
-{
-       struct mt76x2_dev *dev = hw->priv;
-       struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
-       struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
-       struct ieee80211_tx_rate rate = {};
-
-       if (!rates)
-               return;
-
-       rate.idx = rates->rate[0].idx;
-       rate.flags = rates->rate[0].flags;
-       mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate);
-       msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate);
-}
-EXPORT_SYMBOL_GPL(mt76x2_sta_rate_tbl_update);
-
-void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-                        struct sk_buff *skb)
-{
-       struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
-       void *rxwi = skb->data;
-
-       if (q == MT_RXQ_MCU) {
-               skb_queue_tail(&dev->mcu.res_q, skb);
-               wake_up(&dev->mcu.wait);
-               return;
-       }
-
-       skb_pull(skb, sizeof(struct mt76x2_rxwi));
-       if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
-               dev_kfree_skb(skb);
-               return;
-       }
-
-       mt76_rx(&dev->mt76, q, skb);
-}
-EXPORT_SYMBOL_GPL(mt76x2_queue_rx_skb);
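mt76x2_queue_rx_skb() splits the rx path: MCU-ring completions are parked on a response queue and waiters are woken, while data frames are stripped of their rxwi and handed to mt76_rx(). A sketch of the response-queue half as it would look in kernel C (the struct is a stand-in, not the driver's):

#include <linux/skbuff.h>
#include <linux/wait.h>

struct mcu {
	struct sk_buff_head res_q;
	wait_queue_head_t wait;
};

/* rx side: park the reply and wake anyone blocked on a command. */
static void mcu_rx(struct mcu *mcu, struct sk_buff *skb)
{
	skb_queue_tail(&mcu->res_q, skb);
	wake_up(&mcu->wait);
}

/* command side: sleep until a reply shows up or the timeout expires. */
static struct sk_buff *mcu_get_response(struct mcu *mcu, unsigned long tmo)
{
	wait_event_timeout(mcu->wait, !skb_queue_empty(&mcu->res_q), tmo);
	return skb_dequeue(&mcu->res_q);	/* NULL on timeout */
}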
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2_core.c
deleted file mode 100644 (file)
index 2629779..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include "mt76x2.h"
-#include "mt76x2_trace.h"
-
-void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->irq_lock, flags);
-       dev->irqmask &= ~clear;
-       dev->irqmask |= set;
-       mt76_wr(dev, MT_INT_MASK_CSR, dev->irqmask);
-       spin_unlock_irqrestore(&dev->irq_lock, flags);
-}
-
-void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-{
-       struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
-
-       mt76x2_irq_enable(dev, MT_INT_RX_DONE(q));
-}
-
-irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance)
-{
-       struct mt76x2_dev *dev = dev_instance;
-       u32 intr;
-
-       intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
-       mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
-
-       if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
-               return IRQ_NONE;
-
-       trace_dev_irq(dev, intr, dev->irqmask);
-
-       intr &= dev->irqmask;
-
-       if (intr & MT_INT_TX_DONE_ALL) {
-               mt76x2_irq_disable(dev, MT_INT_TX_DONE_ALL);
-               tasklet_schedule(&dev->tx_tasklet);
-       }
-
-       if (intr & MT_INT_RX_DONE(0)) {
-               mt76x2_irq_disable(dev, MT_INT_RX_DONE(0));
-               napi_schedule(&dev->mt76.napi[0]);
-       }
-
-       if (intr & MT_INT_RX_DONE(1)) {
-               mt76x2_irq_disable(dev, MT_INT_RX_DONE(1));
-               napi_schedule(&dev->mt76.napi[1]);
-       }
-
-       if (intr & MT_INT_PRE_TBTT)
-               tasklet_schedule(&dev->pre_tbtt_tasklet);
-
-       /* send buffered multicast frames now */
-       if (intr & MT_INT_TBTT)
-               mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
-
-       if (intr & MT_INT_TX_STAT) {
-               mt76x2_mac_poll_tx_status(dev, true);
-               tasklet_schedule(&dev->tx_tasklet);
-       }
-
-       if (intr & MT_INT_GPTIMER) {
-               mt76x2_irq_disable(dev, MT_INT_GPTIMER);
-               tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
-       }
-
-       return IRQ_HANDLED;
-}
-
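The handler above follows the usual split-interrupt shape: acknowledge the raw status, filter it through the software irqmask, then disable each busy source and defer the real work to NAPI or a tasklet, which re-enables the source once it drains. A freestanding model of that flow (register I/O replaced by stubs):

#include <stdint.h>
#include <stdio.h>

#define INT_RX_DONE0	0x1u
#define INT_TX_DONE	0x2u

static uint32_t irqmask = INT_RX_DONE0 | INT_TX_DONE;

static void ack_hw(uint32_t intr)	{ (void)intr; /* write-1-to-clear */ }
static void mask_source(uint32_t bit)	{ irqmask &= ~bit; }
static void schedule_rx_poll(void)	{ puts("napi_schedule(rx)"); }
static void schedule_tx_work(void)	{ puts("tasklet_schedule(tx)"); }

/* Top half: ack, mask, defer; return 0 for IRQ_NONE, 1 for IRQ_HANDLED. */
static int irq_handler(uint32_t intr_raw)
{
	uint32_t intr;

	ack_hw(intr_raw);		/* clear the hardware latch first */
	intr = intr_raw & irqmask;
	if (!intr)
		return 0;

	if (intr & INT_RX_DONE0) {
		mask_source(INT_RX_DONE0);	/* the poll loop re-enables it */
		schedule_rx_poll();
	}
	if (intr & INT_TX_DONE) {
		mask_source(INT_TX_DONE);
		schedule_tx_work();
	}
	return 1;
}

int main(void) { return !irq_handler(INT_RX_DONE0 | INT_TX_DONE); }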
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
deleted file mode 100644 (file)
index 77b5ff1..0000000
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/debugfs.h>
-#include "mt76x2.h"
-
-static int
-mt76x2_ampdu_stat_read(struct seq_file *file, void *data)
-{
-       struct mt76x2_dev *dev = file->private;
-       int i, j;
-
-       for (i = 0; i < 4; i++) {
-               seq_puts(file, "Length: ");
-               for (j = 0; j < 8; j++)
-                       seq_printf(file, "%8d | ", i * 8 + j + 1);
-               seq_puts(file, "\n");
-               seq_puts(file, "Count:  ");
-               for (j = 0; j < 8; j++)
-                       seq_printf(file, "%8d | ", dev->aggr_stats[i * 8 + j]);
-               seq_puts(file, "\n");
-               seq_puts(file, "--------");
-               for (j = 0; j < 8; j++)
-                       seq_puts(file, "-----------");
-               seq_puts(file, "\n");
-       }
-
-       return 0;
-}
-
-static int
-mt76x2_ampdu_stat_open(struct inode *inode, struct file *f)
-{
-       return single_open(f, mt76x2_ampdu_stat_read, inode->i_private);
-}
-
-static void
-seq_puts_array(struct seq_file *file, const char *str, s8 *val, int len)
-{
-       int i;
-
-       seq_printf(file, "%10s:", str);
-       for (i = 0; i < len; i++)
-               seq_printf(file, " %2d", val[i]);
-       seq_puts(file, "\n");
-}
-
-static int read_txpower(struct seq_file *file, void *data)
-{
-       struct mt76x2_dev *dev = dev_get_drvdata(file->private);
-
-       seq_printf(file, "Target power: %d\n", dev->target_power);
-
-       seq_puts_array(file, "Delta", dev->target_power_delta,
-                      ARRAY_SIZE(dev->target_power_delta));
-       seq_puts_array(file, "CCK", dev->rate_power.cck,
-                      ARRAY_SIZE(dev->rate_power.cck));
-       seq_puts_array(file, "OFDM", dev->rate_power.ofdm,
-                      ARRAY_SIZE(dev->rate_power.ofdm));
-       seq_puts_array(file, "HT", dev->rate_power.ht,
-                      ARRAY_SIZE(dev->rate_power.ht));
-       seq_puts_array(file, "VHT", dev->rate_power.vht,
-                      ARRAY_SIZE(dev->rate_power.vht));
-       return 0;
-}
-
-static const struct file_operations fops_ampdu_stat = {
-       .open = mt76x2_ampdu_stat_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-static int
-mt76x2_dfs_stat_read(struct seq_file *file, void *data)
-{
-       int i;
-       struct mt76x2_dev *dev = file->private;
-       struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
-
-       seq_printf(file, "allocated sequences:\t%d\n",
-                  dfs_pd->seq_stats.seq_pool_len);
-       seq_printf(file, "used sequences:\t\t%d\n",
-                  dfs_pd->seq_stats.seq_len);
-       seq_puts(file, "\n");
-
-       for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
-               seq_printf(file, "engine: %d\n", i);
-               seq_printf(file, "  hw pattern detected:\t%d\n",
-                          dfs_pd->stats[i].hw_pattern);
-               seq_printf(file, "  hw pulse discarded:\t%d\n",
-                          dfs_pd->stats[i].hw_pulse_discarded);
-               seq_printf(file, "  sw pattern detected:\t%d\n",
-                          dfs_pd->stats[i].sw_pattern);
-       }
-
-       return 0;
-}
-
-static int
-mt76x2_dfs_stat_open(struct inode *inode, struct file *f)
-{
-       return single_open(f, mt76x2_dfs_stat_read, inode->i_private);
-}
-
-static const struct file_operations fops_dfs_stat = {
-       .open = mt76x2_dfs_stat_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-static int read_agc(struct seq_file *file, void *data)
-{
-       struct mt76x2_dev *dev = dev_get_drvdata(file->private);
-
-       seq_printf(file, "avg_rssi: %d\n", dev->cal.avg_rssi_all);
-       seq_printf(file, "low_gain: %d\n", dev->cal.low_gain);
-       seq_printf(file, "false_cca: %d\n", dev->cal.false_cca);
-       seq_printf(file, "agc_gain_adjust: %d\n", dev->cal.agc_gain_adjust);
-
-       return 0;
-}
-
-void mt76x2_init_debugfs(struct mt76x2_dev *dev)
-{
-       struct dentry *dir;
-
-       dir = mt76_register_debugfs(&dev->mt76);
-       if (!dir)
-               return;
-
-       debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp);
-       debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc);
-
-       debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
-       debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat);
-       debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
-                                   read_txpower);
-
-       debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc);
-}
-EXPORT_SYMBOL_GPL(mt76x2_init_debugfs);
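The open/fops boilerplate around mt76x2_ampdu_stat_read() is the classic single_open() pattern; newer kernels fold it into the DEFINE_SHOW_ATTRIBUTE() helper. A sketch of the equivalent, assuming a kernel that provides the macro:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* Same callback shape as mt76x2_ampdu_stat_read() above. */
static int ampdu_stat_show(struct seq_file *file, void *data)
{
	seq_puts(file, "...\n");	/* emit the table here */
	return 0;
}

/* Generates ampdu_stat_open() plus an ampdu_stat_fops wired to
 * seq_read/seq_lseek/single_release, replacing the hand-rolled fops. */
DEFINE_SHOW_ATTRIBUTE(ampdu_stat);

/* usage:
 *	debugfs_create_file("ampdu_stat", 0400, dir, dev, &ampdu_stat_fops);
 */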
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
deleted file mode 100644 (file)
index 374cc65..0000000
+++ /dev/null
@@ -1,877 +0,0 @@
-/*
- * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "mt76x2.h"
-
-#define RADAR_SPEC(m, len, el, eh, wl, wh,             \
-                  w_tolerance, tl, th, t_tolerance,    \
-                  bl, bh, event_exp, power_jmp)        \
-{                                                      \
-       .mode = m,                                      \
-       .avg_len = len,                                 \
-       .e_low = el,                                    \
-       .e_high = eh,                                   \
-       .w_low = wl,                                    \
-       .w_high = wh,                                   \
-       .w_margin = w_tolerance,                        \
-       .t_low = tl,                                    \
-       .t_high = th,                                   \
-       .t_margin = t_tolerance,                        \
-       .b_low = bl,                                    \
-       .b_high = bh,                                   \
-       .event_expiration = event_exp,                  \
-       .pwr_jmp = power_jmp                            \
-}
-
-static const struct mt76x2_radar_specs etsi_radar_specs[] = {
-       /* 20MHz */
-       RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
-                  0x7fffffff, 0x155cc0, 0x19cc),
-       RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
-                  0x7fffffff, 0x155cc0, 0x19cc),
-       RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
-                  0x7fffffff, 0x155cc0, 0x19dd),
-       RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
-                  0x7fffffff, 0x2191c0, 0x15cc),
-       /* 40MHz */
-       RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
-                  0x7fffffff, 0x155cc0, 0x19cc),
-       RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
-                  0x7fffffff, 0x155cc0, 0x19cc),
-       RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
-                  0x7fffffff, 0x155cc0, 0x19dd),
-       RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
-                  0x7fffffff, 0x2191c0, 0x15cc),
-       /* 80MHz */
-       RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
-                  0x7fffffff, 0x155cc0, 0x19cc),
-       RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
-                  0x7fffffff, 0x155cc0, 0x19cc),
-       RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
-                  0x7fffffff, 0x155cc0, 0x19dd),
-       RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
-                  0x7fffffff, 0x2191c0, 0x15cc)
-};
-
-static const struct mt76x2_radar_specs fcc_radar_specs[] = {
-       /* 20MHz */
-       RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
-                  0x7fffffff, 0xfe808, 0x13dc),
-       RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
-                  0x7fffffff, 0xfe808, 0x19dd),
-       RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
-                  0x7fffffff, 0xfe808, 0x12cc),
-       RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
-                  0x3938700, 0x57bcf00, 0x1289),
-       /* 40MHz */
-       RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
-                  0x7fffffff, 0xfe808, 0x13dc),
-       RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
-                  0x7fffffff, 0xfe808, 0x19dd),
-       RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
-                  0x7fffffff, 0xfe808, 0x12cc),
-       RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
-                  0x3938700, 0x57bcf00, 0x1289),
-       /* 80MHz */
-       RADAR_SPEC(0, 8, 2, 14, 106, 150, 15, 2900, 80100, 15, 0,
-                  0x7fffffff, 0xfe808, 0x16cc),
-       RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
-                  0x7fffffff, 0xfe808, 0x19dd),
-       RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
-                  0x7fffffff, 0xfe808, 0x12cc),
-       RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
-                  0x3938700, 0x57bcf00, 0x1289)
-};
-
-static const struct mt76x2_radar_specs jp_w56_radar_specs[] = {
-       /* 20MHz */
-       RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
-                  0x7fffffff, 0x14c080, 0x13dc),
-       RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
-                  0x7fffffff, 0x14c080, 0x19dd),
-       RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
-                  0x7fffffff, 0x14c080, 0x12cc),
-       RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
-                  0x3938700, 0x57bcf00, 0x1289),
-       /* 40MHz */
-       RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
-                  0x7fffffff, 0x14c080, 0x13dc),
-       RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
-                  0x7fffffff, 0x14c080, 0x19dd),
-       RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
-                  0x7fffffff, 0x14c080, 0x12cc),
-       RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
-                  0x3938700, 0x57bcf00, 0x1289),
-       /* 80MHz */
-       RADAR_SPEC(0, 8, 2, 9, 106, 150, 15, 2900, 80100, 15, 0,
-                  0x7fffffff, 0x14c080, 0x16cc),
-       RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
-                  0x7fffffff, 0x14c080, 0x19dd),
-       RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
-                  0x7fffffff, 0x14c080, 0x12cc),
-       RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
-                  0x3938700, 0x57bcf00, 0x1289),
-};
-
-static const struct mt76x2_radar_specs jp_w53_radar_specs[] = {
-       /* 20MHz */
-       RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
-                  0x7fffffff, 0x14c080, 0x16cc),
-       { 0 },
-       RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
-                  0x7fffffff, 0x14c080, 0x16cc),
-       { 0 },
-       /* 40MHz */
-       RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
-                  0x7fffffff, 0x14c080, 0x16cc),
-       { 0 },
-       RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
-                  0x7fffffff, 0x14c080, 0x16cc),
-       { 0 },
-       /* 80MHz */
-       RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
-                  0x7fffffff, 0x14c080, 0x16cc),
-       { 0 },
-       RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
-                  0x7fffffff, 0x14c080, 0x16cc),
-       { 0 }
-};
-
-static void mt76x2_dfs_set_capture_mode_ctrl(struct mt76x2_dev *dev,
-                                            u8 enable)
-{
-       u32 data;
-
-       data = (1 << 1) | enable;
-       mt76_wr(dev, MT_BBP(DFS, 36), data);
-}
-
-static void mt76x2_dfs_seq_pool_put(struct mt76x2_dev *dev,
-                                   struct mt76x2_dfs_sequence *seq)
-{
-       struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
-
-       list_add(&seq->head, &dfs_pd->seq_pool);
-
-       dfs_pd->seq_stats.seq_pool_len++;
-       dfs_pd->seq_stats.seq_len--;
-}
-
-static
-struct mt76x2_dfs_sequence *mt76x2_dfs_seq_pool_get(struct mt76x2_dev *dev)
-{
-       struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
-       struct mt76x2_dfs_sequence *seq;
-
-       if (list_empty(&dfs_pd->seq_pool)) {
-               seq = devm_kzalloc(dev->mt76.dev, sizeof(*seq), GFP_ATOMIC);
-       } else {
-               seq = list_first_entry(&dfs_pd->seq_pool,
-                                      struct mt76x2_dfs_sequence,
-                                      head);
-               list_del(&seq->head);
-               dfs_pd->seq_stats.seq_pool_len--;
-       }
-       if (seq)
-               dfs_pd->seq_stats.seq_len++;
-
-       return seq;
-}
-
-static int mt76x2_dfs_get_multiple(int val, int frac, int margin)
-{
-       int remainder, factor;
-
-       if (!frac)
-               return 0;
-
-       if (abs(val - frac) <= margin)
-               return 1;
-
-       factor = val / frac;
-       remainder = val % frac;
-
-       if (remainder > margin) {
-               if ((frac - remainder) <= margin)
-                       factor++;
-               else
-                       factor = 0;
-       }
-       return factor;
-}
-
-static void mt76x2_dfs_detector_reset(struct mt76x2_dev *dev)
-{
-       struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
-       struct mt76x2_dfs_sequence *seq, *tmp_seq;
-       int i;
-
-       /* reset hw detector */
-       mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
-
-       /* reset sw detector */
-       for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) {
-               dfs_pd->event_rb[i].h_rb = 0;
-               dfs_pd->event_rb[i].t_rb = 0;
-       }
-
-       list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
-               list_del_init(&seq->head);
-               mt76x2_dfs_seq_pool_put(dev, seq);
-       }
-}
-
-static bool mt76x2_dfs_check_chirp(struct mt76x2_dev *dev)
-{
-       bool ret = false;
-       u32 current_ts, delta_ts;
-       struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
-
-       current_ts = mt76_rr(dev, MT_PBF_LIFE_TIMER);
-       delta_ts = current_ts - dfs_pd->chirp_pulse_ts;
-       dfs_pd->chirp_pulse_ts = current_ts;
-
-       /* 12 s: timer ticks are ~1 us, so 12 * (1 << 20) is about 12 seconds */
-       if (delta_ts <= (12 * (1 << 20))) {
-               if (++dfs_pd->chirp_pulse_cnt > 8)
-                       ret = true;
-       } else {
-               dfs_pd->chirp_pulse_cnt = 1;
-       }
-
-       return ret;
-}
-
-static void mt76x2_dfs_get_hw_pulse(struct mt76x2_dev *dev,
-                                   struct mt76x2_dfs_hw_pulse *pulse)
-{
-       u32 data;
-
-       /* select channel */
-       data = (MT_DFS_CH_EN << 16) | pulse->engine;
-       mt76_wr(dev, MT_BBP(DFS, 0), data);
-
-       /* reported period */
-       pulse->period = mt76_rr(dev, MT_BBP(DFS, 19));
-
-       /* reported width */
-       pulse->w1 = mt76_rr(dev, MT_BBP(DFS, 20));
-       pulse->w2 = mt76_rr(dev, MT_BBP(DFS, 23));
-
-       /* reported burst number */
-       pulse->burst = mt76_rr(dev, MT_BBP(DFS, 22));
-}
-
-static bool mt76x2_dfs_check_hw_pulse(struct mt76x2_dev *dev,
-                                     struct mt76x2_dfs_hw_pulse *pulse)
-{
-       bool ret = false;
-
-       if (!pulse->period || !pulse->w1)
-               return false;
-
-       switch (dev->dfs_pd.region) {
-       case NL80211_DFS_FCC:
-               if (pulse->engine > 3)
-                       break;
-
-               if (pulse->engine == 3) {
-                       ret = mt76x2_dfs_check_chirp(dev);
-                       break;
-               }
-
-               /* check short pulse */
-               if (pulse->w1 < 120)
-                       ret = (pulse->period >= 2900 &&
-                              (pulse->period <= 4700 ||
-                               pulse->period >= 6400) &&
-                              (pulse->period <= 6800 ||
-                               pulse->period >= 10200) &&
-                              pulse->period <= 61600);
-               else if (pulse->w1 < 130) /* 120 - 130 */
-                       ret = (pulse->period >= 2900 &&
-                              pulse->period <= 61600);
-               else
-                       ret = (pulse->period >= 3500 &&
-                              pulse->period <= 10100);
-               break;
-       case NL80211_DFS_ETSI:
-               if (pulse->engine >= 3)
-                       break;
-
-               ret = (pulse->period >= 4900 &&
-                      (pulse->period <= 10200 ||
-                       pulse->period >= 12400) &&
-                      pulse->period <= 100100);
-               break;
-       case NL80211_DFS_JP:
-               if (dev->mt76.chandef.chan->center_freq >= 5250 &&
-                   dev->mt76.chandef.chan->center_freq <= 5350) {
-                       /* JPW53 */
-                       if (pulse->w1 <= 130)
-                               ret = (pulse->period >= 28360 &&
-                                      (pulse->period <= 28700 ||
-                                       pulse->period >= 76900) &&
-                                      pulse->period <= 76940);
-                       break;
-               }
-
-               if (pulse->engine > 3)
-                       break;
-
-               if (pulse->engine == 3) {
-                       ret = mt76x2_dfs_check_chirp(dev);
-                       break;
-               }
-
-               /* check short pulse */
-               if (pulse->w1 < 120)
-                       ret = (pulse->period >= 2900 &&
-                              (pulse->period <= 4700 ||
-                               pulse->period >= 6400) &&
-                              (pulse->period <= 6800 ||
-                               pulse->period >= 27560) &&
-                              (pulse->period <= 27960 ||
-                               pulse->period >= 28360) &&
-                              (pulse->period <= 28700 ||
-                               pulse->period >= 79900) &&
-                              pulse->period <= 80100);
-               else if (pulse->w1 < 130) /* 120 - 130 */
-                       ret = (pulse->period >= 2900 &&
-                              (pulse->period <= 10100 ||
-                               pulse->period >= 27560) &&
-                              (pulse->period <= 27960 ||
-                               pulse->period >= 28360) &&
-                              (pulse->period <= 28700 ||
-                               pulse->period >= 79900) &&
-                              pulse->period <= 80100);
-               else
-                       ret = (pulse->period >= 3900 &&
-                              pulse->period <= 10100);
-               break;
-       case NL80211_DFS_UNSET:
-       default:
-               return false;
-       }
-
-       return ret;
-}
-
-static bool mt76x2_dfs_fetch_event(struct mt76x2_dev *dev,
-                                  struct mt76x2_dfs_event *event)
-{
-       u32 data;
-
-       /* 1st: DFS_R37[31]: 0 -> engine 0, 1 -> engine 2
-        * 2nd: DFS_R37[21:0]: pulse time
-        * 3rd: DFS_R37[11:0]: pulse width
-        * 3rd: DFS_R37[25:16]: phase
-        * 4th: DFS_R37[12:0]: current pwr
-        * 4th: DFS_R37[21:16]: pwr stable counter
-        *
-        * 1st: DFS_R37[31:0] set to 0xffffffff means no event detected
-        */
-       data = mt76_rr(dev, MT_BBP(DFS, 37));
-       if (!MT_DFS_CHECK_EVENT(data))
-               return false;
-
-       event->engine = MT_DFS_EVENT_ENGINE(data);
-       data = mt76_rr(dev, MT_BBP(DFS, 37));
-       event->ts = MT_DFS_EVENT_TIMESTAMP(data);
-       data = mt76_rr(dev, MT_BBP(DFS, 37));
-       event->width = MT_DFS_EVENT_WIDTH(data);
-
-       return true;
-}
-
-static bool mt76x2_dfs_check_event(struct mt76x2_dev *dev,
-                                  struct mt76x2_dfs_event *event)
-{
-       if (event->engine == 2) {
-               struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
-               struct mt76x2_dfs_event_rb *event_buff = &dfs_pd->event_rb[1];
-               u16 last_event_idx;
-               u32 delta_ts;
-
-               last_event_idx = mt76_decr(event_buff->t_rb,
-                                          MT_DFS_EVENT_BUFLEN);
-               delta_ts = event->ts - event_buff->data[last_event_idx].ts;
-               if (delta_ts < MT_DFS_EVENT_TIME_MARGIN &&
-                   event_buff->data[last_event_idx].width >= 200)
-                       return false;
-       }
-       return true;
-}
-
-static void mt76x2_dfs_queue_event(struct mt76x2_dev *dev,
-                                  struct mt76x2_dfs_event *event)
-{
-       struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
-       struct mt76x2_dfs_event_rb *event_buff;
-
-       /* add radar event to ring buffer */
-       event_buff = event->engine == 2 ? &dfs_pd->event_rb[1]
-                                       : &dfs_pd->event_rb[0];
-       event_buff->data[event_buff->t_rb] = *event;
-       event_buff->data[event_buff->t_rb].fetch_ts = jiffies;
-
-       event_buff->t_rb = mt76_incr(event_buff->t_rb, MT_DFS_EVENT_BUFLEN);
-       if (event_buff->t_rb == event_buff->h_rb)
-               event_buff->h_rb = mt76_incr(event_buff->h_rb,
-                                            MT_DFS_EVENT_BUFLEN);
-}
-
-static int mt76x2_dfs_create_sequence(struct mt76x2_dev *dev,
-                                     struct mt76x2_dfs_event *event,
-                                     u16 cur_len)
-{
-       struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
-       struct mt76x2_dfs_sw_detector_params *sw_params;
-       u32 width_delta, width_sum, factor, cur_pri;
-       struct mt76x2_dfs_sequence seq, *seq_p;
-       struct mt76x2_dfs_event_rb *event_rb;
-       struct mt76x2_dfs_event *cur_event;
-       int i, j, end, pri;
-
-       event_rb = event->engine == 2 ? &dfs_pd->event_rb[1]
-                                     : &dfs_pd->event_rb[0];
-
-       i = mt76_decr(event_rb->t_rb, MT_DFS_EVENT_BUFLEN);
-       end = mt76_decr(event_rb->h_rb, MT_DFS_EVENT_BUFLEN);
-
-       while (i != end) {
-               cur_event = &event_rb->data[i];
-               width_sum = event->width + cur_event->width;
-
-               sw_params = &dfs_pd->sw_dpd_params;
-               switch (dev->dfs_pd.region) {
-               case NL80211_DFS_FCC:
-               case NL80211_DFS_JP:
-                       if (width_sum < 600)
-                               width_delta = 8;
-                       else
-                               width_delta = width_sum >> 3;
-                       break;
-               case NL80211_DFS_ETSI:
-                       if (event->engine == 2)
-                               width_delta = width_sum >> 6;
-                       else if (width_sum < 620)
-                               width_delta = 24;
-                       else
-                               width_delta = 8;
-                       break;
-               case NL80211_DFS_UNSET:
-               default:
-                       return -EINVAL;
-               }
-
-               pri = event->ts - cur_event->ts;
-               if (abs(event->width - cur_event->width) > width_delta ||
-                   pri < sw_params->min_pri)
-                       goto next;
-
-               if (pri > sw_params->max_pri)
-                       break;
-
-               seq.pri = event->ts - cur_event->ts;
-               seq.first_ts = cur_event->ts;
-               seq.last_ts = event->ts;
-               seq.engine = event->engine;
-               seq.count = 2;
-
-               j = mt76_decr(i, MT_DFS_EVENT_BUFLEN);
-               while (j != end) {
-                       cur_event = &event_rb->data[j];
-                       cur_pri = event->ts - cur_event->ts;
-                       factor = mt76x2_dfs_get_multiple(cur_pri, seq.pri,
-                                               sw_params->pri_margin);
-                       if (factor > 0) {
-                               seq.first_ts = cur_event->ts;
-                               seq.count++;
-                       }
-
-                       j = mt76_decr(j, MT_DFS_EVENT_BUFLEN);
-               }
-               if (seq.count <= cur_len)
-                       goto next;
-
-               seq_p = mt76x2_dfs_seq_pool_get(dev);
-               if (!seq_p)
-                       return -ENOMEM;
-
-               *seq_p = seq;
-               INIT_LIST_HEAD(&seq_p->head);
-               list_add(&seq_p->head, &dfs_pd->sequences);
-next:
-               i = mt76_decr(i, MT_DFS_EVENT_BUFLEN);
-       }
-       return 0;
-}
-
-static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x2_dev *dev,
-                                           struct mt76x2_dfs_event *event)
-{
-       struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
-       struct mt76x2_dfs_sw_detector_params *sw_params;
-       struct mt76x2_dfs_sequence *seq, *tmp_seq;
-       u16 max_seq_len = 0;
-       u32 factor, pri;
-
-       sw_params = &dfs_pd->sw_dpd_params;
-       list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
-               if (event->ts > seq->first_ts + MT_DFS_SEQUENCE_WINDOW) {
-                       list_del_init(&seq->head);
-                       mt76x2_dfs_seq_pool_put(dev, seq);
-                       continue;
-               }
-
-               if (event->engine != seq->engine)
-                       continue;
-
-               pri = event->ts - seq->last_ts;
-               factor = mt76x2_dfs_get_multiple(pri, seq->pri,
-                                                sw_params->pri_margin);
-               if (factor > 0) {
-                       seq->last_ts = event->ts;
-                       seq->count++;
-                       max_seq_len = max_t(u16, max_seq_len, seq->count);
-               }
-       }
-       return max_seq_len;
-}
-
-static bool mt76x2_dfs_check_detection(struct mt76x2_dev *dev)
-{
-       struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
-       struct mt76x2_dfs_sequence *seq;
-
-       if (list_empty(&dfs_pd->sequences))
-               return false;
-
-       list_for_each_entry(seq, &dfs_pd->sequences, head) {
-               if (seq->count > MT_DFS_SEQUENCE_TH) {
-                       dfs_pd->stats[seq->engine].sw_pattern++;
-                       return true;
-               }
-       }
-       return false;
-}
-
-static void mt76x2_dfs_add_events(struct mt76x2_dev *dev)
-{
-       struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
-       struct mt76x2_dfs_event event;
-       int i, seq_len;
-
-       /* disable debug mode */
-       mt76x2_dfs_set_capture_mode_ctrl(dev, false);
-       for (i = 0; i < MT_DFS_EVENT_LOOP; i++) {
-               if (!mt76x2_dfs_fetch_event(dev, &event))
-                       break;
-
-               if (dfs_pd->last_event_ts > event.ts)
-                       mt76x2_dfs_detector_reset(dev);
-               dfs_pd->last_event_ts = event.ts;
-
-               if (!mt76x2_dfs_check_event(dev, &event))
-                       continue;
-
-               seq_len = mt76x2_dfs_add_event_to_sequence(dev, &event);
-               mt76x2_dfs_create_sequence(dev, &event, seq_len);
-
-               mt76x2_dfs_queue_event(dev, &event);
-       }
-       mt76x2_dfs_set_capture_mode_ctrl(dev, true);
-}
-
-static void mt76x2_dfs_check_event_window(struct mt76x2_dev *dev)
-{
-       struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
-       struct mt76x2_dfs_event_rb *event_buff;
-       struct mt76x2_dfs_event *event;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) {
-               event_buff = &dfs_pd->event_rb[i];
-
-               while (event_buff->h_rb != event_buff->t_rb) {
-                       event = &event_buff->data[event_buff->h_rb];
-
-                       /* time-ordered ring: stop at the first still-valid event */
-                       if (time_is_after_jiffies(event->fetch_ts +
-                                                 MT_DFS_EVENT_WINDOW))
-                               break;
-                       event_buff->h_rb = mt76_incr(event_buff->h_rb,
-                                                    MT_DFS_EVENT_BUFLEN);
-               }
-       }
-}
-
-static void mt76x2_dfs_tasklet(unsigned long arg)
-{
-       struct mt76x2_dev *dev = (struct mt76x2_dev *)arg;
-       struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
-       u32 engine_mask;
-       int i;
-
-       if (test_bit(MT76_SCANNING, &dev->mt76.state))
-               goto out;
-
-       if (time_is_before_jiffies(dfs_pd->last_sw_check +
-                                  MT_DFS_SW_TIMEOUT)) {
-               bool radar_detected;
-
-               dfs_pd->last_sw_check = jiffies;
-
-               mt76x2_dfs_add_events(dev);
-               radar_detected = mt76x2_dfs_check_detection(dev);
-               if (radar_detected) {
-                       /* sw detector received a radar pattern */
-                       ieee80211_radar_detected(dev->mt76.hw);
-                       mt76x2_dfs_detector_reset(dev);
-
-                       return;
-               }
-               mt76x2_dfs_check_event_window(dev);
-       }
-
-       engine_mask = mt76_rr(dev, MT_BBP(DFS, 1));
-       if (!(engine_mask & 0xf))
-               goto out;
-
-       for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
-               struct mt76x2_dfs_hw_pulse pulse;
-
-               if (!(engine_mask & (1 << i)))
-                       continue;
-
-               pulse.engine = i;
-               mt76x2_dfs_get_hw_pulse(dev, &pulse);
-
-               if (!mt76x2_dfs_check_hw_pulse(dev, &pulse)) {
-                       dfs_pd->stats[i].hw_pulse_discarded++;
-                       continue;
-               }
-
-               /* hw detector received a radar pattern */
-               dfs_pd->stats[i].hw_pattern++;
-               ieee80211_radar_detected(dev->mt76.hw);
-               mt76x2_dfs_detector_reset(dev);
-
-               return;
-       }
-
-       /* reset hw detector */
-       mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
-
-out:
-       mt76x2_irq_enable(dev, MT_INT_GPTIMER);
-}
-
-static void mt76x2_dfs_init_sw_detector(struct mt76x2_dev *dev)
-{
-       struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
-
-       switch (dev->dfs_pd.region) {
-       case NL80211_DFS_FCC:
-               dfs_pd->sw_dpd_params.max_pri = MT_DFS_FCC_MAX_PRI;
-               dfs_pd->sw_dpd_params.min_pri = MT_DFS_FCC_MIN_PRI;
-               dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN;
-               break;
-       case NL80211_DFS_ETSI:
-               dfs_pd->sw_dpd_params.max_pri = MT_DFS_ETSI_MAX_PRI;
-               dfs_pd->sw_dpd_params.min_pri = MT_DFS_ETSI_MIN_PRI;
-               dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN << 2;
-               break;
-       case NL80211_DFS_JP:
-               dfs_pd->sw_dpd_params.max_pri = MT_DFS_JP_MAX_PRI;
-               dfs_pd->sw_dpd_params.min_pri = MT_DFS_JP_MIN_PRI;
-               dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN;
-               break;
-       case NL80211_DFS_UNSET:
-       default:
-               break;
-       }
-}
-
-static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev)
-{
-       u32 data;
-       u8 i, shift;
-       const struct mt76x2_radar_specs *radar_specs;
-
-       switch (dev->mt76.chandef.width) {
-       case NL80211_CHAN_WIDTH_40:
-               shift = MT_DFS_NUM_ENGINES;
-               break;
-       case NL80211_CHAN_WIDTH_80:
-               shift = 2 * MT_DFS_NUM_ENGINES;
-               break;
-       default:
-               shift = 0;
-               break;
-       }
-
-       switch (dev->dfs_pd.region) {
-       case NL80211_DFS_FCC:
-               radar_specs = &fcc_radar_specs[shift];
-               break;
-       case NL80211_DFS_ETSI:
-               radar_specs = &etsi_radar_specs[shift];
-               break;
-       case NL80211_DFS_JP:
-               if (dev->mt76.chandef.chan->center_freq >= 5250 &&
-                   dev->mt76.chandef.chan->center_freq <= 5350)
-                       radar_specs = &jp_w53_radar_specs[shift];
-               else
-                       radar_specs = &jp_w56_radar_specs[shift];
-               break;
-       case NL80211_DFS_UNSET:
-       default:
-               return;
-       }
-
-       data = (MT_DFS_VGA_MASK << 16) |
-              (MT_DFS_PWR_GAIN_OFFSET << 12) |
-              (MT_DFS_PWR_DOWN_TIME << 8) |
-              (MT_DFS_SYM_ROUND << 4) |
-              (MT_DFS_DELTA_DELAY & 0xf);
-       mt76_wr(dev, MT_BBP(DFS, 2), data);
-
-       data = (MT_DFS_RX_PE_MASK << 16) | MT_DFS_PKT_END_MASK;
-       mt76_wr(dev, MT_BBP(DFS, 3), data);
-
-       for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
-               /* configure engine */
-               mt76_wr(dev, MT_BBP(DFS, 0), i);
-
-               /* detection mode + avg_len */
-               data = ((radar_specs[i].avg_len & 0x1ff) << 16) |
-                      (radar_specs[i].mode & 0xf);
-               mt76_wr(dev, MT_BBP(DFS, 4), data);
-
-               /* dfs energy */
-               data = ((radar_specs[i].e_high & 0x0fff) << 16) |
-                      (radar_specs[i].e_low & 0x0fff);
-               mt76_wr(dev, MT_BBP(DFS, 5), data);
-
-               /* dfs period */
-               mt76_wr(dev, MT_BBP(DFS, 7), radar_specs[i].t_low);
-               mt76_wr(dev, MT_BBP(DFS, 9), radar_specs[i].t_high);
-
-               /* dfs burst */
-               mt76_wr(dev, MT_BBP(DFS, 11), radar_specs[i].b_low);
-               mt76_wr(dev, MT_BBP(DFS, 13), radar_specs[i].b_high);
-
-               /* dfs width */
-               data = ((radar_specs[i].w_high & 0x0fff) << 16) |
-                      (radar_specs[i].w_low & 0x0fff);
-               mt76_wr(dev, MT_BBP(DFS, 14), data);
-
-               /* dfs margins */
-               data = (radar_specs[i].w_margin << 16) |
-                      radar_specs[i].t_margin;
-               mt76_wr(dev, MT_BBP(DFS, 15), data);
-
-               /* dfs event expiration */
-               mt76_wr(dev, MT_BBP(DFS, 17), radar_specs[i].event_expiration);
-
-               /* dfs pwr adj */
-               mt76_wr(dev, MT_BBP(DFS, 30), radar_specs[i].pwr_jmp);
-       }
-
-       /* reset status */
-       mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
-       mt76_wr(dev, MT_BBP(DFS, 36), 0x3);
-
-       /* enable detection */
-       mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16);
-       mt76_wr(dev, 0x212c, 0x0c350001);
-}
-
-void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev)
-{
-       u32 agc_r8, agc_r4, val_r8, val_r4, dfs_r31;
-
-       agc_r8 = mt76_rr(dev, MT_BBP(AGC, 8));
-       agc_r4 = mt76_rr(dev, MT_BBP(AGC, 4));
-
-       val_r8 = (agc_r8 & 0x00007e00) >> 9;
-       val_r4 = agc_r4 & ~0x1f000000;
-       val_r4 += (((val_r8 + 1) >> 1) << 24);
-       mt76_wr(dev, MT_BBP(AGC, 4), val_r4);
-
-       dfs_r31 = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, val_r4);
-       dfs_r31 += val_r8;
-       dfs_r31 -= (agc_r8 & 0x00000038) >> 3;
-       dfs_r31 = (dfs_r31 << 16) | 0x00000307;
-       mt76_wr(dev, MT_BBP(DFS, 31), dfs_r31);
-
-       mt76_wr(dev, MT_BBP(DFS, 32), 0x00040071);
-}
-
-void mt76x2_dfs_init_params(struct mt76x2_dev *dev)
-{
-       struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
-
-       if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
-           dev->dfs_pd.region != NL80211_DFS_UNSET) {
-               mt76x2_dfs_init_sw_detector(dev);
-               mt76x2_dfs_set_bbp_params(dev);
-               /* enable debug mode */
-               mt76x2_dfs_set_capture_mode_ctrl(dev, true);
-
-               mt76x2_irq_enable(dev, MT_INT_GPTIMER);
-               mt76_rmw_field(dev, MT_INT_TIMER_EN,
-                              MT_INT_TIMER_EN_GP_TIMER_EN, 1);
-       } else {
-               /* disable hw detector */
-               mt76_wr(dev, MT_BBP(DFS, 0), 0);
-               /* clear detector status */
-               mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
-               mt76_wr(dev, 0x212c, 0);
-
-               mt76x2_irq_disable(dev, MT_INT_GPTIMER);
-               mt76_rmw_field(dev, MT_INT_TIMER_EN,
-                              MT_INT_TIMER_EN_GP_TIMER_EN, 0);
-       }
-}
-
-void mt76x2_dfs_init_detector(struct mt76x2_dev *dev)
-{
-       struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
-
-       INIT_LIST_HEAD(&dfs_pd->sequences);
-       INIT_LIST_HEAD(&dfs_pd->seq_pool);
-       dfs_pd->region = NL80211_DFS_UNSET;
-       dfs_pd->last_sw_check = jiffies;
-       tasklet_init(&dfs_pd->dfs_tasklet, mt76x2_dfs_tasklet,
-                    (unsigned long)dev);
-}
-
-void mt76x2_dfs_set_domain(struct mt76x2_dev *dev,
-                          enum nl80211_dfs_regions region)
-{
-       struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
-
-       if (dfs_pd->region != region) {
-               tasklet_disable(&dfs_pd->dfs_tasklet);
-               dfs_pd->region = region;
-               mt76x2_dfs_init_params(dev);
-               tasklet_enable(&dfs_pd->dfs_tasklet);
-       }
-}
-
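Most of the software detector hinges on mt76x2_dfs_get_multiple() above: given a measured interval, a candidate PRI and a jitter margin, it returns how many whole base periods fit, or 0 if the interval is off-grid. A standalone restatement with a worked example:

#include <stdio.h>
#include <stdlib.h>

/* Same logic as mt76x2_dfs_get_multiple() in the removed file. */
static int get_multiple(int val, int frac, int margin)
{
	int remainder, factor;

	if (!frac)
		return 0;
	if (abs(val - frac) <= margin)
		return 1;

	factor = val / frac;
	remainder = val % frac;
	if (remainder > margin) {
		if ((frac - remainder) <= margin)
			factor++;	/* just short of the next multiple */
		else
			factor = 0;	/* off-grid: reject */
	}
	return factor;
}

int main(void)
{
	printf("%d\n", get_multiple(9000, 3000, 4));	/* 3: exact multiple */
	printf("%d\n", get_multiple(8997, 3000, 4));	/* 3: within margin */
	printf("%d\n", get_multiple(9010, 3000, 4));	/* 0: too much jitter */
	return 0;
}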
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h
deleted file mode 100644 (file)
index 693f421..0000000
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef __MT76x2_DFS_H
-#define __MT76x2_DFS_H
-
-#include <linux/types.h>
-#include <linux/nl80211.h>
-
-#define MT_DFS_GP_INTERVAL             (10 << 4) /* 64 us unit */
-#define MT_DFS_NUM_ENGINES             4
-
-/* bbp params */
-#define MT_DFS_SYM_ROUND               0
-#define MT_DFS_DELTA_DELAY             2
-#define MT_DFS_VGA_MASK                        0
-#define MT_DFS_PWR_GAIN_OFFSET         3
-#define MT_DFS_PWR_DOWN_TIME           0xf
-#define MT_DFS_RX_PE_MASK              0xff
-#define MT_DFS_PKT_END_MASK            0
-#define MT_DFS_CH_EN                   0xf
-
-/* sw detector params */
-#define MT_DFS_EVENT_LOOP              64
-#define MT_DFS_SW_TIMEOUT              (HZ / 20)
-#define MT_DFS_EVENT_WINDOW            (HZ / 5)
-#define MT_DFS_SEQUENCE_WINDOW         (200 * (1 << 20))
-#define MT_DFS_EVENT_TIME_MARGIN       2000
-#define MT_DFS_PRI_MARGIN              4
-#define MT_DFS_SEQUENCE_TH             6
-
-#define MT_DFS_FCC_MAX_PRI             ((28570 << 1) + 1000)
-#define MT_DFS_FCC_MIN_PRI             (3000 - 2)
-#define MT_DFS_JP_MAX_PRI              ((80000 << 1) + 1000)
-#define MT_DFS_JP_MIN_PRI              (28500 - 2)
-#define MT_DFS_ETSI_MAX_PRI            (133333 + 125000 + 117647 + 1000)
-#define MT_DFS_ETSI_MIN_PRI            (4500 - 20)
-
-struct mt76x2_radar_specs {
-       u8 mode;
-       u16 avg_len;
-       u16 e_low;
-       u16 e_high;
-       u16 w_low;
-       u16 w_high;
-       u16 w_margin;
-       u32 t_low;
-       u32 t_high;
-       u16 t_margin;
-       u32 b_low;
-       u32 b_high;
-       u32 event_expiration;
-       u16 pwr_jmp;
-};
-
-#define MT_DFS_CHECK_EVENT(x)          ((x) != GENMASK(31, 0))
-#define MT_DFS_EVENT_ENGINE(x)         (((x) & BIT(31)) ? 2 : 0)
-#define MT_DFS_EVENT_TIMESTAMP(x)      ((x) & GENMASK(21, 0))
-#define MT_DFS_EVENT_WIDTH(x)          ((x) & GENMASK(11, 0))
-struct mt76x2_dfs_event {
-       unsigned long fetch_ts;
-       u32 ts;
-       u16 width;
-       u8 engine;
-};
-
-#define MT_DFS_EVENT_BUFLEN            256
-struct mt76x2_dfs_event_rb {
-       struct mt76x2_dfs_event data[MT_DFS_EVENT_BUFLEN];
-       int h_rb, t_rb;
-};
-
-struct mt76x2_dfs_sequence {
-       struct list_head head;
-       u32 first_ts;
-       u32 last_ts;
-       u32 pri;
-       u16 count;
-       u8 engine;
-};
-
-struct mt76x2_dfs_hw_pulse {
-       u8 engine;
-       u32 period;
-       u32 w1;
-       u32 w2;
-       u32 burst;
-};
-
-struct mt76x2_dfs_sw_detector_params {
-       u32 min_pri;
-       u32 max_pri;
-       u32 pri_margin;
-};
-
-struct mt76x2_dfs_engine_stats {
-       u32 hw_pattern;
-       u32 hw_pulse_discarded;
-       u32 sw_pattern;
-};
-
-struct mt76x2_dfs_seq_stats {
-       u32 seq_pool_len;
-       u32 seq_len;
-};
-
-struct mt76x2_dfs_pattern_detector {
-       enum nl80211_dfs_regions region;
-
-       u8 chirp_pulse_cnt;
-       u32 chirp_pulse_ts;
-
-       struct mt76x2_dfs_sw_detector_params sw_dpd_params;
-       struct mt76x2_dfs_event_rb event_rb[2];
-
-       struct list_head sequences;
-       struct list_head seq_pool;
-       struct mt76x2_dfs_seq_stats seq_stats;
-
-       unsigned long last_sw_check;
-       u32 last_event_ts;
-
-       struct mt76x2_dfs_engine_stats stats[MT_DFS_NUM_ENGINES];
-       struct tasklet_struct dfs_tasklet;
-};
-
-void mt76x2_dfs_init_params(struct mt76x2_dev *dev);
-void mt76x2_dfs_init_detector(struct mt76x2_dev *dev);
-void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev);
-void mt76x2_dfs_set_domain(struct mt76x2_dev *dev,
-                          enum nl80211_dfs_regions region);
-
-#endif /* __MT76x2_DFS_H */
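mt76x2_dfs_event_rb relies on MT_DFS_EVENT_BUFLEN being a power of two: the driver's mt76_incr()/mt76_decr() helpers wrap the head/tail indices with a mask instead of a modulo, and a full ring overwrites the oldest entry by pushing the head forward (see mt76x2_dfs_queue_event). The same discipline in miniature:

#include <stdio.h>

#define BUFLEN 8	/* power of two, like MT_DFS_EVENT_BUFLEN */

static int incr(int v) { return (v + 1) & (BUFLEN - 1); }

struct ring { int data[BUFLEN]; int h_rb, t_rb; };

/* Overwrite-oldest push: when the tail catches the head, advance both. */
static void push(struct ring *r, int v)
{
	r->data[r->t_rb] = v;
	r->t_rb = incr(r->t_rb);
	if (r->t_rb == r->h_rb)
		r->h_rb = incr(r->h_rb);
}

int main(void)
{
	struct ring r = { .h_rb = 0, .t_rb = 0 };
	int i;

	for (i = 0; i < 10; i++)
		push(&r, i);
	/* 10 pushes into 8 slots: the newest 7 survive (one slot is
	 * sacrificed so that h_rb == t_rb still means "empty"). */
	printf("head=%d tail=%d oldest=%d\n", r.h_rb, r.t_rb, r.data[r.h_rb]);
	return 0;
}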
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
deleted file mode 100644 (file)
index 6720a6a..0000000
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "mt76x2.h"
-#include "mt76x2_dma.h"
-
-int
-mt76x2_tx_queue_mcu(struct mt76x2_dev *dev, enum mt76_txq_id qid,
-                   struct sk_buff *skb, int cmd, int seq)
-{
-       struct mt76_queue *q = &dev->mt76.q_tx[qid];
-       struct mt76_queue_buf buf;
-       dma_addr_t addr;
-       u32 tx_info;
-
-       tx_info = MT_MCU_MSG_TYPE_CMD |
-                 FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
-                 FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
-                 FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
-                 FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
-
-       addr = dma_map_single(dev->mt76.dev, skb->data, skb->len,
-                             DMA_TO_DEVICE);
-       if (dma_mapping_error(dev->mt76.dev, addr))
-               return -ENOMEM;
-
-       buf.addr = addr;
-       buf.len = skb->len;
-       spin_lock_bh(&q->lock);
-       mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
-       mt76_queue_kick(dev, q);
-       spin_unlock_bh(&q->lock);
-
-       return 0;
-}
-
-static int
-mt76x2_init_tx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
-                    int idx, int n_desc)
-{
-       int ret;
-
-       q->regs = dev->mt76.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
-       q->ndesc = n_desc;
-       q->hw_idx = idx;
-
-       ret = mt76_queue_alloc(dev, q);
-       if (ret)
-               return ret;
-
-       mt76x2_irq_enable(dev, MT_INT_TX_DONE(idx));
-
-       return 0;
-}
-
-static int
-mt76x2_init_rx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
-                    int idx, int n_desc, int bufsize)
-{
-       int ret;
-
-       q->regs = dev->mt76.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
-       q->ndesc = n_desc;
-       q->buf_size = bufsize;
-
-       ret = mt76_queue_alloc(dev, q);
-       if (ret)
-               return ret;
-
-       mt76x2_irq_enable(dev, MT_INT_RX_DONE(idx));
-
-       return 0;
-}
-
-static void
-mt76x2_tx_tasklet(unsigned long data)
-{
-       struct mt76x2_dev *dev = (struct mt76x2_dev *) data;
-       int i;
-
-       mt76x2_mac_process_tx_status_fifo(dev);
-
-       for (i = MT_TXQ_MCU; i >= 0; i--)
-               mt76_queue_tx_cleanup(dev, i, false);
-
-       mt76x2_mac_poll_tx_status(dev, false);
-       mt76x2_irq_enable(dev, MT_INT_TX_DONE_ALL);
-}
-
-int mt76x2_dma_init(struct mt76x2_dev *dev)
-{
-       static const u8 wmm_queue_map[] = {
-               [IEEE80211_AC_BE] = 0,
-               [IEEE80211_AC_BK] = 1,
-               [IEEE80211_AC_VI] = 2,
-               [IEEE80211_AC_VO] = 3,
-       };
-       int ret;
-       int i;
-       struct mt76_txwi_cache __maybe_unused *t;
-       struct mt76_queue *q;
-
-       BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x2_txwi));
-       BUILD_BUG_ON(sizeof(struct mt76x2_rxwi) > MT_RX_HEADROOM);
-
-       mt76_dma_attach(&dev->mt76);
-
-       init_waitqueue_head(&dev->mcu.wait);
-       skb_queue_head_init(&dev->mcu.res_q);
-
-       tasklet_init(&dev->tx_tasklet, mt76x2_tx_tasklet, (unsigned long) dev);
-
-       mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
-
-       for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
-               ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[i],
-                                          wmm_queue_map[i], MT_TX_RING_SIZE);
-               if (ret)
-                       return ret;
-       }
-
-       ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
-                                  MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
-       if (ret)
-               return ret;
-
-       ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
-                                  MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
-       if (ret)
-               return ret;
-
-       ret = mt76x2_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
-                                  MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
-       if (ret)
-               return ret;
-
-       q = &dev->mt76.q_rx[MT_RXQ_MAIN];
-       q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x2_rxwi);
-       ret = mt76x2_init_rx_queue(dev, q, 0, MT76x2_RX_RING_SIZE, MT_RX_BUF_SIZE);
-       if (ret)
-               return ret;
-
-       return mt76_init_queues(dev);
-}
-
-void mt76x2_dma_cleanup(struct mt76x2_dev *dev)
-{
-       tasklet_kill(&dev->tx_tasklet);
-       mt76_dma_cleanup(&dev->mt76);
-}
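The MCU queueing helper above follows the usual streaming-DMA pattern: map the command buffer, refuse to touch the ring on a mapping error, and only publish the descriptor under the queue lock. A minimal, mt76-independent sketch of that pattern (demo_send_cmd() and the bare struct device are illustrative, not driver API; a real driver unmaps at tx-completion time rather than inline):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int demo_send_cmd(struct device *dma_dev, struct sk_buff *skb)
{
	dma_addr_t addr;

	/* Map for device reads; the CPU must not touch skb->data afterwards. */
	addr = dma_map_single(dma_dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, addr))
		return -ENOMEM;

	/* ... write addr and skb->len into a ring descriptor, kick the HW ... */

	/* Simplified: unmap inline; drivers normally defer this to completion. */
	dma_unmap_single(dma_dev, addr, skb->len, DMA_TO_DEVICE);
	return 0;
}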
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
deleted file mode 100644
index da29455..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef __MT76x2_DMA_H
-#define __MT76x2_DMA_H
-
-#include "dma.h"
-
-enum mt76x2_qsel {
-       MT_QSEL_MGMT,
-       MT_QSEL_HCCA,
-       MT_QSEL_EDCA,
-       MT_QSEL_EDCA_2,
-};
-
-#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
deleted file mode 100644
index 1753bcb..0000000
+++ /dev/null
@@ -1,669 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <asm/unaligned.h>
-#include "mt76x2.h"
-#include "mt76x2_eeprom.h"
-
-#define EE_FIELD(_name, _value) [MT_EE_##_name] = (_value) | 1
-
-static int
-mt76x2_eeprom_copy(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field,
-                  void *dest, int len)
-{
-       if (field + len > dev->mt76.eeprom.size)
-               return -1;
-
-       memcpy(dest, dev->mt76.eeprom.data + field, len);
-       return 0;
-}
-
-static int
-mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev)
-{
-       void *src = dev->mt76.eeprom.data + MT_EE_MAC_ADDR;
-
-       memcpy(dev->mt76.macaddr, src, ETH_ALEN);
-       return 0;
-}
-
-void mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
-{
-       u16 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);
-
-       switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) {
-       case BOARD_TYPE_5GHZ:
-               dev->mt76.cap.has_5ghz = true;
-               break;
-       case BOARD_TYPE_2GHZ:
-               dev->mt76.cap.has_2ghz = true;
-               break;
-       default:
-               dev->mt76.cap.has_2ghz = true;
-               dev->mt76.cap.has_5ghz = true;
-               break;
-       }
-}
-EXPORT_SYMBOL_GPL(mt76x2_eeprom_parse_hw_cap);
-
-static int
-mt76x2_efuse_read(struct mt76x2_dev *dev, u16 addr, u8 *data)
-{
-       u32 val;
-       int i;
-
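	/* The efuse block returns one 16-byte row per kick: the address is
	 * rounded down to a 16-byte boundary and the row lands in the four
	 * 32-bit MT_EFUSE_DATA registers read back below. */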
-       val = mt76_rr(dev, MT_EFUSE_CTRL);
-       val &= ~(MT_EFUSE_CTRL_AIN |
-                MT_EFUSE_CTRL_MODE);
-       val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
-       val |= MT_EFUSE_CTRL_KICK;
-       mt76_wr(dev, MT_EFUSE_CTRL, val);
-
-       if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
-               return -ETIMEDOUT;
-
-       udelay(2);
-
-       val = mt76_rr(dev, MT_EFUSE_CTRL);
-       if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
-               memset(data, 0xff, 16);
-               return 0;
-       }
-
-       for (i = 0; i < 4; i++) {
-               val = mt76_rr(dev, MT_EFUSE_DATA(i));
-               put_unaligned_le32(val, data + 4 * i);
-       }
-
-       return 0;
-}
-
-static int
-mt76x2_get_efuse_data(struct mt76x2_dev *dev, void *buf, int len)
-{
-       int ret, i;
-
-       for (i = 0; i + 16 <= len; i += 16) {
-               ret = mt76x2_efuse_read(dev, i, buf + i);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static bool
-mt76x2_has_cal_free_data(struct mt76x2_dev *dev, u8 *efuse)
-{
-       u16 *efuse_w = (u16 *) efuse;
-
-       if (efuse_w[MT_EE_NIC_CONF_0] != 0)
-               return false;
-
-       if (efuse_w[MT_EE_XTAL_TRIM_1] == 0xffff)
-               return false;
-
-       if (efuse_w[MT_EE_TX_POWER_DELTA_BW40] != 0)
-               return false;
-
-       if (efuse_w[MT_EE_TX_POWER_0_START_2G] == 0xffff)
-               return false;
-
-       if (efuse_w[MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA] != 0)
-               return false;
-
-       if (efuse_w[MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE] == 0xffff)
-               return false;
-
-       return true;
-}
-
-static void
-mt76x2_apply_cal_free_data(struct mt76x2_dev *dev, u8 *efuse)
-{
-#define GROUP_5G(_id)                                                     \
-       MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id),     \
-       MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id) + 1, \
-       MT_EE_TX_POWER_1_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id),     \
-       MT_EE_TX_POWER_1_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id) + 1
-
-       static const u8 cal_free_bytes[] = {
-               MT_EE_XTAL_TRIM_1,
-               MT_EE_TX_POWER_EXT_PA_5G + 1,
-               MT_EE_TX_POWER_0_START_2G,
-               MT_EE_TX_POWER_0_START_2G + 1,
-               MT_EE_TX_POWER_1_START_2G,
-               MT_EE_TX_POWER_1_START_2G + 1,
-               GROUP_5G(0),
-               GROUP_5G(1),
-               GROUP_5G(2),
-               GROUP_5G(3),
-               GROUP_5G(4),
-               GROUP_5G(5),
-               MT_EE_RF_2G_TSSI_OFF_TXPOWER,
-               MT_EE_RF_2G_RX_HIGH_GAIN + 1,
-               MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN,
-               MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN + 1,
-               MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN,
-               MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN + 1,
-               MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN,
-               MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN + 1,
-       };
-       u8 *eeprom = dev->mt76.eeprom.data;
-       u8 prev_grp0[4] = {
-               eeprom[MT_EE_TX_POWER_0_START_5G],
-               eeprom[MT_EE_TX_POWER_0_START_5G + 1],
-               eeprom[MT_EE_TX_POWER_1_START_5G],
-               eeprom[MT_EE_TX_POWER_1_START_5G + 1]
-       };
-       u16 val;
-       int i;
-
-       if (!mt76x2_has_cal_free_data(dev, efuse))
-               return;
-
-       for (i = 0; i < ARRAY_SIZE(cal_free_bytes); i++) {
-               int offset = cal_free_bytes[i];
-
-               eeprom[offset] = efuse[offset];
-       }
-
-       if (!(efuse[MT_EE_TX_POWER_0_START_5G] |
-             efuse[MT_EE_TX_POWER_0_START_5G + 1]))
-               memcpy(eeprom + MT_EE_TX_POWER_0_START_5G, prev_grp0, 2);
-       if (!(efuse[MT_EE_TX_POWER_1_START_5G] |
-             efuse[MT_EE_TX_POWER_1_START_5G + 1]))
-               memcpy(eeprom + MT_EE_TX_POWER_1_START_5G, prev_grp0 + 2, 2);
-
-       val = get_unaligned_le16(efuse + MT_EE_BT_RCAL_RESULT);
-       if (val != 0xffff)
-               eeprom[MT_EE_BT_RCAL_RESULT] = val & 0xff;
-
-       val = get_unaligned_le16(efuse + MT_EE_BT_VCDL_CALIBRATION);
-       if (val != 0xffff)
-               eeprom[MT_EE_BT_VCDL_CALIBRATION + 1] = val >> 8;
-
-       val = get_unaligned_le16(efuse + MT_EE_BT_PMUCFG);
-       if (val != 0xffff)
-               eeprom[MT_EE_BT_PMUCFG] = val & 0xff;
-}
-
-static int mt76x2_check_eeprom(struct mt76x2_dev *dev)
-{
-       u16 val = get_unaligned_le16(dev->mt76.eeprom.data);
-
-       if (!val)
-               val = get_unaligned_le16(dev->mt76.eeprom.data + MT_EE_PCI_ID);
-
-       switch (val) {
-       case 0x7662:
-       case 0x7612:
-               return 0;
-       default:
-               dev_err(dev->mt76.dev, "EEPROM data check failed: %04x\n", val);
-               return -EINVAL;
-       }
-}
-
-static int
-mt76x2_eeprom_load(struct mt76x2_dev *dev)
-{
-       void *efuse;
-       bool found;
-       int ret;
-
-       ret = mt76_eeprom_init(&dev->mt76, MT7662_EEPROM_SIZE);
-       if (ret < 0)
-               return ret;
-
-       found = ret;
-       if (found)
-               found = !mt76x2_check_eeprom(dev);
-
-       dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, MT7662_EEPROM_SIZE,
-                                         GFP_KERNEL);
-       dev->mt76.otp.size = MT7662_EEPROM_SIZE;
-       if (!dev->mt76.otp.data)
-               return -ENOMEM;
-
-       efuse = dev->mt76.otp.data;
-
-       if (mt76x2_get_efuse_data(dev, efuse, MT7662_EEPROM_SIZE))
-               goto out;
-
-       if (found) {
-               mt76x2_apply_cal_free_data(dev, efuse);
-       } else {
-               /* FIXME: check if efuse data is complete */
-               found = true;
-               memcpy(dev->mt76.eeprom.data, efuse, MT7662_EEPROM_SIZE);
-       }
-
-out:
-       if (!found)
-               return -ENOENT;
-
-       return 0;
-}
-
-static inline int
-mt76x2_sign_extend(u32 val, unsigned int size)
-{
-       bool sign = val & BIT(size - 1);
-
-       val &= BIT(size - 1) - 1;
-
-       return sign ? val : -val;
-}
-
-static inline int
-mt76x2_sign_extend_optional(u32 val, unsigned int size)
-{
-       bool enable = val & BIT(size);
-
-       return enable ? mt76x2_sign_extend(val, size) : 0;
-}
-
-static bool
-field_valid(u8 val)
-{
-       return val != 0 && val != 0xff;
-}
-
-static void
-mt76x2_set_rx_gain_group(struct mt76x2_dev *dev, u8 val)
-{
-       s8 *dest = dev->cal.rx.high_gain;
-
-       if (!field_valid(val)) {
-               dest[0] = 0;
-               dest[1] = 0;
-               return;
-       }
-
-       dest[0] = mt76x2_sign_extend(val, 4);
-       dest[1] = mt76x2_sign_extend(val >> 4, 4);
-}
-
-static void
-mt76x2_set_rssi_offset(struct mt76x2_dev *dev, int chain, u8 val)
-{
-       s8 *dest = dev->cal.rx.rssi_offset;
-
-       if (!field_valid(val)) {
-               dest[chain] = 0;
-               return;
-       }
-
-       dest[chain] = mt76x2_sign_extend_optional(val, 7);
-}
-
-static enum mt76x2_cal_channel_group
-mt76x2_get_cal_channel_group(int channel)
-{
-       if (channel >= 184 && channel <= 196)
-               return MT_CH_5G_JAPAN;
-       if (channel <= 48)
-               return MT_CH_5G_UNII_1;
-       if (channel <= 64)
-               return MT_CH_5G_UNII_2;
-       if (channel <= 114)
-               return MT_CH_5G_UNII_2E_1;
-       if (channel <= 144)
-               return MT_CH_5G_UNII_2E_2;
-       return MT_CH_5G_UNII_3;
-}
-
-static u8
-mt76x2_get_5g_rx_gain(struct mt76x2_dev *dev, u8 channel)
-{
-       enum mt76x2_cal_channel_group group;
-
-       group = mt76x2_get_cal_channel_group(channel);
-       switch (group) {
-       case MT_CH_5G_JAPAN:
-               return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN);
-       case MT_CH_5G_UNII_1:
-               return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN) >> 8;
-       case MT_CH_5G_UNII_2:
-               return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN);
-       case MT_CH_5G_UNII_2E_1:
-               return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN) >> 8;
-       case MT_CH_5G_UNII_2E_2:
-               return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN);
-       default:
-               return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN) >> 8;
-       }
-}
-
-void mt76x2_read_rx_gain(struct mt76x2_dev *dev)
-{
-       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
-       int channel = chan->hw_value;
-       s8 lna_5g[3], lna_2g;
-       u8 lna;
-       u16 val;
-
-       if (chan->band == NL80211_BAND_2GHZ)
-               val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN) >> 8;
-       else
-               val = mt76x2_get_5g_rx_gain(dev, channel);
-
-       mt76x2_set_rx_gain_group(dev, val);
-
-       if (chan->band == NL80211_BAND_2GHZ) {
-               val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_0);
-               mt76x2_set_rssi_offset(dev, 0, val);
-               mt76x2_set_rssi_offset(dev, 1, val >> 8);
-       } else {
-               val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_0);
-               mt76x2_set_rssi_offset(dev, 0, val);
-               mt76x2_set_rssi_offset(dev, 1, val >> 8);
-       }
-
-       val = mt76x2_eeprom_get(dev, MT_EE_LNA_GAIN);
-       lna_2g = val & 0xff;
-       lna_5g[0] = val >> 8;
-
-       val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_1);
-       lna_5g[1] = val >> 8;
-
-       val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_1);
-       lna_5g[2] = val >> 8;
-
-       if (!field_valid(lna_5g[1]))
-               lna_5g[1] = lna_5g[0];
-
-       if (!field_valid(lna_5g[2]))
-               lna_5g[2] = lna_5g[0];
-
-       dev->cal.rx.mcu_gain =  (lna_2g & 0xff);
-       dev->cal.rx.mcu_gain |= (lna_5g[0] & 0xff) << 8;
-       dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
-       dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
-
-       val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
-       if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
-               lna_2g = 0;
-       if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G)
-               memset(lna_5g, 0, sizeof(lna_5g));
-
-       if (chan->band == NL80211_BAND_2GHZ)
-               lna = lna_2g;
-       else if (channel <= 64)
-               lna = lna_5g[0];
-       else if (channel <= 128)
-               lna = lna_5g[1];
-       else
-               lna = lna_5g[2];
-
-       if (lna == 0xff)
-               lna = 0;
-
-       dev->cal.rx.lna_gain = mt76x2_sign_extend(lna, 8);
-}
-EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
-
-static s8
-mt76x2_rate_power_val(u8 val)
-{
-       if (!field_valid(val))
-               return 0;
-
-       return mt76x2_sign_extend_optional(val, 7);
-}
-
-void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
-                          struct ieee80211_channel *chan)
-{
-       bool is_5ghz;
-       u16 val;
-
-       is_5ghz = chan->band == NL80211_BAND_5GHZ;
-
-       memset(t, 0, sizeof(*t));
-
-       val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_CCK);
-       t->cck[0] = t->cck[1] = mt76x2_rate_power_val(val);
-       t->cck[2] = t->cck[3] = mt76x2_rate_power_val(val >> 8);
-
-       if (is_5ghz)
-               val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_6M);
-       else
-               val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_6M);
-       t->ofdm[0] = t->ofdm[1] = mt76x2_rate_power_val(val);
-       t->ofdm[2] = t->ofdm[3] = mt76x2_rate_power_val(val >> 8);
-
-       if (is_5ghz)
-               val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_24M);
-       else
-               val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_24M);
-       t->ofdm[4] = t->ofdm[5] = mt76x2_rate_power_val(val);
-       t->ofdm[6] = t->ofdm[7] = mt76x2_rate_power_val(val >> 8);
-
-       val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS0);
-       t->ht[0] = t->ht[1] = mt76x2_rate_power_val(val);
-       t->ht[2] = t->ht[3] = mt76x2_rate_power_val(val >> 8);
-
-       val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS4);
-       t->ht[4] = t->ht[5] = mt76x2_rate_power_val(val);
-       t->ht[6] = t->ht[7] = mt76x2_rate_power_val(val >> 8);
-
-       val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS8);
-       t->ht[8] = t->ht[9] = mt76x2_rate_power_val(val);
-       t->ht[10] = t->ht[11] = mt76x2_rate_power_val(val >> 8);
-
-       val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS12);
-       t->ht[12] = t->ht[13] = mt76x2_rate_power_val(val);
-       t->ht[14] = t->ht[15] = mt76x2_rate_power_val(val >> 8);
-
-       val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS0);
-       t->vht[0] = t->vht[1] = mt76x2_rate_power_val(val);
-       t->vht[2] = t->vht[3] = mt76x2_rate_power_val(val >> 8);
-
-       val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS4);
-       t->vht[4] = t->vht[5] = mt76x2_rate_power_val(val);
-       t->vht[6] = t->vht[7] = mt76x2_rate_power_val(val >> 8);
-
-       val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS8);
-       if (!is_5ghz)
-               val >>= 8;
-       t->vht[8] = t->vht[9] = mt76x2_rate_power_val(val >> 8);
-}
-EXPORT_SYMBOL_GPL(mt76x2_get_rate_power);
-
-int mt76x2_get_max_rate_power(struct mt76_rate_power *r)
-{
-       int i;
-       s8 ret = 0;
-
-       for (i = 0; i < sizeof(r->all); i++)
-               ret = max(ret, r->all[i]);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(mt76x2_get_max_rate_power);
-
-static void
-mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
-                        struct ieee80211_channel *chan, int chain, int offset)
-{
-       int channel = chan->hw_value;
-       int delta_idx;
-       u8 data[6];
-       u16 val;
-
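	/* The 6-byte 2G power blob holds TSSI slope/offset, target power and
	 * three per-subband deltas; channels 1-5, 6-10 and 11+ select delta
	 * bytes 3, 4 and 5 respectively. */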
-       if (channel < 6)
-               delta_idx = 3;
-       else if (channel < 11)
-               delta_idx = 4;
-       else
-               delta_idx = 5;
-
-       mt76x2_eeprom_copy(dev, offset, data, sizeof(data));
-
-       t->chain[chain].tssi_slope = data[0];
-       t->chain[chain].tssi_offset = data[1];
-       t->chain[chain].target_power = data[2];
-       t->chain[chain].delta = mt76x2_sign_extend_optional(data[delta_idx], 7);
-
-       val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_TSSI_OFF_TXPOWER);
-       t->target_power = val >> 8;
-}
-
-static void
-mt76x2_get_power_info_5g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
-                        struct ieee80211_channel *chan, int chain, int offset)
-{
-       int channel = chan->hw_value;
-       enum mt76x2_cal_channel_group group;
-       int delta_idx;
-       u16 val;
-       u8 data[5];
-
-       group = mt76x2_get_cal_channel_group(channel);
-       offset += group * MT_TX_POWER_GROUP_SIZE_5G;
-
-       if (channel >= 192)
-               delta_idx = 4;
-       else if (channel >= 184)
-               delta_idx = 3;
-       else if (channel < 44)
-               delta_idx = 3;
-       else if (channel < 52)
-               delta_idx = 4;
-       else if (channel < 58)
-               delta_idx = 3;
-       else if (channel < 98)
-               delta_idx = 4;
-       else if (channel < 106)
-               delta_idx = 3;
-       else if (channel < 116)
-               delta_idx = 4;
-       else if (channel < 130)
-               delta_idx = 3;
-       else if (channel < 149)
-               delta_idx = 4;
-       else if (channel < 157)
-               delta_idx = 3;
-       else
-               delta_idx = 4;
-
-       mt76x2_eeprom_copy(dev, offset, data, sizeof(data));
-
-       t->chain[chain].tssi_slope = data[0];
-       t->chain[chain].tssi_offset = data[1];
-       t->chain[chain].target_power = data[2];
-       t->chain[chain].delta = mt76x2_sign_extend_optional(data[delta_idx], 7);
-
-       val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN);
-       t->target_power = val & 0xff;
-}
-
-void mt76x2_get_power_info(struct mt76x2_dev *dev,
-                          struct mt76x2_tx_power_info *t,
-                          struct ieee80211_channel *chan)
-{
-       u16 bw40, bw80;
-
-       memset(t, 0, sizeof(*t));
-
-       bw40 = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40);
-       bw80 = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80);
-
-       if (chan->band == NL80211_BAND_5GHZ) {
-               bw40 >>= 8;
-               mt76x2_get_power_info_5g(dev, t, chan, 0,
-                                        MT_EE_TX_POWER_0_START_5G);
-               mt76x2_get_power_info_5g(dev, t, chan, 1,
-                                        MT_EE_TX_POWER_1_START_5G);
-       } else {
-               mt76x2_get_power_info_2g(dev, t, chan, 0,
-                                        MT_EE_TX_POWER_0_START_2G);
-               mt76x2_get_power_info_2g(dev, t, chan, 1,
-                                        MT_EE_TX_POWER_1_START_2G);
-       }
-
-       if (mt76x2_tssi_enabled(dev) || !field_valid(t->target_power))
-               t->target_power = t->chain[0].target_power;
-
-       t->delta_bw40 = mt76x2_rate_power_val(bw40);
-       t->delta_bw80 = mt76x2_rate_power_val(bw80);
-}
-EXPORT_SYMBOL_GPL(mt76x2_get_power_info);
-
-int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
-{
-       enum nl80211_band band = dev->mt76.chandef.chan->band;
-       u16 val, slope;
-       u8 bounds;
-
-       memset(t, 0, sizeof(*t));
-
-       if (!mt76x2_temp_tx_alc_enabled(dev))
-               return -EINVAL;
-
-       if (!mt76x2_ext_pa_enabled(dev, band))
-               return -EINVAL;
-
-       val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G) >> 8;
-       t->temp_25_ref = val & 0x7f;
-       if (band == NL80211_BAND_5GHZ) {
-               slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_5G);
-               bounds = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
-       } else {
-               slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_2G);
-               bounds = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80) >> 8;
-       }
-
-       t->high_slope = slope & 0xff;
-       t->low_slope = slope >> 8;
-       t->lower_bound = 0 - (bounds & 0xf);
-       t->upper_bound = (bounds >> 4) & 0xf;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mt76x2_get_temp_comp);
-
-bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band)
-{
-       u16 conf0 = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);
-
-       if (band == NL80211_BAND_5GHZ)
-               return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_5G);
-       else
-               return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_2G);
-}
-EXPORT_SYMBOL_GPL(mt76x2_ext_pa_enabled);
-
-int mt76x2_eeprom_init(struct mt76x2_dev *dev)
-{
-       int ret;
-
-       ret = mt76x2_eeprom_load(dev);
-       if (ret)
-               return ret;
-
-       mt76x2_eeprom_parse_hw_cap(dev);
-       mt76x2_eeprom_get_macaddr(dev);
-       mt76_eeprom_override(&dev->mt76);
-       dev->mt76.macaddr[0] &= ~BIT(1);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mt76x2_eeprom_init);
-
-MODULE_LICENSE("Dual BSD/GPL");
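The EEPROM parsing above leans heavily on the <linux/bitfield.h> helpers, which derive the shift from the mask at compile time so a field is always packed and unpacked consistently. A small self-contained sketch (DEMO_BOARD_TYPE is an invented mask; only the macro usage mirrors the driver):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_BOARD_TYPE	GENMASK(13, 12)

/* Pack a 2-bit board type into bits 13:12 of a config word. */
static u32 demo_pack(u32 board_type)
{
	return FIELD_PREP(DEMO_BOARD_TYPE, board_type);
}

/* Recover the field; FIELD_GET masks and shifts back down. */
static u32 demo_unpack(u32 val)
{
	return FIELD_GET(DEMO_BOARD_TYPE, val);
}

With these helpers demo_unpack(demo_pack(x)) == x for any 2-bit x, which is exactly the property the NIC_CONF_* accessors above rely on.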
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
deleted file mode 100644
index 0f3e4d2..0000000
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef __MT76x2_EEPROM_H
-#define __MT76x2_EEPROM_H
-
-#include "mt76x2.h"
-
-enum mt76x2_eeprom_field {
-       MT_EE_CHIP_ID =                         0x000,
-       MT_EE_VERSION =                         0x002,
-       MT_EE_MAC_ADDR =                        0x004,
-       MT_EE_PCI_ID =                          0x00A,
-       MT_EE_NIC_CONF_0 =                      0x034,
-       MT_EE_NIC_CONF_1 =                      0x036,
-       MT_EE_NIC_CONF_2 =                      0x042,
-
-       MT_EE_XTAL_TRIM_1 =                     0x03a,
-       MT_EE_XTAL_TRIM_2 =                     0x09e,
-
-       MT_EE_LNA_GAIN =                        0x044,
-       MT_EE_RSSI_OFFSET_2G_0 =                0x046,
-       MT_EE_RSSI_OFFSET_2G_1 =                0x048,
-       MT_EE_RSSI_OFFSET_5G_0 =                0x04a,
-       MT_EE_RSSI_OFFSET_5G_1 =                0x04c,
-
-       MT_EE_TX_POWER_DELTA_BW40 =             0x050,
-       MT_EE_TX_POWER_DELTA_BW80 =             0x052,
-
-       MT_EE_TX_POWER_EXT_PA_5G =              0x054,
-
-       MT_EE_TX_POWER_0_START_2G =             0x056,
-       MT_EE_TX_POWER_1_START_2G =             0x05c,
-
-       /* used as byte arrays */
-#define MT_TX_POWER_GROUP_SIZE_5G              5
-#define MT_TX_POWER_GROUPS_5G                  6
-       MT_EE_TX_POWER_0_START_5G =             0x062,
-
-       MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA =  0x074,
-       MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE =      0x076,
-
-       MT_EE_TX_POWER_1_START_5G =             0x080,
-
-       MT_EE_TX_POWER_CCK =                    0x0a0,
-       MT_EE_TX_POWER_OFDM_2G_6M =             0x0a2,
-       MT_EE_TX_POWER_OFDM_2G_24M =            0x0a4,
-       MT_EE_TX_POWER_OFDM_5G_6M =             0x0b2,
-       MT_EE_TX_POWER_OFDM_5G_24M =            0x0b4,
-       MT_EE_TX_POWER_HT_MCS0 =                0x0a6,
-       MT_EE_TX_POWER_HT_MCS4 =                0x0a8,
-       MT_EE_TX_POWER_HT_MCS8 =                0x0aa,
-       MT_EE_TX_POWER_HT_MCS12 =               0x0ac,
-       MT_EE_TX_POWER_VHT_MCS0 =               0x0ba,
-       MT_EE_TX_POWER_VHT_MCS4 =               0x0bc,
-       MT_EE_TX_POWER_VHT_MCS8 =               0x0be,
-
-       MT_EE_RF_TEMP_COMP_SLOPE_5G =           0x0f2,
-       MT_EE_RF_TEMP_COMP_SLOPE_2G =           0x0f4,
-
-       MT_EE_RF_2G_TSSI_OFF_TXPOWER =          0x0f6,
-       MT_EE_RF_2G_RX_HIGH_GAIN =              0x0f8,
-       MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN =       0x0fa,
-       MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN =       0x0fc,
-       MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN =       0x0fe,
-
-       MT_EE_BT_RCAL_RESULT =                  0x138,
-       MT_EE_BT_VCDL_CALIBRATION =             0x13c,
-       MT_EE_BT_PMUCFG =                       0x13e,
-
-       __MT_EE_MAX
-};
-
-#define MT_EE_NIC_CONF_0_PA_INT_2G             BIT(8)
-#define MT_EE_NIC_CONF_0_PA_INT_5G             BIT(9)
-#define MT_EE_NIC_CONF_0_BOARD_TYPE            GENMASK(13, 12)
-
-#define MT_EE_NIC_CONF_1_TEMP_TX_ALC           BIT(1)
-#define MT_EE_NIC_CONF_1_LNA_EXT_2G            BIT(2)
-#define MT_EE_NIC_CONF_1_LNA_EXT_5G            BIT(3)
-#define MT_EE_NIC_CONF_1_TX_ALC_EN             BIT(13)
-
-#define MT_EE_NIC_CONF_2_RX_STREAM             GENMASK(3, 0)
-#define MT_EE_NIC_CONF_2_TX_STREAM             GENMASK(7, 4)
-#define MT_EE_NIC_CONF_2_HW_ANTDIV             BIT(8)
-#define MT_EE_NIC_CONF_2_XTAL_OPTION           GENMASK(10, 9)
-#define MT_EE_NIC_CONF_2_TEMP_DISABLE          BIT(11)
-#define MT_EE_NIC_CONF_2_COEX_METHOD           GENMASK(15, 13)
-
-enum mt76x2_board_type {
-       BOARD_TYPE_2GHZ = 1,
-       BOARD_TYPE_5GHZ = 2,
-};
-
-enum mt76x2_cal_channel_group {
-       MT_CH_5G_JAPAN,
-       MT_CH_5G_UNII_1,
-       MT_CH_5G_UNII_2,
-       MT_CH_5G_UNII_2E_1,
-       MT_CH_5G_UNII_2E_2,
-       MT_CH_5G_UNII_3,
-       __MT_CH_MAX
-};
-
-struct mt76x2_tx_power_info {
-       u8 target_power;
-
-       s8 delta_bw40;
-       s8 delta_bw80;
-
-       struct {
-               s8 tssi_slope;
-               s8 tssi_offset;
-               s8 target_power;
-               s8 delta;
-       } chain[MT_MAX_CHAINS];
-};
-
-struct mt76x2_temp_comp {
-       u8 temp_25_ref;
-       int lower_bound; /* J */
-       int upper_bound; /* J */
-       unsigned int high_slope; /* J / dB */
-       unsigned int low_slope; /* J / dB */
-};
-
-static inline int
-mt76x2_eeprom_get(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field)
-{
-       if ((field & 1) || field >= __MT_EE_MAX)
-               return -1;
-
-       return get_unaligned_le16(dev->mt76.eeprom.data + field);
-}
-
-void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
-                          struct ieee80211_channel *chan);
-int mt76x2_get_max_rate_power(struct mt76_rate_power *r);
-void mt76x2_get_power_info(struct mt76x2_dev *dev,
-                          struct mt76x2_tx_power_info *t,
-                          struct ieee80211_channel *chan);
-int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t);
-bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band);
-void mt76x2_read_rx_gain(struct mt76x2_dev *dev);
-void mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev);
-
-static inline bool
-mt76x2_temp_tx_alc_enabled(struct mt76x2_dev *dev)
-{
-       u16 val;
-
-       val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
-       if (!(val & BIT(15)))
-               return false;
-
-       return mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) &
-              MT_EE_NIC_CONF_1_TEMP_TX_ALC;
-}
-
-static inline bool
-mt76x2_tssi_enabled(struct mt76x2_dev *dev)
-{
-       return !mt76x2_temp_tx_alc_enabled(dev) &&
-              (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) &
-               MT_EE_NIC_CONF_1_TX_ALC_EN);
-}
-
-static inline bool
-mt76x2_has_ext_lna(struct mt76x2_dev *dev)
-{
-       u32 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
-
-       if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
-               return val & MT_EE_NIC_CONF_1_LNA_EXT_2G;
-       else
-               return val & MT_EE_NIC_CONF_1_LNA_EXT_5G;
-}
-
-#endif
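mt76x2_eeprom_get() above is the access pattern for the whole blob: every field is a 16-bit little-endian word at an even offset, read through the unaligned helpers so the EEPROM buffer needs no particular alignment. A stand-alone sketch of the same accessor over a plain byte buffer (demo_read_field() and its explicit bounds check are illustrative):

#include <asm/unaligned.h>

static int demo_read_field(const u8 *blob, unsigned int field,
			   unsigned int blob_size)
{
	/* Fields are 16-bit words at even offsets within the blob. */
	if ((field & 1) || field + 2 > blob_size)
		return -1;

	return get_unaligned_le16(blob + field);
}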
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
deleted file mode 100644
index b814391..0000000
+++ /dev/null
@@ -1,603 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include "mt76x2.h"
-#include "mt76x2_eeprom.h"
-#include "mt76x2_mcu.h"
-
-static void
-mt76x2_mac_pbf_init(struct mt76x2_dev *dev)
-{
-       u32 val;
-
-       val = MT_PBF_SYS_CTRL_MCU_RESET |
-             MT_PBF_SYS_CTRL_DMA_RESET |
-             MT_PBF_SYS_CTRL_MAC_RESET |
-             MT_PBF_SYS_CTRL_PBF_RESET |
-             MT_PBF_SYS_CTRL_ASY_RESET;
-
-       mt76_set(dev, MT_PBF_SYS_CTRL, val);
-       mt76_clear(dev, MT_PBF_SYS_CTRL, val);
-
-       mt76_wr(dev, MT_PBF_TX_MAX_PCNT, 0xefef3f1f);
-       mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf);
-}
-
-static void
-mt76x2_fixup_xtal(struct mt76x2_dev *dev)
-{
-       u16 eep_val;
-       s8 offset = 0;
-
-       eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
-
-       offset = eep_val & 0x7f;
-       if ((eep_val & 0xff) == 0xff)
-               offset = 0;
-       else if (eep_val & 0x80)
-               offset = 0 - offset;
-
-       eep_val >>= 8;
-       if (eep_val == 0x00 || eep_val == 0xff) {
-               eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
-               eep_val &= 0xff;
-
-               if (eep_val == 0x00 || eep_val == 0xff)
-                       eep_val = 0x14;
-       }
-
-       eep_val &= 0x7f;
-       mt76_rmw_field(dev, MT_XO_CTRL5, MT_XO_CTRL5_C2_VAL, eep_val + offset);
-       mt76_set(dev, MT_XO_CTRL6, MT_XO_CTRL6_C2_CTRL);
-
-       eep_val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
-       switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
-       case 0:
-               mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
-               break;
-       case 1:
-               mt76_wr(dev, MT_XO_CTRL7, 0x5c1feed0);
-               break;
-       default:
-               break;
-       }
-}
-
-static void
-mt76x2_init_beacon_offsets(struct mt76x2_dev *dev)
-{
-       u16 base = MT_BEACON_BASE;
-       u32 regs[4] = {};
-       int i;
-
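	/* Each MT_BCN_OFFSET register packs four per-BSS beacon offsets,
	 * one byte per slot, expressed in 64-byte units from MT_BEACON_BASE. */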
-       for (i = 0; i < 16; i++) {
-               u16 addr = dev->beacon_offsets[i];
-
-               regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4));
-       }
-
-       for (i = 0; i < 4; i++)
-               mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
-}
-
-static int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard)
-{
-       static const u8 null_addr[ETH_ALEN] = {};
-       const u8 *macaddr = dev->mt76.macaddr;
-       u32 val;
-       int i, k;
-
-       if (!mt76x2_wait_for_mac(dev))
-               return -ETIMEDOUT;
-
-       val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
-
-       val &= ~(MT_WPDMA_GLO_CFG_TX_DMA_EN |
-                MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
-                MT_WPDMA_GLO_CFG_RX_DMA_EN |
-                MT_WPDMA_GLO_CFG_RX_DMA_BUSY |
-                MT_WPDMA_GLO_CFG_DMA_BURST_SIZE);
-       val |= FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3);
-
-       mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
-
-       mt76x2_mac_pbf_init(dev);
-       mt76_write_mac_initvals(dev);
-       mt76x2_fixup_xtal(dev);
-
-       mt76_clear(dev, MT_MAC_SYS_CTRL,
-                  MT_MAC_SYS_CTRL_RESET_CSR |
-                  MT_MAC_SYS_CTRL_RESET_BBP);
-
-       if (is_mt7612(dev))
-               mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN);
-
-       mt76_set(dev, MT_EXT_CCA_CFG, 0x0000f000);
-       mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31));
-
-       mt76_wr(dev, MT_RF_BYPASS_0, 0x06000000);
-       mt76_wr(dev, MT_RF_SETTING_0, 0x08800000);
-       usleep_range(5000, 10000);
-       mt76_wr(dev, MT_RF_BYPASS_0, 0x00000000);
-
-       mt76_wr(dev, MT_MCU_CLOCK_CTL, 0x1401);
-       mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
-
-       mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(macaddr));
-       mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(macaddr + 4));
-
-       mt76_wr(dev, MT_MAC_BSSID_DW0, get_unaligned_le32(macaddr));
-       mt76_wr(dev, MT_MAC_BSSID_DW1, get_unaligned_le16(macaddr + 4) |
-               FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 beacons */
-               MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
-
-       /* Fire a pre-TBTT interrupt 8 ms before TBTT */
-       mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
-                      8 << 4);
-       mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
-                      MT_DFS_GP_INTERVAL);
-       mt76_wr(dev, MT_INT_TIMER_EN, 0);
-
-       mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
-       if (!hard)
-               return 0;
-
-       for (i = 0; i < 256 / 32; i++)
-               mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0);
-
-       for (i = 0; i < 256; i++)
-               mt76x2_mac_wcid_setup(dev, i, 0, NULL);
-
-       for (i = 0; i < MT_MAX_VIFS; i++)
-               mt76x2_mac_wcid_setup(dev, MT_VIF_WCID(i), i, NULL);
-
-       for (i = 0; i < 16; i++)
-               for (k = 0; k < 4; k++)
-                       mt76x2_mac_shared_key_setup(dev, i, k, NULL);
-
-       for (i = 0; i < 8; i++) {
-               mt76x2_mac_set_bssid(dev, i, null_addr);
-               mt76x2_mac_set_beacon(dev, i, NULL);
-       }
-
-       for (i = 0; i < 16; i++)
-               mt76_rr(dev, MT_TX_STAT_FIFO);
-
-       mt76_wr(dev, MT_CH_TIME_CFG,
-               MT_CH_TIME_CFG_TIMER_EN |
-               MT_CH_TIME_CFG_TX_AS_BUSY |
-               MT_CH_TIME_CFG_RX_AS_BUSY |
-               MT_CH_TIME_CFG_NAV_AS_BUSY |
-               MT_CH_TIME_CFG_EIFS_AS_BUSY |
-               FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
-
-       mt76x2_init_beacon_offsets(dev);
-
-       mt76x2_set_tx_ackto(dev);
-
-       return 0;
-}
-
-int mt76x2_mac_start(struct mt76x2_dev *dev)
-{
-       int i;
-
-       for (i = 0; i < 16; i++)
-               mt76_rr(dev, MT_TX_AGG_CNT(i));
-
-       for (i = 0; i < 16; i++)
-               mt76_rr(dev, MT_TX_STAT_FIFO);
-
-       memset(dev->aggr_stats, 0, sizeof(dev->aggr_stats));
-
-       mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
-       wait_for_wpdma(dev);
-       usleep_range(50, 100);
-
-       mt76_set(dev, MT_WPDMA_GLO_CFG,
-                MT_WPDMA_GLO_CFG_TX_DMA_EN |
-                MT_WPDMA_GLO_CFG_RX_DMA_EN);
-
-       mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
-
-       mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
-
-       mt76_wr(dev, MT_MAC_SYS_CTRL,
-               MT_MAC_SYS_CTRL_ENABLE_TX |
-               MT_MAC_SYS_CTRL_ENABLE_RX);
-
-       mt76x2_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
-                              MT_INT_TX_STAT);
-
-       return 0;
-}
-
-void mt76x2_mac_resume(struct mt76x2_dev *dev)
-{
-       mt76_wr(dev, MT_MAC_SYS_CTRL,
-               MT_MAC_SYS_CTRL_ENABLE_TX |
-               MT_MAC_SYS_CTRL_ENABLE_RX);
-}
-
-static void
-mt76x2_power_on_rf_patch(struct mt76x2_dev *dev)
-{
-       mt76_set(dev, 0x10130, BIT(0) | BIT(16));
-       udelay(1);
-
-       mt76_clear(dev, 0x1001c, 0xff);
-       mt76_set(dev, 0x1001c, 0x30);
-
-       mt76_wr(dev, 0x10014, 0x484f);
-       udelay(1);
-
-       mt76_set(dev, 0x10130, BIT(17));
-       udelay(125);
-
-       mt76_clear(dev, 0x10130, BIT(16));
-       udelay(50);
-
-       mt76_set(dev, 0x1014c, BIT(19) | BIT(20));
-}
-
-static void
-mt76x2_power_on_rf(struct mt76x2_dev *dev, int unit)
-{
-       int shift = unit ? 8 : 0;
-
-       /* Enable RF BG */
-       mt76_set(dev, 0x10130, BIT(0) << shift);
-       udelay(10);
-
-       /* Enable RFDIG LDO/AFE/ABB/ADDA */
-       mt76_set(dev, 0x10130, (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift);
-       udelay(10);
-
-       /* Switch RFDIG power to internal LDO */
-       mt76_clear(dev, 0x10130, BIT(2) << shift);
-       udelay(10);
-
-       mt76x2_power_on_rf_patch(dev);
-
-       mt76_set(dev, 0x530, 0xf);
-}
-
-static void
-mt76x2_power_on(struct mt76x2_dev *dev)
-{
-       u32 val;
-
-       /* Turn on WL MTCMOS */
-       mt76_set(dev, MT_WLAN_MTC_CTRL, MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP);
-
-       val = MT_WLAN_MTC_CTRL_STATE_UP |
-             MT_WLAN_MTC_CTRL_PWR_ACK |
-             MT_WLAN_MTC_CTRL_PWR_ACK_S;
-
-       mt76_poll(dev, MT_WLAN_MTC_CTRL, val, val, 1000);
-
-       mt76_clear(dev, MT_WLAN_MTC_CTRL, 0x7f << 16);
-       udelay(10);
-
-       mt76_clear(dev, MT_WLAN_MTC_CTRL, 0xf << 24);
-       udelay(10);
-
-       mt76_set(dev, MT_WLAN_MTC_CTRL, 0xf << 24);
-       mt76_clear(dev, MT_WLAN_MTC_CTRL, 0xfff);
-
-       /* Turn on AD/DA power down */
-       mt76_clear(dev, 0x11204, BIT(3));
-
-       /* WLAN function enable */
-       mt76_set(dev, 0x10080, BIT(0));
-
-       /* Release BBP software reset */
-       mt76_clear(dev, 0x10064, BIT(18));
-
-       mt76x2_power_on_rf(dev, 0);
-       mt76x2_power_on_rf(dev, 1);
-}
-
-void mt76x2_set_tx_ackto(struct mt76x2_dev *dev)
-{
-       u8 ackto, sifs, slottime = dev->slottime;
-
-       /* As defined by IEEE 802.11-2007 17.3.8.6 */
-       slottime += 3 * dev->coverage_class;
-       mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
-                      MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
-
-       sifs = mt76_get_field(dev, MT_XIFS_TIME_CFG,
-                             MT_XIFS_TIME_CFG_OFDM_SIFS);
-
-       ackto = slottime + sifs;
-       mt76_rmw_field(dev, MT_TX_TIMEOUT_CFG,
-                      MT_TX_TIMEOUT_CFG_ACKTO, ackto);
-}
-
-int mt76x2_init_hardware(struct mt76x2_dev *dev)
-{
-       static const u16 beacon_offsets[16] = {
-               /* 1024 byte per beacon */
-               0xc000,
-               0xc400,
-               0xc800,
-               0xcc00,
-               0xd000,
-               0xd400,
-               0xd800,
-               0xdc00,
-
-               /* BSS idx 8-15 not used for beacons */
-               0xc000,
-               0xc000,
-               0xc000,
-               0xc000,
-               0xc000,
-               0xc000,
-               0xc000,
-               0xc000,
-       };
-       u32 val;
-       int ret;
-
-       dev->beacon_offsets = beacon_offsets;
-       tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet,
-                    (unsigned long) dev);
-
-       val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
-       val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
-              MT_WPDMA_GLO_CFG_BIG_ENDIAN |
-              MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
-       val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
-       mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
-
-       mt76x2_reset_wlan(dev, true);
-       mt76x2_power_on(dev);
-
-       ret = mt76x2_eeprom_init(dev);
-       if (ret)
-               return ret;
-
-       ret = mt76x2_mac_reset(dev, true);
-       if (ret)
-               return ret;
-
-       dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
-
-       ret = mt76x2_dma_init(dev);
-       if (ret)
-               return ret;
-
-       set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
-       ret = mt76x2_mac_start(dev);
-       if (ret)
-               return ret;
-
-       ret = mt76x2_mcu_init(dev);
-       if (ret)
-               return ret;
-
-       mt76x2_mac_stop(dev, false);
-
-       return 0;
-}
-
-void mt76x2_stop_hardware(struct mt76x2_dev *dev)
-{
-       cancel_delayed_work_sync(&dev->cal_work);
-       cancel_delayed_work_sync(&dev->mac_work);
-       mt76x2_mcu_set_radio_state(dev, false);
-       mt76x2_mac_stop(dev, false);
-}
-
-void mt76x2_cleanup(struct mt76x2_dev *dev)
-{
-       tasklet_disable(&dev->dfs_pd.dfs_tasklet);
-       tasklet_disable(&dev->pre_tbtt_tasklet);
-       mt76x2_stop_hardware(dev);
-       mt76x2_dma_cleanup(dev);
-       mt76x2_mcu_cleanup(dev);
-}
-
-struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev)
-{
-       static const struct mt76_driver_ops drv_ops = {
-               .txwi_size = sizeof(struct mt76x2_txwi),
-               .update_survey = mt76x2_update_channel,
-               .tx_prepare_skb = mt76x2_tx_prepare_skb,
-               .tx_complete_skb = mt76x2_tx_complete_skb,
-               .rx_skb = mt76x2_queue_rx_skb,
-               .rx_poll_complete = mt76x2_rx_poll_complete,
-               .sta_ps = mt76x2_sta_ps,
-       };
-       struct mt76x2_dev *dev;
-       struct mt76_dev *mdev;
-
-       mdev = mt76_alloc_device(sizeof(*dev), &mt76x2_ops);
-       if (!mdev)
-               return NULL;
-
-       dev = container_of(mdev, struct mt76x2_dev, mt76);
-       mdev->dev = pdev;
-       mdev->drv = &drv_ops;
-       mutex_init(&dev->mutex);
-       spin_lock_init(&dev->irq_lock);
-
-       return dev;
-}
-
-static void mt76x2_regd_notifier(struct wiphy *wiphy,
-                                struct regulatory_request *request)
-{
-       struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
-       struct mt76x2_dev *dev = hw->priv;
-
-       mt76x2_dfs_set_domain(dev, request->dfs_region);
-}
-
-static const struct ieee80211_iface_limit if_limits[] = {
-       {
-               .max = 1,
-               .types = BIT(NL80211_IFTYPE_ADHOC)
-       }, {
-               .max = 8,
-               .types = BIT(NL80211_IFTYPE_STATION) |
-#ifdef CONFIG_MAC80211_MESH
-                        BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
-                        BIT(NL80211_IFTYPE_AP)
-        },
-};
-
-static const struct ieee80211_iface_combination if_comb[] = {
-       {
-               .limits = if_limits,
-               .n_limits = ARRAY_SIZE(if_limits),
-               .max_interfaces = 8,
-               .num_different_channels = 1,
-               .beacon_int_infra_match = true,
-               .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
-                                      BIT(NL80211_CHAN_WIDTH_20) |
-                                      BIT(NL80211_CHAN_WIDTH_40) |
-                                      BIT(NL80211_CHAN_WIDTH_80),
-       }
-};
-
-static void mt76x2_led_set_config(struct mt76_dev *mt76, u8 delay_on,
-                                 u8 delay_off)
-{
-       struct mt76x2_dev *dev = container_of(mt76, struct mt76x2_dev,
-                                             mt76);
-       u32 val;
-
-       val = MT_LED_STATUS_DURATION(0xff) |
-             MT_LED_STATUS_OFF(delay_off) |
-             MT_LED_STATUS_ON(delay_on);
-
-       mt76_wr(dev, MT_LED_S0(mt76->led_pin), val);
-       mt76_wr(dev, MT_LED_S1(mt76->led_pin), val);
-
-       val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
-             MT_LED_CTRL_KICK(mt76->led_pin);
-       if (mt76->led_al)
-               val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
-       mt76_wr(dev, MT_LED_CTRL, val);
-}
-
-static int mt76x2_led_set_blink(struct led_classdev *led_cdev,
-                               unsigned long *delay_on,
-                               unsigned long *delay_off)
-{
-       struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
-                                            led_cdev);
-       u8 delta_on, delta_off;
-
-       delta_off = max_t(u8, *delay_off / 10, 1);
-       delta_on = max_t(u8, *delay_on / 10, 1);
-
-       mt76x2_led_set_config(mt76, delta_on, delta_off);
-       return 0;
-}
-
-static void mt76x2_led_set_brightness(struct led_classdev *led_cdev,
-                                     enum led_brightness brightness)
-{
-       struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
-                                            led_cdev);
-
-       if (!brightness)
-               mt76x2_led_set_config(mt76, 0, 0xff);
-       else
-               mt76x2_led_set_config(mt76, 0xff, 0);
-}
-
-int mt76x2_register_device(struct mt76x2_dev *dev)
-{
-       struct ieee80211_hw *hw = mt76_hw(dev);
-       struct wiphy *wiphy = hw->wiphy;
-       void *status_fifo;
-       int fifo_size;
-       int i, ret;
-
-       fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x2_tx_status));
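	/* kfifo_init() below requires a power-of-two buffer size, hence the
	 * roundup_pow_of_two() above. */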
-       status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
-       if (!status_fifo)
-               return -ENOMEM;
-
-       kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
-       INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
-       INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);
-
-       mt76x2_init_device(dev);
-
-       ret = mt76x2_init_hardware(dev);
-       if (ret)
-               return ret;
-
-       for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
-               u8 *addr = dev->macaddr_list[i].addr;
-
-               memcpy(addr, dev->mt76.macaddr, ETH_ALEN);
-
-               if (!i)
-                       continue;
-
-               addr[0] |= BIT(1);
-               addr[0] ^= ((i - 1) << 2);
-       }
-       wiphy->addresses = dev->macaddr_list;
-       wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list);
-
-       wiphy->iface_combinations = if_comb;
-       wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
-
-       wiphy->reg_notifier = mt76x2_regd_notifier;
-
-       wiphy->interface_modes =
-               BIT(NL80211_IFTYPE_STATION) |
-               BIT(NL80211_IFTYPE_AP) |
-#ifdef CONFIG_MAC80211_MESH
-               BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
-               BIT(NL80211_IFTYPE_ADHOC);
-
-       wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
-
-       mt76x2_dfs_init_detector(dev);
-
-       /* init led callbacks */
-       dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
-       dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
-
-       ret = mt76_register_device(&dev->mt76, true, mt76x2_rates,
-                                  ARRAY_SIZE(mt76x2_rates));
-       if (ret)
-               goto fail;
-
-       mt76x2_init_debugfs(dev);
-       mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
-       mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
-
-       return 0;
-
-fail:
-       mt76x2_stop_hardware(dev);
-       return ret;
-}
-
-
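mt76x2_register_device() above derives the extra wiphy addresses from the EEPROM MAC by setting the locally administered bit and XOR-ing in an index, so every virtual interface gets a distinct address that cannot clash with the base one (mt76x2_eeprom_init() clears that bit in the hardware address). A compact sketch of just that derivation (demo_fill_addresses() and its parameters are illustrative):

#include <linux/etherdevice.h>
#include <linux/bitops.h>
#include <linux/string.h>

static void demo_fill_addresses(u8 (*list)[ETH_ALEN], int n, const u8 *base)
{
	int i;

	for (i = 0; i < n; i++) {
		memcpy(list[i], base, ETH_ALEN);
		if (!i)
			continue;	/* slot 0 keeps the hardware address */
		list[i][0] |= BIT(1);		/* locally administered */
		list[i][0] ^= (i - 1) << 2;	/* disambiguate the rest */
	}
}

For a short list (the driver sizes it to match its eight-interface limit) the XOR stays within the first octet and never touches the locally administered bit, so all derived addresses stay distinct from each other and from the base.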
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c
deleted file mode 100644
index 324b2a4..0000000
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "mt76x2.h"
-#include "mt76x2_eeprom.h"
-
-#define CCK_RATE(_idx, _rate) {                                        \
-       .bitrate = _rate,                                       \
-       .flags = IEEE80211_RATE_SHORT_PREAMBLE,                 \
-       .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx,              \
-       .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx),  \
-}
-
-#define OFDM_RATE(_idx, _rate) {                               \
-       .bitrate = _rate,                                       \
-       .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx,             \
-       .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx,       \
-}
-
-struct ieee80211_rate mt76x2_rates[] = {
-       CCK_RATE(0, 10),
-       CCK_RATE(1, 20),
-       CCK_RATE(2, 55),
-       CCK_RATE(3, 110),
-       OFDM_RATE(0, 60),
-       OFDM_RATE(1, 90),
-       OFDM_RATE(2, 120),
-       OFDM_RATE(3, 180),
-       OFDM_RATE(4, 240),
-       OFDM_RATE(5, 360),
-       OFDM_RATE(6, 480),
-       OFDM_RATE(7, 540),
-};
-EXPORT_SYMBOL_GPL(mt76x2_rates);
-
-struct mt76x2_reg_pair {
-       u32 reg;
-       u32 value;
-};
-
-static void
-mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable)
-{
-       u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
-
-       if (enable)
-               val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
-                       MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
-       else
-               val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN |
-                        MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
-
-       mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
-       udelay(20);
-}
-
-void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable)
-{
-       u32 val;
-
-       val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
-
-       val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
-
-       if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
-               val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
-               mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
-               udelay(20);
-
-               val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
-       }
-
-       mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
-       udelay(20);
-
-       mt76x2_set_wlan_state(dev, enable);
-}
-EXPORT_SYMBOL_GPL(mt76x2_reset_wlan);
-
-static void
-mt76x2_write_reg_pairs(struct mt76x2_dev *dev,
-                      const struct mt76x2_reg_pair *data, int len)
-{
-       while (len > 0) {
-               mt76_wr(dev, data->reg, data->value);
-               len--;
-               data++;
-       }
-}
-
-void mt76_write_mac_initvals(struct mt76x2_dev *dev)
-{
-#define DEFAULT_PROT_CFG_CCK                           \
-       (FIELD_PREP(MT_PROT_CFG_RATE, 0x3) |            \
-        FIELD_PREP(MT_PROT_CFG_NAV, 1) |               \
-        FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) |     \
-        MT_PROT_CFG_RTS_THRESH)
-
-#define DEFAULT_PROT_CFG_OFDM                          \
-       (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) |         \
-        FIELD_PREP(MT_PROT_CFG_NAV, 1) |                       \
-        FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) |     \
-        MT_PROT_CFG_RTS_THRESH)
-
-#define DEFAULT_PROT_CFG_20                            \
-       (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) |         \
-        FIELD_PREP(MT_PROT_CFG_CTRL, 1) |              \
-        FIELD_PREP(MT_PROT_CFG_NAV, 1) |                       \
-        FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17))
-
-#define DEFAULT_PROT_CFG_40                            \
-       (FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) |         \
-        FIELD_PREP(MT_PROT_CFG_CTRL, 1) |              \
-        FIELD_PREP(MT_PROT_CFG_NAV, 1) |                       \
-        FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f))
-
-       static const struct mt76x2_reg_pair vals[] = {
-               /* Copied from MediaTek reference source */
-               { MT_PBF_SYS_CTRL,              0x00080c00 },
-               { MT_PBF_CFG,                   0x1efebcff },
-               { MT_FCE_PSE_CTRL,              0x00000001 },
-               { MT_MAC_SYS_CTRL,              0x0000000c },
-               { MT_MAX_LEN_CFG,               0x003e3f00 },
-               { MT_AMPDU_MAX_LEN_20M1S,       0xaaa99887 },
-               { MT_AMPDU_MAX_LEN_20M2S,       0x000000aa },
-               { MT_XIFS_TIME_CFG,             0x33a40d0a },
-               { MT_BKOFF_SLOT_CFG,            0x00000209 },
-               { MT_TBTT_SYNC_CFG,             0x00422010 },
-               { MT_PWR_PIN_CFG,               0x00000000 },
-               { 0x1238,                       0x001700c8 },
-               { MT_TX_SW_CFG0,                0x00101001 },
-               { MT_TX_SW_CFG1,                0x00010000 },
-               { MT_TX_SW_CFG2,                0x00000000 },
-               { MT_TXOP_CTRL_CFG,             0x0400583f },
-               { MT_TX_RTS_CFG,                0x00100020 },
-               { MT_TX_TIMEOUT_CFG,            0x000a2290 },
-               { MT_TX_RETRY_CFG,              0x47f01f0f },
-               { MT_EXP_ACK_TIME,              0x002c00dc },
-               { MT_TX_PROT_CFG6,              0xe3f42004 },
-               { MT_TX_PROT_CFG7,              0xe3f42084 },
-               { MT_TX_PROT_CFG8,              0xe3f42104 },
-               { MT_PIFS_TX_CFG,               0x00060fff },
-               { MT_RX_FILTR_CFG,              0x00015f97 },
-               { MT_LEGACY_BASIC_RATE,         0x0000017f },
-               { MT_HT_BASIC_RATE,             0x00004003 },
-               { MT_PN_PAD_MODE,               0x00000003 },
-               { MT_TXOP_HLDR_ET,              0x00000002 },
-               { 0xa44,                        0x00000000 },
-               { MT_HEADER_TRANS_CTRL_REG,     0x00000000 },
-               { MT_TSO_CTRL,                  0x00000000 },
-               { MT_AUX_CLK_CFG,               0x00000000 },
-               { MT_DACCLK_EN_DLY_CFG,         0x00000000 },
-               { MT_TX_ALC_CFG_4,              0x00000000 },
-               { MT_TX_ALC_VGA3,               0x00000000 },
-               { MT_TX_PWR_CFG_0,              0x3a3a3a3a },
-               { MT_TX_PWR_CFG_1,              0x3a3a3a3a },
-               { MT_TX_PWR_CFG_2,              0x3a3a3a3a },
-               { MT_TX_PWR_CFG_3,              0x3a3a3a3a },
-               { MT_TX_PWR_CFG_4,              0x3a3a3a3a },
-               { MT_TX_PWR_CFG_7,              0x3a3a3a3a },
-               { MT_TX_PWR_CFG_8,              0x0000003a },
-               { MT_TX_PWR_CFG_9,              0x0000003a },
-               { MT_EFUSE_CTRL,                0x0000d000 },
-               { MT_PAUSE_ENABLE_CONTROL1,     0x0000000a },
-               { MT_FCE_WLAN_FLOW_CONTROL1,    0x60401c18 },
-               { MT_WPDMA_DELAY_INT_CFG,       0x94ff0000 },
-               { MT_TX_SW_CFG3,                0x00000004 },
-               { MT_HT_FBK_TO_LEGACY,          0x00001818 },
-               { MT_VHT_HT_FBK_CFG1,           0xedcba980 },
-               { MT_PROT_AUTO_TX_CFG,          0x00830083 },
-               { MT_HT_CTRL_CFG,               0x000001ff },
-       };
-       struct mt76x2_reg_pair prot_vals[] = {
-               { MT_CCK_PROT_CFG,              DEFAULT_PROT_CFG_CCK },
-               { MT_OFDM_PROT_CFG,             DEFAULT_PROT_CFG_OFDM },
-               { MT_MM20_PROT_CFG,             DEFAULT_PROT_CFG_20 },
-               { MT_MM40_PROT_CFG,             DEFAULT_PROT_CFG_40 },
-               { MT_GF20_PROT_CFG,             DEFAULT_PROT_CFG_20 },
-               { MT_GF40_PROT_CFG,             DEFAULT_PROT_CFG_40 },
-       };
-
-       mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals));
-       mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals));
-}
-EXPORT_SYMBOL_GPL(mt76_write_mac_initvals);
-
-void mt76x2_init_device(struct mt76x2_dev *dev)
-{
-       struct ieee80211_hw *hw = mt76_hw(dev);
-
-       hw->queues = 4;
-       hw->max_rates = 1;
-       hw->max_report_rates = 7;
-       hw->max_rate_tries = 1;
-       hw->extra_tx_headroom = 2;
-
-       hw->sta_data_size = sizeof(struct mt76x2_sta);
-       hw->vif_data_size = sizeof(struct mt76x2_vif);
-
-       ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
-       ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
-
-       dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
-       dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
-
-       dev->chainmask = 0x202;
-       dev->global_wcid.idx = 255;
-       dev->global_wcid.hw_key_idx = -1;
-       dev->slottime = 9;
-
-       /* init antenna configuration */
-       dev->mt76.antenna_mask = 3;
-}
-EXPORT_SYMBOL_GPL(mt76x2_init_device);
-
-void mt76x2_init_txpower(struct mt76x2_dev *dev,
-                        struct ieee80211_supported_band *sband)
-{
-       struct ieee80211_channel *chan;
-       struct mt76x2_tx_power_info txp;
-       struct mt76_rate_power t = {};
-       int target_power;
-       int i;
-
-       for (i = 0; i < sband->n_channels; i++) {
-               chan = &sband->channels[i];
-
-               mt76x2_get_power_info(dev, &txp, chan);
-
-               target_power = max_t(int, (txp.chain[0].target_power +
-                                          txp.chain[0].delta),
-                                         (txp.chain[1].target_power +
-                                          txp.chain[1].delta));
-
-               mt76x2_get_rate_power(dev, &t, chan);
-
-               chan->max_power = mt76x2_get_max_rate_power(&t) +
-                                 target_power;
-               chan->max_power /= 2;
-
-               /* convert to combined output power on 2x2 devices */
-               chan->max_power += 3;
-       }
-}
-EXPORT_SYMBOL_GPL(mt76x2_init_txpower);
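
The per-channel calculation above works in half-dB units: the stronger chain's EEPROM target power plus its delta is added to the best per-rate offset, the sum is halved to get dBm, and 3 dB is added to express the combined output of a 2x2 device. A standalone sketch of that arithmetic with made-up EEPROM values (not driver code):

#include <stdio.h>

/* Hypothetical EEPROM numbers, in the half-dB steps the driver uses. */
struct chain_power { int target_power, delta; };

static int max_int(int a, int b) { return a > b ? a : b; }

int main(void)
{
	struct chain_power chain[2] = { { 28, 2 }, { 30, 0 } };
	int max_rate_power = 8;	/* best per-rate offset, half-dB */
	int target, max_power;

	/* strongest chain: max(28 + 2, 30 + 0) = 30 half-dB */
	target = max_int(chain[0].target_power + chain[0].delta,
			 chain[1].target_power + chain[1].delta);

	max_power = (max_rate_power + target) / 2;	/* half-dB -> dBm */
	max_power += 3;		/* combined output of both chains */

	printf("channel max_power = %d dBm\n", max_power);	/* 22 */
	return 0;
}
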
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
deleted file mode 100644 (file)
index 23cf437..0000000
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include "mt76x2.h"
-#include "mt76x2_mcu.h"
-#include "mt76x2_eeprom.h"
-#include "mt76x2_trace.h"
-
-void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
-{
-       idx &= 7;
-       mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
-       mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
-                      get_unaligned_le16(addr + 4));
-}
-
-void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
-{
-       struct mt76x2_tx_status stat = {};
-       unsigned long flags;
-       u8 update = 1;
-       bool ret;
-
-       if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
-               return;
-
-       trace_mac_txstat_poll(dev);
-
-       while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
-               spin_lock_irqsave(&dev->irq_lock, flags);
-               ret = mt76x2_mac_load_tx_status(dev, &stat);
-               spin_unlock_irqrestore(&dev->irq_lock, flags);
-
-               if (!ret)
-                       break;
-
-               trace_mac_txstat_fetch(dev, &stat);
-
-               if (!irq) {
-                       mt76x2_send_tx_status(dev, &stat, &update);
-                       continue;
-               }
-
-               kfifo_put(&dev->txstatus_fifo, stat);
-       }
-}
-
-static void
-mt76x2_mac_queue_txdone(struct mt76x2_dev *dev, struct sk_buff *skb,
-                       void *txwi_ptr)
-{
-       struct mt76x2_tx_info *txi = mt76x2_skb_tx_info(skb);
-       struct mt76x2_txwi *txwi = txwi_ptr;
-
-       mt76x2_mac_poll_tx_status(dev, false);
-
-       txi->tries = 0;
-       txi->jiffies = jiffies;
-       txi->wcid = txwi->wcid;
-       txi->pktid = txwi->pktid;
-       trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
-       mt76x2_tx_complete(dev, skb);
-}
-
-void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev)
-{
-       struct mt76x2_tx_status stat;
-       u8 update = 1;
-
-       while (kfifo_get(&dev->txstatus_fifo, &stat))
-               mt76x2_send_tx_status(dev, &stat, &update);
-}
-
-void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
-                           struct mt76_queue_entry *e, bool flush)
-{
-       struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
-
-       if (e->txwi)
-               mt76x2_mac_queue_txdone(dev, e->skb, &e->txwi->txwi);
-       else
-               dev_kfree_skb_any(e->skb);
-}
-
-static int
-mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
-{
-       int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
-       struct mt76x2_txwi txwi;
-
-       if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x2_txwi)))
-               return -ENOSPC;
-
-       mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
-
-       mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
-       offset += sizeof(txwi);
-
-       mt76_wr_copy(dev, offset, skb->data, skb->len);
-       return 0;
-}
-
-static int
-__mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb)
-{
-       int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
-       int beacon_addr = dev->beacon_offsets[bcn_idx];
-       int ret = 0;
-       int i;
-
-       /* Prevent corrupt transmissions during update */
-       mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));
-
-       if (skb) {
-               ret = mt76_write_beacon(dev, beacon_addr, skb);
-               if (!ret)
-                       dev->beacon_data_mask |= BIT(bcn_idx) &
-                                                dev->beacon_mask;
-       } else {
-               dev->beacon_data_mask &= ~BIT(bcn_idx);
-               for (i = 0; i < beacon_len; i += 4)
-                       mt76_wr(dev, beacon_addr + i, 0);
-       }
-
-       mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);
-
-       return ret;
-}
-
-int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
-                         struct sk_buff *skb)
-{
-       bool force_update = false;
-       int bcn_idx = 0;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
-               if (vif_idx == i) {
-                       force_update = !!dev->beacons[i] ^ !!skb;
-
-                       if (dev->beacons[i])
-                               dev_kfree_skb(dev->beacons[i]);
-
-                       dev->beacons[i] = skb;
-                       __mt76x2_mac_set_beacon(dev, bcn_idx, skb);
-               } else if (force_update && dev->beacons[i]) {
-                       __mt76x2_mac_set_beacon(dev, bcn_idx, dev->beacons[i]);
-               }
-
-               bcn_idx += !!dev->beacons[i];
-       }
-
-       for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
-               if (!(dev->beacon_data_mask & BIT(i)))
-                       break;
-
-               __mt76x2_mac_set_beacon(dev, i, NULL);
-       }
-
-       mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
-                      bcn_idx - 1);
-       return 0;
-}
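
The loop above keeps hardware beacon slots densely packed: each vif that currently has a beacon claims the next free bcn_idx, trailing slots are wiped, and MBEACON_N is written with the last used index. A minimal sketch of that compaction over a hypothetical beacons[] array (slot_owner is an illustrative name, not a driver field):

#include <stdio.h>

int main(void)
{
	/* 1 = vif currently has a beacon, 0 = it does not */
	int beacons[8] = { 1, 0, 1, 1, 0, 0, 0, 0 };
	int slot_owner[8], bcn_idx = 0;
	int i;

	for (i = 0; i < 8; i++)
		if (beacons[i])
			slot_owner[bcn_idx++] = i;	/* dense hw slots */

	for (i = 0; i < bcn_idx; i++)
		printf("hw slot %d <- vif %d\n", i, slot_owner[i]);
	printf("MBEACON_N = %d\n", bcn_idx - 1);
	return 0;
}
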
-
-void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val)
-{
-       u8 old_mask = dev->beacon_mask;
-       bool en;
-       u32 reg;
-
-       if (val) {
-               dev->beacon_mask |= BIT(vif_idx);
-       } else {
-               dev->beacon_mask &= ~BIT(vif_idx);
-               mt76x2_mac_set_beacon(dev, vif_idx, NULL);
-       }
-
-       if (!!old_mask == !!dev->beacon_mask)
-               return;
-
-       en = dev->beacon_mask;
-
-       mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
-       reg = MT_BEACON_TIME_CFG_BEACON_TX |
-             MT_BEACON_TIME_CFG_TBTT_EN |
-             MT_BEACON_TIME_CFG_TIMER_EN;
-       mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);
-
-       if (en)
-               mt76x2_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
-       else
-               mt76x2_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
-}
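
The "mask, reg * en" pattern in the mt76_rmw() call above leans on bool-to-int conversion: the same mask is always cleared, then OR-ed back in only when en is true. A tiny sketch of the idiom, using a local rmw() stand-in rather than the driver helper:

#include <stdio.h>

static unsigned int rmw(unsigned int val, unsigned int mask,
			unsigned int set)
{
	return (val & ~mask) | set;	/* clear mask bits, apply set */
}

int main(void)
{
	unsigned int reg = 0xffffffff, mask = 0x00000111;
	int en = 0;

	reg = rmw(reg, mask, mask * en);	/* en=0: bits cleared */
	printf("%08x\n", reg);			/* fffffeee */

	en = 1;
	reg = rmw(reg, mask, mask * en);	/* en=1: bits set again */
	printf("%08x\n", reg);			/* ffffffff */
	return 0;
}
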
-
-void mt76x2_update_channel(struct mt76_dev *mdev)
-{
-       struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
-       struct mt76_channel_state *state;
-       u32 active, busy;
-
-       state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
-
-       busy = mt76_rr(dev, MT_CH_BUSY);
-       active = busy + mt76_rr(dev, MT_CH_IDLE);
-
-       spin_lock_bh(&dev->mt76.cc_lock);
-       state->cc_busy += busy;
-       state->cc_active += active;
-       spin_unlock_bh(&dev->mt76.cc_lock);
-}
-
-void mt76x2_mac_work(struct work_struct *work)
-{
-       struct mt76x2_dev *dev = container_of(work, struct mt76x2_dev,
-                                           mac_work.work);
-       int i, idx;
-
-       mt76x2_update_channel(&dev->mt76);
-       for (i = 0, idx = 0; i < 16; i++) {
-               u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
-
-               dev->aggr_stats[idx++] += val & 0xffff;
-               dev->aggr_stats[idx++] += val >> 16;
-       }
-
-       ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
-                                    MT_CALIBRATE_INTERVAL);
-}
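
Each MT_TX_AGG_CNT register packs two 16-bit aggregation counters, so the loop above splits every 32-bit read into two consecutive aggr_stats slots. A minimal standalone sketch of that unpacking with invented register values:

#include <stdio.h>

int main(void)
{
	/* hypothetical register reads: low/high halves are counters */
	unsigned int regs[3] = { 0x00050003, 0x00000010, 0x00020000 };
	unsigned int stats[6] = { 0 };
	int i, idx = 0;

	for (i = 0; i < 3; i++) {
		stats[idx++] += regs[i] & 0xffff;	/* low counter */
		stats[idx++] += regs[i] >> 16;		/* high counter */
	}

	for (i = 0; i < 6; i++)
		printf("stats[%d] = %u\n", i, stats[i]);
	return 0;
}
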
-
-void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val)
-{
-       u32 data = 0;
-
-       if (val != ~0)
-               data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
-                      MT_PROT_CFG_RTS_THRESH;
-
-       mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);
-
-       mt76_rmw(dev, MT_CCK_PROT_CFG,
-                MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
-       mt76_rmw(dev, MT_OFDM_PROT_CFG,
-                MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
-       mt76_rmw(dev, MT_MM20_PROT_CFG,
-                MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
-       mt76_rmw(dev, MT_MM40_PROT_CFG,
-                MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
-       mt76_rmw(dev, MT_GF20_PROT_CFG,
-                MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
-       mt76_rmw(dev, MT_GF40_PROT_CFG,
-                MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
-       mt76_rmw(dev, MT_TX_PROT_CFG6,
-                MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
-       mt76_rmw(dev, MT_TX_PROT_CFG7,
-                MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
-       mt76_rmw(dev, MT_TX_PROT_CFG8,
-                MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
-}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
deleted file mode 100644 (file)
index 5af0107..0000000
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef __MT76x2_MAC_H
-#define __MT76x2_MAC_H
-
-#include "mt76.h"
-
-struct mt76x2_dev;
-struct mt76x2_sta;
-struct mt76x2_vif;
-struct mt76x2_txwi;
-
-struct mt76x2_tx_status {
-       u8 valid:1;
-       u8 success:1;
-       u8 aggr:1;
-       u8 ack_req:1;
-       u8 wcid;
-       u8 pktid;
-       u8 retry;
-       u16 rate;
-} __packed __aligned(2);
-
-struct mt76x2_tx_info {
-       unsigned long jiffies;
-       u8 tries;
-
-       u8 wcid;
-       u8 pktid;
-       u8 retry;
-};
-
-struct mt76x2_rxwi {
-       __le32 rxinfo;
-
-       __le32 ctl;
-
-       __le16 tid_sn;
-       __le16 rate;
-
-       u8 rssi[4];
-
-       __le32 bbp_rxinfo[4];
-};
-
-#define MT_RXINFO_BA                   BIT(0)
-#define MT_RXINFO_DATA                 BIT(1)
-#define MT_RXINFO_NULL                 BIT(2)
-#define MT_RXINFO_FRAG                 BIT(3)
-#define MT_RXINFO_UNICAST              BIT(4)
-#define MT_RXINFO_MULTICAST            BIT(5)
-#define MT_RXINFO_BROADCAST            BIT(6)
-#define MT_RXINFO_MYBSS                        BIT(7)
-#define MT_RXINFO_CRCERR               BIT(8)
-#define MT_RXINFO_ICVERR               BIT(9)
-#define MT_RXINFO_MICERR               BIT(10)
-#define MT_RXINFO_AMSDU                        BIT(11)
-#define MT_RXINFO_HTC                  BIT(12)
-#define MT_RXINFO_RSSI                 BIT(13)
-#define MT_RXINFO_L2PAD                        BIT(14)
-#define MT_RXINFO_AMPDU                        BIT(15)
-#define MT_RXINFO_DECRYPT              BIT(16)
-#define MT_RXINFO_BSSIDX3              BIT(17)
-#define MT_RXINFO_WAPI_KEY             BIT(18)
-#define MT_RXINFO_PN_LEN               GENMASK(21, 19)
-#define MT_RXINFO_SW_FTYPE0            BIT(22)
-#define MT_RXINFO_SW_FTYPE1            BIT(23)
-#define MT_RXINFO_PROBE_RESP           BIT(24)
-#define MT_RXINFO_BEACON               BIT(25)
-#define MT_RXINFO_DISASSOC             BIT(26)
-#define MT_RXINFO_DEAUTH               BIT(27)
-#define MT_RXINFO_ACTION               BIT(28)
-#define MT_RXINFO_TCP_SUM_ERR          BIT(30)
-#define MT_RXINFO_IP_SUM_ERR           BIT(31)
-
-#define MT_RXWI_CTL_WCID               GENMASK(7, 0)
-#define MT_RXWI_CTL_KEY_IDX            GENMASK(9, 8)
-#define MT_RXWI_CTL_BSS_IDX            GENMASK(12, 10)
-#define MT_RXWI_CTL_UDF                        GENMASK(15, 13)
-#define MT_RXWI_CTL_MPDU_LEN           GENMASK(29, 16)
-#define MT_RXWI_CTL_EOF                        BIT(31)
-
-#define MT_RXWI_TID                    GENMASK(3, 0)
-#define MT_RXWI_SN                     GENMASK(15, 4)
-
-#define MT_RXWI_RATE_INDEX             GENMASK(5, 0)
-#define MT_RXWI_RATE_LDPC              BIT(6)
-#define MT_RXWI_RATE_BW                        GENMASK(8, 7)
-#define MT_RXWI_RATE_SGI               BIT(9)
-#define MT_RXWI_RATE_STBC              BIT(10)
-#define MT_RXWI_RATE_LDPC_EXSYM                BIT(11)
-#define MT_RXWI_RATE_PHY               GENMASK(15, 13)
-
-#define MT_RATE_INDEX_VHT_IDX          GENMASK(3, 0)
-#define MT_RATE_INDEX_VHT_NSS          GENMASK(5, 4)
-
-#define MT_TX_PWR_ADJ                  GENMASK(3, 0)
-
-enum mt76x2_phy_bandwidth {
-       MT_PHY_BW_20,
-       MT_PHY_BW_40,
-       MT_PHY_BW_80,
-};
-
-#define MT_TXWI_FLAGS_FRAG             BIT(0)
-#define MT_TXWI_FLAGS_MMPS             BIT(1)
-#define MT_TXWI_FLAGS_CFACK            BIT(2)
-#define MT_TXWI_FLAGS_TS               BIT(3)
-#define MT_TXWI_FLAGS_AMPDU            BIT(4)
-#define MT_TXWI_FLAGS_MPDU_DENSITY     GENMASK(7, 5)
-#define MT_TXWI_FLAGS_TXOP             GENMASK(9, 8)
-#define MT_TXWI_FLAGS_NDPS             BIT(10)
-#define MT_TXWI_FLAGS_RTSBWSIG         BIT(11)
-#define MT_TXWI_FLAGS_NDP_BW           GENMASK(13, 12)
-#define MT_TXWI_FLAGS_SOUND            BIT(14)
-#define MT_TXWI_FLAGS_TX_RATE_LUT      BIT(15)
-
-#define MT_TXWI_ACK_CTL_REQ            BIT(0)
-#define MT_TXWI_ACK_CTL_NSEQ           BIT(1)
-#define MT_TXWI_ACK_CTL_BA_WINDOW      GENMASK(7, 2)
-
-#define MT_TXWI_PKTID_PROBE            BIT(7)
-
-struct mt76x2_txwi {
-       __le16 flags;
-       __le16 rate;
-       u8 ack_ctl;
-       u8 wcid;
-       __le16 len_ctl;
-       __le32 iv;
-       __le32 eiv;
-       u8 aid;
-       u8 txstream;
-       u8 ctl2;
-       u8 pktid;
-} __packed __aligned(4);
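
These GENMASK/BIT layouts are consumed with FIELD_PREP/FIELD_GET throughout the driver. The following self-contained sketch packs and unpacks a 16-bit rate word under the MT_RXWI_RATE_* layout above; the GENMASK/FIELD_* macros here are simplified local stand-ins for the kernel versions, for illustration only:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define LOW_BIT(m)	((m) & -(m))
#define FIELD_PREP(m, v)	(((v) * LOW_BIT(m)) & (m))
#define FIELD_GET(m, r)	(((r) & (m)) / LOW_BIT(m))

#define RATE_INDEX	GENMASK(5, 0)
#define RATE_BW		GENMASK(8, 7)
#define RATE_SGI	(1u << 9)
#define RATE_PHY	GENMASK(15, 13)

int main(void)
{
	uint16_t rate = FIELD_PREP(RATE_INDEX, 7) |
			FIELD_PREP(RATE_BW, 1) |	/* 40 MHz */
			RATE_SGI |
			FIELD_PREP(RATE_PHY, 4);	/* e.g. VHT */

	printf("idx=%u bw=%u sgi=%u phy=%u\n",
	       (unsigned)FIELD_GET(RATE_INDEX, rate),
	       (unsigned)FIELD_GET(RATE_BW, rate),
	       (unsigned)!!(rate & RATE_SGI),
	       (unsigned)FIELD_GET(RATE_PHY, rate));
	return 0;
}
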
-
-static inline struct mt76x2_tx_info *
-mt76x2_skb_tx_info(struct sk_buff *skb)
-{
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
-       return (void *) info->status.status_driver_data;
-}
-
-int mt76x2_mac_start(struct mt76x2_dev *dev);
-void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force);
-void mt76x2_mac_resume(struct mt76x2_dev *dev);
-void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr);
-
-int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
-                         void *rxi);
-void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
-                          struct sk_buff *skb, struct mt76_wcid *wcid,
-                          struct ieee80211_sta *sta, int len);
-void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
-int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
-                           struct ieee80211_key_conf *key);
-void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
-                             const struct ieee80211_tx_rate *rate);
-void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop);
-
-int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
-                               struct ieee80211_key_conf *key);
-
-int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
-                         struct sk_buff *skb);
-void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val);
-
-void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq);
-void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev);
-
-void mt76x2_mac_work(struct work_struct *work);
-
-#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
deleted file mode 100644 (file)
index 6542644..0000000
+++ /dev/null
@@ -1,699 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "mt76x2.h"
-
-void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force)
-{
-       bool stopped = false;
-       u32 rts_cfg;
-       int i;
-
-       mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
-
-       rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
-       mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
-
-       /* Wait for MAC to become idle */
-       for (i = 0; i < 300; i++) {
-               if ((mt76_rr(dev, MT_MAC_STATUS) &
-                    (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) ||
-                   mt76_rr(dev, MT_BBP(IBI, 12))) {
-                       udelay(1);
-                       continue;
-               }
-
-               stopped = true;
-               break;
-       }
-
-       if (force && !stopped) {
-               mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
-               mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
-
-               mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
-               mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
-       }
-
-       mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
-}
-EXPORT_SYMBOL_GPL(mt76x2_mac_stop);
-
-bool mt76x2_mac_load_tx_status(struct mt76x2_dev *dev,
-                              struct mt76x2_tx_status *stat)
-{
-       u32 stat1, stat2;
-
-       stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
-       stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
-
-       stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
-       if (!stat->valid)
-               return false;
-
-       stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
-       stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
-       stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
-       stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
-       stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
-
-       stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
-       stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
-
-       return true;
-}
-EXPORT_SYMBOL_GPL(mt76x2_mac_load_tx_status);
-
-static int
-mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
-                          enum nl80211_band band)
-{
-       u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
-
-       txrate->idx = 0;
-       txrate->flags = 0;
-       txrate->count = 1;
-
-       switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
-       case MT_PHY_TYPE_OFDM:
-               if (band == NL80211_BAND_2GHZ)
-                       idx += 4;
-
-               txrate->idx = idx;
-               return 0;
-       case MT_PHY_TYPE_CCK:
-               if (idx >= 8)
-                       idx -= 8;
-
-               txrate->idx = idx;
-               return 0;
-       case MT_PHY_TYPE_HT_GF:
-               txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
-               /* fall through */
-       case MT_PHY_TYPE_HT:
-               txrate->flags |= IEEE80211_TX_RC_MCS;
-               txrate->idx = idx;
-               break;
-       case MT_PHY_TYPE_VHT:
-               txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
-               txrate->idx = idx;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
-       case MT_PHY_BW_20:
-               break;
-       case MT_PHY_BW_40:
-               txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
-               break;
-       case MT_PHY_BW_80:
-               txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       if (rate & MT_RXWI_RATE_SGI)
-               txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
-
-       return 0;
-}
-
-static void
-mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
-                         struct ieee80211_tx_info *info,
-                         struct mt76x2_tx_status *st, int n_frames)
-{
-       struct ieee80211_tx_rate *rate = info->status.rates;
-       int cur_idx, last_rate;
-       int i;
-
-       if (!n_frames)
-               return;
-
-       last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
-       mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate,
-                                dev->mt76.chandef.chan->band);
-       if (last_rate < IEEE80211_TX_MAX_RATES - 1)
-               rate[last_rate + 1].idx = -1;
-
-       cur_idx = rate[last_rate].idx + last_rate;
-       for (i = 0; i <= last_rate; i++) {
-               rate[i].flags = rate[last_rate].flags;
-               rate[i].idx = max_t(int, 0, cur_idx - i);
-               rate[i].count = 1;
-       }
-       rate[last_rate].count = st->retry + 1 - last_rate;
-
-       info->status.ampdu_len = n_frames;
-       info->status.ampdu_ack_len = st->success ? n_frames : 0;
-
-       if (st->pktid & MT_TXWI_PKTID_PROBE)
-               info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
-
-       if (st->aggr)
-               info->flags |= IEEE80211_TX_CTL_AMPDU |
-                              IEEE80211_TX_STAT_AMPDU;
-
-       if (!st->ack_req)
-               info->flags |= IEEE80211_TX_CTL_NO_ACK;
-       else if (st->success)
-               info->flags |= IEEE80211_TX_STAT_ACK;
-}
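
The status FIFO reports only the final rate and the retry count, so the function above reconstructs a plausible descending rate chain: the final rate lands in slot last_rate and earlier slots get successively higher indices, clamped at zero. A standalone sketch of that reconstruction with made-up numbers:

#include <stdio.h>

#define MAX_RATES 4

int main(void)
{
	int idx[MAX_RATES], count[MAX_RATES];
	int retry = 5;		/* retries reported by hardware */
	int final_idx = 2;	/* rate index the frame went out at */
	int last_rate = retry < MAX_RATES - 1 ? retry : MAX_RATES - 1;
	int cur_idx = final_idx + last_rate;
	int i;

	for (i = 0; i <= last_rate; i++) {
		idx[i] = cur_idx - i > 0 ? cur_idx - i : 0;
		count[i] = 1;
	}
	count[last_rate] = retry + 1 - last_rate;

	for (i = 0; i <= last_rate; i++)
		printf("slot %d: idx=%d count=%d\n", i, idx[i], count[i]);
	return 0;
}
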
-
-void mt76x2_send_tx_status(struct mt76x2_dev *dev,
-                          struct mt76x2_tx_status *stat, u8 *update)
-{
-       struct ieee80211_tx_info info = {};
-       struct ieee80211_sta *sta = NULL;
-       struct mt76_wcid *wcid = NULL;
-       struct mt76x2_sta *msta = NULL;
-
-       rcu_read_lock();
-       if (stat->wcid < ARRAY_SIZE(dev->wcid))
-               wcid = rcu_dereference(dev->wcid[stat->wcid]);
-
-       if (wcid) {
-               void *priv;
-
-               priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
-               sta = container_of(priv, struct ieee80211_sta,
-                                  drv_priv);
-       }
-
-       if (msta && stat->aggr) {
-               u32 stat_val, stat_cache;
-
-               stat_val = stat->rate;
-               stat_val |= ((u32) stat->retry) << 16;
-               stat_cache = msta->status.rate;
-               stat_cache |= ((u32) msta->status.retry) << 16;
-
-               if (*update == 0 && stat_val == stat_cache &&
-                   stat->wcid == msta->status.wcid && msta->n_frames < 32) {
-                       msta->n_frames++;
-                       goto out;
-               }
-
-               mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
-                                         msta->n_frames);
-
-               msta->status = *stat;
-               msta->n_frames = 1;
-               *update = 0;
-       } else {
-               mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
-               *update = 1;
-       }
-
-       ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
-
-out:
-       rcu_read_unlock();
-}
-EXPORT_SYMBOL_GPL(mt76x2_send_tx_status);
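
To batch A-MPDU status reports, the function above packs rate and retry into a single u32 and compares it with the cached value; identical consecutive reports merely bump n_frames instead of going to mac80211 one by one. A sketch of that comparison key (status_key is an illustrative helper, not a driver function):

#include <stdint.h>
#include <stdio.h>

struct status { uint16_t rate; uint8_t retry; uint8_t wcid; };

static uint32_t status_key(const struct status *s)
{
	return s->rate | ((uint32_t)s->retry << 16);
}

int main(void)
{
	struct status cached = { 0x4203, 1, 5 };
	struct status incoming = { 0x4203, 1, 5 };
	int n_frames = 3;

	if (status_key(&incoming) == status_key(&cached) &&
	    incoming.wcid == cached.wcid && n_frames < 32)
		n_frames++;	/* defer the report, batch one more frame */

	printf("n_frames = %d\n", n_frames);	/* 4 */
	return 0;
}
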
-
-static enum mt76x2_cipher_type
-mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
-{
-       memset(key_data, 0, 32);
-       if (!key)
-               return MT_CIPHER_NONE;
-
-       if (key->keylen > 32)
-               return MT_CIPHER_NONE;
-
-       memcpy(key_data, key->key, key->keylen);
-
-       switch (key->cipher) {
-       case WLAN_CIPHER_SUITE_WEP40:
-               return MT_CIPHER_WEP40;
-       case WLAN_CIPHER_SUITE_WEP104:
-               return MT_CIPHER_WEP104;
-       case WLAN_CIPHER_SUITE_TKIP:
-               return MT_CIPHER_TKIP;
-       case WLAN_CIPHER_SUITE_CCMP:
-               return MT_CIPHER_AES_CCMP;
-       default:
-               return MT_CIPHER_NONE;
-       }
-}
-
-int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
-                               struct ieee80211_key_conf *key)
-{
-       enum mt76x2_cipher_type cipher;
-       u8 key_data[32];
-       u32 val;
-
-       cipher = mt76x2_mac_get_key_info(key, key_data);
-       if (cipher == MT_CIPHER_NONE && key)
-               return -EOPNOTSUPP;
-
-       val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
-       val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
-       val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
-       mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
-
-       mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
-                    sizeof(key_data));
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mt76x2_mac_shared_key_setup);
-
-int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
-                           struct ieee80211_key_conf *key)
-{
-       enum mt76x2_cipher_type cipher;
-       u8 key_data[32];
-       u8 iv_data[8];
-
-       cipher = mt76x2_mac_get_key_info(key, key_data);
-       if (cipher == MT_CIPHER_NONE && key)
-               return -EOPNOTSUPP;
-
-       mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
-       mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
-
-       memset(iv_data, 0, sizeof(iv_data));
-       if (key) {
-               mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
-                              !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
-               iv_data[3] = key->keyidx << 6;
-               if (cipher >= MT_CIPHER_TKIP)
-                       iv_data[3] |= 0x20;
-       }
-
-       mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_key);
-
-static __le16
-mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
-                      const struct ieee80211_tx_rate *rate, u8 *nss_val)
-{
-       u16 rateval;
-       u8 phy, rate_idx;
-       u8 nss = 1;
-       u8 bw = 0;
-
-       if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
-               rate_idx = rate->idx;
-               nss = 1 + (rate->idx >> 4);
-               phy = MT_PHY_TYPE_VHT;
-               if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
-                       bw = 2;
-               else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
-                       bw = 1;
-       } else if (rate->flags & IEEE80211_TX_RC_MCS) {
-               rate_idx = rate->idx;
-               nss = 1 + (rate->idx >> 3);
-               phy = MT_PHY_TYPE_HT;
-               if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
-                       phy = MT_PHY_TYPE_HT_GF;
-               if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
-                       bw = 1;
-       } else {
-               const struct ieee80211_rate *r;
-               int band = dev->mt76.chandef.chan->band;
-               u16 val;
-
-               r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
-               if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
-                       val = r->hw_value_short;
-               else
-                       val = r->hw_value;
-
-               phy = val >> 8;
-               rate_idx = val & 0xff;
-               bw = 0;
-       }
-
-       rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
-       rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
-       rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
-       if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
-               rateval |= MT_RXWI_RATE_SGI;
-
-       *nss_val = nss;
-       return cpu_to_le16(rateval);
-}
-
-void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
-                             const struct ieee80211_tx_rate *rate)
-{
-       spin_lock_bh(&dev->mt76.lock);
-       wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
-       wcid->tx_rate_set = true;
-       spin_unlock_bh(&dev->mt76.lock);
-}
-EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_rate);
-
-void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
-                          struct sk_buff *skb, struct mt76_wcid *wcid,
-                          struct ieee80211_sta *sta, int len)
-{
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_tx_rate *rate = &info->control.rates[0];
-       struct ieee80211_key_conf *key = info->control.hw_key;
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-       u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
-       u16 txwi_flags = 0;
-       u8 nss;
-       s8 txpwr_adj, max_txpwr_adj;
-       u8 ccmp_pn[8];
-
-       memset(txwi, 0, sizeof(*txwi));
-
-       if (wcid)
-               txwi->wcid = wcid->idx;
-       else
-               txwi->wcid = 0xff;
-
-       txwi->pktid = 1;
-
-       if (wcid && wcid->sw_iv && key) {
-               u64 pn = atomic64_inc_return(&key->tx_pn);
-               ccmp_pn[0] = pn;
-               ccmp_pn[1] = pn >> 8;
-               ccmp_pn[2] = 0;
-               ccmp_pn[3] = 0x20 | (key->keyidx << 6);
-               ccmp_pn[4] = pn >> 16;
-               ccmp_pn[5] = pn >> 24;
-               ccmp_pn[6] = pn >> 32;
-               ccmp_pn[7] = pn >> 40;
-               txwi->iv = *((__le32 *)&ccmp_pn[0]);
-               /* extended IV carries PN2..PN5, i.e. bytes 4..7 */
-               txwi->eiv = *((__le32 *)&ccmp_pn[4]);
-       }
-
-       spin_lock_bh(&dev->mt76.lock);
-       if (wcid && (rate->idx < 0 || !rate->count)) {
-               txwi->rate = wcid->tx_rate;
-               max_txpwr_adj = wcid->max_txpwr_adj;
-               nss = wcid->tx_rate_nss;
-       } else {
-               txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss);
-               max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate);
-       }
-       spin_unlock_bh(&dev->mt76.lock);
-
-       txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf,
-                                           max_txpwr_adj);
-       txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
-
-       if (mt76xx_rev(dev) >= MT76XX_REV_E4)
-               txwi->txstream = 0x13;
-       else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
-                !(txwi->rate & cpu_to_le16(rate_ht_mask)))
-               txwi->txstream = 0x93;
-
-       if (info->flags & IEEE80211_TX_CTL_LDPC)
-               txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
-       if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
-               txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
-       if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
-               txwi_flags |= MT_TXWI_FLAGS_MMPS;
-       if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
-               txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
-       if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
-               txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
-       if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
-               txwi->pktid |= MT_TXWI_PKTID_PROBE;
-       if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
-               u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
-
-               ba_size <<= sta->ht_cap.ampdu_factor;
-               ba_size = min_t(int, 63, ba_size - 1);
-               if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
-                       ba_size = 0;
-               txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
-
-               txwi_flags |= MT_TXWI_FLAGS_AMPDU |
-                        FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
-                                   sta->ht_cap.ampdu_density);
-       }
-
-       if (ieee80211_is_probe_resp(hdr->frame_control) ||
-           ieee80211_is_beacon(hdr->frame_control))
-               txwi_flags |= MT_TXWI_FLAGS_TS;
-
-       txwi->flags |= cpu_to_le16(txwi_flags);
-       txwi->len_ctl = cpu_to_le16(len);
-}
-EXPORT_SYMBOL_GPL(mt76x2_mac_write_txwi);
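
The CCMP header built above splits the 48-bit packet number across an IV word (PN0, PN1, reserved byte, key-id byte with the ExtIV bit) and an extended-IV word (PN2..PN5). A standalone sketch of that byte layout, assuming key index 0:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pn = 0x0000a1b2c3d4e5f6ULL;	/* 48-bit packet number */
	uint8_t keyidx = 0;
	uint8_t hdr[8];

	hdr[0] = (uint8_t)pn;			/* PN0 */
	hdr[1] = (uint8_t)(pn >> 8);		/* PN1 */
	hdr[2] = 0;				/* reserved */
	hdr[3] = 0x20 | (keyidx << 6);		/* ExtIV bit + key index */
	hdr[4] = (uint8_t)(pn >> 16);		/* PN2 */
	hdr[5] = (uint8_t)(pn >> 24);		/* PN3 */
	hdr[6] = (uint8_t)(pn >> 32);		/* PN4 */
	hdr[7] = (uint8_t)(pn >> 40);		/* PN5 */

	printf("IV  = %02x %02x %02x %02x\n", hdr[0], hdr[1], hdr[2], hdr[3]);
	printf("EIV = %02x %02x %02x %02x\n", hdr[4], hdr[5], hdr[6], hdr[7]);
	return 0;
}
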
-
-void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop)
-{
-       u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
-       u32 bit = MT_WCID_DROP_MASK(idx);
-
-       /* prevent unnecessary writes */
-       if ((val & bit) != (bit * drop))
-               mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
-}
-EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_drop);
-
-void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
-{
-       struct mt76_wcid_addr addr = {};
-       u32 attr;
-
-       attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
-              FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
-
-       mt76_wr(dev, MT_WCID_ATTR(idx), attr);
-
-       mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
-       mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
-
-       if (idx >= 128)
-               return;
-
-       if (mac)
-               memcpy(addr.macaddr, mac, ETH_ALEN);
-
-       mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
-}
-EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_setup);
-
-static int
-mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate)
-{
-       u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
-
-       switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
-       case MT_PHY_TYPE_OFDM:
-               if (idx >= 8)
-                       idx = 0;
-
-               if (status->band == NL80211_BAND_2GHZ)
-                       idx += 4;
-
-               status->rate_idx = idx;
-               return 0;
-       case MT_PHY_TYPE_CCK:
-               if (idx >= 8) {
-                       idx -= 8;
-                       status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
-               }
-
-               if (idx >= 4)
-                       idx = 0;
-
-               status->rate_idx = idx;
-               return 0;
-       case MT_PHY_TYPE_HT_GF:
-               status->enc_flags |= RX_ENC_FLAG_HT_GF;
-               /* fall through */
-       case MT_PHY_TYPE_HT:
-               status->encoding = RX_ENC_HT;
-               status->rate_idx = idx;
-               break;
-       case MT_PHY_TYPE_VHT:
-               status->encoding = RX_ENC_VHT;
-               status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
-               status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       if (rate & MT_RXWI_RATE_LDPC)
-               status->enc_flags |= RX_ENC_FLAG_LDPC;
-
-       if (rate & MT_RXWI_RATE_SGI)
-               status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
-
-       if (rate & MT_RXWI_RATE_STBC)
-               status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
-
-       switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
-       case MT_PHY_BW_20:
-               break;
-       case MT_PHY_BW_40:
-               status->bw = RATE_INFO_BW_40;
-               break;
-       case MT_PHY_BW_80:
-               status->bw = RATE_INFO_BW_80;
-               break;
-       default:
-               break;
-       }
-
-       return 0;
-}
-
-static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len)
-{
-       int hdrlen;
-
-       if (!len)
-               return;
-
-       hdrlen = ieee80211_get_hdrlen_from_skb(skb);
-       memmove(skb->data + len, skb->data, hdrlen);
-       skb_pull(skb, len);
-}
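
Removing the L2 pad means sliding the 802.11 header forward over the pad bytes and then trimming the front of the buffer, which is what the memmove() plus skb_pull() pair above does. A minimal array-based sketch of the same move:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* 4-byte "header", 2 pad bytes, then payload */
	char buf[] = { 'H', 'D', 'R', '!', 0, 0, 'p', 'a', 'y' };
	int hdrlen = 4, pad = 2, len = (int)sizeof(buf);
	char *data;

	memmove(buf + pad, buf, hdrlen);	/* header slides right */
	data = buf + pad;			/* skb_pull() equivalent */
	len -= pad;

	fwrite(data, 1, len, stdout);		/* prints "HDR!pay" */
	putchar('\n');
	return 0;
}
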
-
-int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
-{
-       struct mt76x2_rx_freq_cal *cal = &dev->cal.rx;
-
-       rssi += cal->rssi_offset[chain];
-       rssi -= cal->lna_gain;
-
-       return rssi;
-}
-
-static struct mt76x2_sta *
-mt76x2_rx_get_sta(struct mt76x2_dev *dev, u8 idx)
-{
-       struct mt76_wcid *wcid;
-
-       if (idx >= ARRAY_SIZE(dev->wcid))
-               return NULL;
-
-       wcid = rcu_dereference(dev->wcid[idx]);
-       if (!wcid)
-               return NULL;
-
-       return container_of(wcid, struct mt76x2_sta, wcid);
-}
-
-static struct mt76_wcid *
-mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, struct mt76x2_sta *sta,
-                      bool unicast)
-{
-       if (!sta)
-               return NULL;
-
-       if (unicast)
-               return &sta->wcid;
-       else
-               return &sta->vif->group_wcid;
-}
-
-int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
-                         void *rxi)
-{
-       struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
-       struct mt76x2_rxwi *rxwi = rxi;
-       struct mt76x2_sta *sta;
-       u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
-       u32 ctl = le32_to_cpu(rxwi->ctl);
-       u16 rate = le16_to_cpu(rxwi->rate);
-       u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
-       bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
-       int pad_len = 0;
-       u8 pn_len;
-       u8 wcid;
-       int len;
-
-       if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
-               return -EINVAL;
-
-       if (rxinfo & MT_RXINFO_L2PAD)
-               pad_len += 2;
-
-       if (rxinfo & MT_RXINFO_DECRYPT) {
-               status->flag |= RX_FLAG_DECRYPTED;
-               status->flag |= RX_FLAG_MMIC_STRIPPED;
-               status->flag |= RX_FLAG_MIC_STRIPPED;
-               status->flag |= RX_FLAG_IV_STRIPPED;
-       }
-
-       wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
-       sta = mt76x2_rx_get_sta(dev, wcid);
-       status->wcid = mt76x2_rx_get_sta_wcid(dev, sta, unicast);
-
-       len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
-       pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
-       if (pn_len) {
-               int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
-               u8 *data = skb->data + offset;
-
-               status->iv[0] = data[7];
-               status->iv[1] = data[6];
-               status->iv[2] = data[5];
-               status->iv[3] = data[4];
-               status->iv[4] = data[1];
-               status->iv[5] = data[0];
-
-               /*
-                * Driver CCMP validation can't deal with fragments.
-                * Let mac80211 take care of it.
-                */
-               if (rxinfo & MT_RXINFO_FRAG) {
-                       status->flag &= ~RX_FLAG_IV_STRIPPED;
-               } else {
-                       pad_len += pn_len << 2;
-                       len -= pn_len << 2;
-               }
-       }
-
-       mt76x2_remove_hdr_pad(skb, pad_len);
-
-       if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
-               status->aggr = true;
-
-       if (WARN_ON_ONCE(len > skb->len))
-               return -EINVAL;
-
-       pskb_trim(skb, len);
-       status->chains = BIT(0) | BIT(1);
-       status->chain_signal[0] = mt76x2_mac_get_rssi(dev, rxwi->rssi[0], 0);
-       status->chain_signal[1] = mt76x2_mac_get_rssi(dev, rxwi->rssi[1], 1);
-       status->signal = max(status->chain_signal[0], status->chain_signal[1]);
-       status->freq = dev->mt76.chandef.chan->center_freq;
-       status->band = dev->mt76.chandef.chan->band;
-
-       status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
-       status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
-
-       if (sta) {
-               ewma_signal_add(&sta->rssi, status->signal);
-               sta->inactive_count = 0;
-       }
-
-       return mt76x2_mac_process_rate(status, rate);
-}
-EXPORT_SYMBOL_GPL(mt76x2_mac_process_rx);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
deleted file mode 100644 (file)
index 680a89f..0000000
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "mt76x2.h"
-
-static int
-mt76x2_start(struct ieee80211_hw *hw)
-{
-       struct mt76x2_dev *dev = hw->priv;
-       int ret;
-
-       mutex_lock(&dev->mutex);
-
-       ret = mt76x2_mac_start(dev);
-       if (ret)
-               goto out;
-
-       ret = mt76x2_phy_start(dev);
-       if (ret)
-               goto out;
-
-       ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
-                                    MT_CALIBRATE_INTERVAL);
-
-       set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
-
-out:
-       mutex_unlock(&dev->mutex);
-       return ret;
-}
-
-static void
-mt76x2_stop(struct ieee80211_hw *hw)
-{
-       struct mt76x2_dev *dev = hw->priv;
-
-       mutex_lock(&dev->mutex);
-       clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
-       mt76x2_stop_hardware(dev);
-       mutex_unlock(&dev->mutex);
-}
-
-static int
-mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
-       struct mt76x2_dev *dev = hw->priv;
-       struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
-       unsigned int idx = 0;
-
-       if (vif->addr[0] & BIT(1))
-               idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
-
-       /*
-        * Client mode typically only has one configurable BSSID register,
-        * which is used for bssidx=0. This is linked to the MAC address.
-        * Since mac80211 allows changing interface types, and we cannot
-        * force the use of the primary MAC address for a station mode
-        * interface, we need some other way of configuring a per-interface
-        * remote BSSID.
-        * The hardware provides an AP-Client feature, where bssidx 0-7 are
-        * used for AP mode and bssidx 8-15 for client mode.
-        * We shift the station interface bss index by 8 to force the
-        * hardware to recognize the BSSID.
-        * The resulting bssidx mismatch for unicast frames is ignored by hw.
-        */
-       if (vif->type == NL80211_IFTYPE_STATION)
-               idx += 8;
-
-       mvif->idx = idx;
-       mvif->group_wcid.idx = MT_VIF_WCID(idx);
-       mvif->group_wcid.hw_key_idx = -1;
-       mt76x2_txq_init(dev, vif->txq);
-
-       return 0;
-}
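
A sketch of the index derivation above: a locally administered address (bit 1 of the first octet set) hashes into slots 1..8 from the XOR with the device address, and station interfaces are then shifted into the AP-Client range by adding 8. The values below are purely illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t dev_mac0 = 0x00;	/* first octet of device address */
	uint8_t vif_mac0 = 0x0a;	/* locally administered (bit 1 set) */
	int is_station = 1;
	unsigned int idx = 0;

	if (vif_mac0 & 0x02)
		idx = 1 + (((dev_mac0 ^ vif_mac0) >> 2) & 7);

	if (is_station)
		idx += 8;	/* bssidx 8..15: AP-Client range */

	printf("bss index = %u\n", idx);	/* 1 + 2 + 8 = 11 */
	return 0;
}
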
-
-static int
-mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef)
-{
-       int ret;
-
-       cancel_delayed_work_sync(&dev->cal_work);
-
-       set_bit(MT76_RESET, &dev->mt76.state);
-
-       mt76_set_channel(&dev->mt76);
-
-       tasklet_disable(&dev->pre_tbtt_tasklet);
-       tasklet_disable(&dev->dfs_pd.dfs_tasklet);
-
-       mt76x2_mac_stop(dev, true);
-       ret = mt76x2_phy_set_channel(dev, chandef);
-
-       /* channel cycle counters read-and-clear */
-       mt76_rr(dev, MT_CH_IDLE);
-       mt76_rr(dev, MT_CH_BUSY);
-
-       mt76x2_dfs_init_params(dev);
-
-       mt76x2_mac_resume(dev);
-       tasklet_enable(&dev->dfs_pd.dfs_tasklet);
-       tasklet_enable(&dev->pre_tbtt_tasklet);
-
-       clear_bit(MT76_RESET, &dev->mt76.state);
-
-       mt76_txq_schedule_all(&dev->mt76);
-
-       return ret;
-}
-
-static int
-mt76x2_config(struct ieee80211_hw *hw, u32 changed)
-{
-       struct mt76x2_dev *dev = hw->priv;
-       int ret = 0;
-
-       mutex_lock(&dev->mutex);
-
-       if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
-               if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
-                       dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
-               else
-                       dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
-
-               mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
-       }
-
-       if (changed & IEEE80211_CONF_CHANGE_POWER) {
-               dev->txpower_conf = hw->conf.power_level * 2;
-
-               /* convert to per-chain power for 2x2 devices */
-               dev->txpower_conf -= 6;
-
-               if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) {
-                       mt76x2_phy_set_txpower(dev);
-                       mt76x2_tx_set_txpwr_auto(dev, dev->txpower_conf);
-               }
-       }
-
-       if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
-               ieee80211_stop_queues(hw);
-               ret = mt76x2_set_channel(dev, &hw->conf.chandef);
-               ieee80211_wake_queues(hw);
-       }
-
-       mutex_unlock(&dev->mutex);
-
-       return ret;
-}
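
The power handling above stores half-dB units: mac80211 hands the driver dBm, which is doubled, and 6 (i.e. 3 dB) is subtracted to turn combined 2x2 output into per-chain power; mt76x2_get_txpower() later applies the inverse. A quick round-trip sketch of that conversion:

#include <stdio.h>

int main(void)
{
	int dbm_requested = 20;			/* from mac80211 */
	int per_chain = dbm_requested * 2 - 6;	/* half-dB, per chain */
	int dbm_reported = per_chain / 2 + 3;	/* get_txpower() inverse */

	printf("conf=%d half-dB, reported=%d dBm\n",
	       per_chain, dbm_reported);	/* conf=34, reported=20 */
	return 0;
}
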
-
-static void
-mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                       struct ieee80211_bss_conf *info, u32 changed)
-{
-       struct mt76x2_dev *dev = hw->priv;
-       struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
-
-       mutex_lock(&dev->mutex);
-
-       if (changed & BSS_CHANGED_BSSID)
-               mt76x2_mac_set_bssid(dev, mvif->idx, info->bssid);
-
-       if (changed & BSS_CHANGED_BEACON_INT) {
-               mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
-                              MT_BEACON_TIME_CFG_INTVAL,
-                              info->beacon_int << 4);
-               dev->beacon_int = info->beacon_int;
-               dev->tbtt_count = 0;
-       }
-
-       if (changed & BSS_CHANGED_BEACON_ENABLED) {
-               tasklet_disable(&dev->pre_tbtt_tasklet);
-               mt76x2_mac_set_beacon_enable(dev, mvif->idx,
-                                            info->enable_beacon);
-               tasklet_enable(&dev->pre_tbtt_tasklet);
-       }
-
-       if (changed & BSS_CHANGED_ERP_SLOT) {
-               int slottime = info->use_short_slot ? 9 : 20;
-
-               dev->slottime = slottime;
-               mt76x2_set_tx_ackto(dev);
-       }
-
-       mutex_unlock(&dev->mutex);
-}
-
-void
-mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
-{
-       struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
-       struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
-       int idx = msta->wcid.idx;
-
-       mt76_stop_tx_queues(&dev->mt76, sta, true);
-       mt76x2_mac_wcid_set_drop(dev, idx, ps);
-}
-
-static void
-mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-              const u8 *mac)
-{
-       struct mt76x2_dev *dev = hw->priv;
-
-       tasklet_disable(&dev->pre_tbtt_tasklet);
-       set_bit(MT76_SCANNING, &dev->mt76.state);
-}
-
-static void
-mt76x2_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
-       struct mt76x2_dev *dev = hw->priv;
-
-       clear_bit(MT76_SCANNING, &dev->mt76.state);
-       tasklet_enable(&dev->pre_tbtt_tasklet);
-}
-
-static void
-mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-            u32 queues, bool drop)
-{
-}
-
-static int
-mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm)
-{
-       struct mt76x2_dev *dev = hw->priv;
-
-       *dbm = dev->txpower_cur / 2;
-
-       /* convert from per-chain power to combined output on 2x2 devices */
-       *dbm += 3;
-
-       return 0;
-}
-
-static void mt76x2_set_coverage_class(struct ieee80211_hw *hw,
-                                     s16 coverage_class)
-{
-       struct mt76x2_dev *dev = hw->priv;
-
-       mutex_lock(&dev->mutex);
-       dev->coverage_class = coverage_class;
-       mt76x2_set_tx_ackto(dev);
-       mutex_unlock(&dev->mutex);
-}
-
-static int
-mt76x2_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
-{
-       return 0;
-}
-
-static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
-                             u32 rx_ant)
-{
-       struct mt76x2_dev *dev = hw->priv;
-
-       if (!tx_ant || tx_ant > 3 || tx_ant != rx_ant)
-               return -EINVAL;
-
-       mutex_lock(&dev->mutex);
-
-       dev->chainmask = (tx_ant == 3) ? 0x202 : 0x101;
-       dev->mt76.antenna_mask = tx_ant;
-
-       mt76_set_stream_caps(&dev->mt76, true);
-       mt76x2_phy_set_antenna(dev);
-
-       mutex_unlock(&dev->mutex);
-
-       return 0;
-}
-
-static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
-                             u32 *rx_ant)
-{
-       struct mt76x2_dev *dev = hw->priv;
-
-       mutex_lock(&dev->mutex);
-       *tx_ant = dev->mt76.antenna_mask;
-       *rx_ant = dev->mt76.antenna_mask;
-       mutex_unlock(&dev->mutex);
-
-       return 0;
-}
-
-static int
-mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
-{
-       struct mt76x2_dev *dev = hw->priv;
-
-       if (val != ~0 && val > 0xffff)
-               return -EINVAL;
-
-       mutex_lock(&dev->mutex);
-       mt76x2_mac_set_tx_protection(dev, val);
-       mutex_unlock(&dev->mutex);
-
-       return 0;
-}
-
-const struct ieee80211_ops mt76x2_ops = {
-       .tx = mt76x2_tx,
-       .start = mt76x2_start,
-       .stop = mt76x2_stop,
-       .add_interface = mt76x2_add_interface,
-       .remove_interface = mt76x2_remove_interface,
-       .config = mt76x2_config,
-       .configure_filter = mt76x2_configure_filter,
-       .bss_info_changed = mt76x2_bss_info_changed,
-       .sta_add = mt76x2_sta_add,
-       .sta_remove = mt76x2_sta_remove,
-       .set_key = mt76x2_set_key,
-       .conf_tx = mt76x2_conf_tx,
-       .sw_scan_start = mt76x2_sw_scan,
-       .sw_scan_complete = mt76x2_sw_scan_complete,
-       .flush = mt76x2_flush,
-       .ampdu_action = mt76x2_ampdu_action,
-       .get_txpower = mt76x2_get_txpower,
-       .wake_tx_queue = mt76_wake_tx_queue,
-       .sta_rate_tbl_update = mt76x2_sta_rate_tbl_update,
-       .release_buffered_frames = mt76_release_buffered_frames,
-       .set_coverage_class = mt76x2_set_coverage_class,
-       .get_survey = mt76_get_survey,
-       .set_tim = mt76x2_set_tim,
-       .set_antenna = mt76x2_set_antenna,
-       .get_antenna = mt76x2_get_antenna,
-       .set_rts_threshold = mt76x2_set_rts_threshold,
-};
-
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
deleted file mode 100644 (file)
index 743da57..0000000
+++ /dev/null
@@ -1,427 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/firmware.h>
-#include <linux/delay.h>
-
-#include "mt76x2.h"
-#include "mt76x2_mcu.h"
-#include "mt76x2_dma.h"
-#include "mt76x2_eeprom.h"
-
-static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len)
-{
-       struct sk_buff *skb;
-
-       skb = alloc_skb(len, GFP_KERNEL);
-       if (!skb)
-               return NULL;
-       memcpy(skb_put(skb, len), data, len);
-
-       return skb;
-}
-
-static struct sk_buff *
-mt76x2_mcu_get_response(struct mt76x2_dev *dev, unsigned long expires)
-{
-       unsigned long timeout;
-
-       if (!time_is_after_jiffies(expires))
-               return NULL;
-
-       timeout = expires - jiffies;
-       wait_event_timeout(dev->mcu.wait, !skb_queue_empty(&dev->mcu.res_q),
-                          timeout);
-       return skb_dequeue(&dev->mcu.res_q);
-}
-
-static int
-mt76x2_mcu_msg_send(struct mt76x2_dev *dev, struct sk_buff *skb,
-                   enum mcu_cmd cmd)
-{
-       unsigned long expires = jiffies + HZ;
-       int ret;
-       u8 seq;
-
-       if (!skb)
-               return -EINVAL;
-
-       mutex_lock(&dev->mcu.mutex);
-
-       seq = ++dev->mcu.msg_seq & 0xf;
-       if (!seq)
-               seq = ++dev->mcu.msg_seq & 0xf;
-
-       ret = mt76x2_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
-       if (ret)
-               goto out;
-
-       while (1) {
-               u32 *rxfce;
-               bool check_seq = false;
-
-               skb = mt76x2_mcu_get_response(dev, expires);
-               if (!skb) {
-                       dev_err(dev->mt76.dev,
-                               "MCU message %d (seq %d) timed out\n", cmd,
-                               seq);
-                       ret = -ETIMEDOUT;
-                       break;
-               }
-
-               rxfce = (u32 *) skb->cb;
-
-               if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
-                       check_seq = true;
-
-               dev_kfree_skb(skb);
-               if (check_seq)
-                       break;
-       }
-
-out:
-       mutex_unlock(&dev->mcu.mutex);
-
-       return ret;
-}
-
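The sequencing above confines MCU message tags to 4 bits and reserves the value 0, so valid sequence numbers cycle through 1..15 and a response is matched against the CMD_SEQ field of the RX FCE info word. A minimal standalone sketch of the wrap rule (next_mcu_seq() is a hypothetical name, not a driver symbol):

#include <stdint.h>

static uint8_t next_mcu_seq(uint8_t *counter)
{
        uint8_t seq = ++(*counter) & 0xf;

        if (!seq)                       /* 0 is reserved; advance once more */
                seq = ++(*counter) & 0xf;
        return seq;
}
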
-static int
-mt76pci_load_rom_patch(struct mt76x2_dev *dev)
-{
-       const struct firmware *fw = NULL;
-       struct mt76x2_patch_header *hdr;
-       bool rom_protect = !is_mt7612(dev);
-       int len, ret = 0;
-       __le32 *cur;
-       u32 patch_mask, patch_reg;
-
-       if (rom_protect && !mt76_poll(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
-               dev_err(dev->mt76.dev,
-                       "Could not get hardware semaphore for ROM PATCH\n");
-               return -ETIMEDOUT;
-       }
-
-       if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
-               patch_mask = BIT(0);
-               patch_reg = MT_MCU_CLOCK_CTL;
-       } else {
-               patch_mask = BIT(1);
-               patch_reg = MT_MCU_COM_REG0;
-       }
-
-       if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
-               dev_info(dev->mt76.dev, "ROM patch already applied\n");
-               goto out;
-       }
-
-       ret = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev);
-       if (ret)
-               goto out;
-
-       if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
-               ret = -EIO;
-               dev_err(dev->mt76.dev, "Failed to load firmware\n");
-               goto out;
-       }
-
-       hdr = (struct mt76x2_patch_header *) fw->data;
-       dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
-
-       mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ROM_PATCH_OFFSET);
-
-       cur = (__le32 *) (fw->data + sizeof(*hdr));
-       len = fw->size - sizeof(*hdr);
-       mt76_wr_copy(dev, MT_MCU_ROM_PATCH_ADDR, cur, len);
-
-       mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
-
-       /* Trigger ROM */
-       mt76_wr(dev, MT_MCU_INT_LEVEL, 4);
-
-       if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 2000)) {
-               dev_err(dev->mt76.dev, "Failed to load ROM patch\n");
-               ret = -ETIMEDOUT;
-       }
-
-out:
-       /* release semaphore */
-       if (rom_protect)
-               mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
-       release_firmware(fw);
-       return ret;
-}
-
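The ROM-patch load above serializes against the MCU through a hardware semaphore, then polls a status register until the patch bit latches. A standalone sketch of that poll-with-timeout idiom, assuming a readl()-style accessor passed in as rd(); the driver's mt76_poll()/mt76_poll_msec() helpers implement roughly this, with delays between reads:

#include <stdbool.h>
#include <stdint.h>

/* true once (rd(reg) & mask) == val, false after 'tries' failed reads */
static bool poll_reg(uint32_t (*rd)(uint32_t reg), uint32_t reg,
                     uint32_t mask, uint32_t val, int tries)
{
        while (tries-- > 0) {
                if ((rd(reg) & mask) == val)
                        return true;
        }
        return false;
}
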
-static int
-mt76pci_load_firmware(struct mt76x2_dev *dev)
-{
-       const struct firmware *fw;
-       const struct mt76x2_fw_header *hdr;
-       int len, ret;
-       __le32 *cur;
-       u32 offset, val;
-
-       ret = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev);
-       if (ret)
-               return ret;
-
-       if (!fw || !fw->data || fw->size < sizeof(*hdr))
-               goto error;
-
-       hdr = (const struct mt76x2_fw_header *) fw->data;
-
-       len = sizeof(*hdr);
-       len += le32_to_cpu(hdr->ilm_len);
-       len += le32_to_cpu(hdr->dlm_len);
-
-       if (fw->size != len)
-               goto error;
-
-       val = le16_to_cpu(hdr->fw_ver);
-       dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
-                (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
-
-       val = le16_to_cpu(hdr->build_ver);
-       dev_info(dev->mt76.dev, "Build: %x\n", val);
-       dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
-
-       cur = (__le32 *) (fw->data + sizeof(*hdr));
-       len = le32_to_cpu(hdr->ilm_len);
-
-       mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ILM_OFFSET);
-       mt76_wr_copy(dev, MT_MCU_ILM_ADDR, cur, len);
-
-       cur += len / sizeof(*cur);
-       len = le32_to_cpu(hdr->dlm_len);
-
-       if (mt76xx_rev(dev) >= MT76XX_REV_E3)
-               offset = MT_MCU_DLM_ADDR_E3;
-       else
-               offset = MT_MCU_DLM_ADDR;
-
-       mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET);
-       mt76_wr_copy(dev, offset, cur, len);
-
-       mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
-
-       val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
-       if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1)
-               mt76_set(dev, MT_MCU_COM_REG0, BIT(30));
-
-       /* trigger firmware */
-       mt76_wr(dev, MT_MCU_INT_LEVEL, 2);
-       if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 200)) {
-               dev_err(dev->mt76.dev, "Firmware failed to start\n");
-               release_firmware(fw);
-               return -ETIMEDOUT;
-       }
-
-       dev_info(dev->mt76.dev, "Firmware running!\n");
-
-       release_firmware(fw);
-
-       return ret;
-
-error:
-       dev_err(dev->mt76.dev, "Invalid firmware\n");
-       release_firmware(fw);
-       return -ENOENT;
-}
-
-static int
-mt76x2_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
-                          u32 val)
-{
-       struct sk_buff *skb;
-       struct {
-           __le32 id;
-           __le32 value;
-       } __packed __aligned(4) msg = {
-           .id = cpu_to_le32(func),
-           .value = cpu_to_le32(val),
-       };
-
-       skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
-       return mt76x2_mcu_msg_send(dev, skb, CMD_FUN_SET_OP);
-}
-
-int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
-                      u8 channel)
-{
-       struct sk_buff *skb;
-       struct {
-               u8 cr_mode;
-               u8 temp;
-               u8 ch;
-               u8 _pad0;
-
-               __le32 cfg;
-       } __packed __aligned(4) msg = {
-               .cr_mode = type,
-               .temp = temp_level,
-               .ch = channel,
-       };
-       u32 val;
-
-       val = BIT(31);
-       val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
-       val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
-       msg.cfg = cpu_to_le32(val);
-
-       /* request the MCU to load the CR values */
-       skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
-       return mt76x2_mcu_msg_send(dev, skb, CMD_LOAD_CR);
-}
-
-int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
-                          u8 bw_index, bool scan)
-{
-       struct sk_buff *skb;
-       struct {
-               u8 idx;
-               u8 scan;
-               u8 bw;
-               u8 _pad0;
-
-               __le16 chainmask;
-               u8 ext_chan;
-               u8 _pad1;
-
-       } __packed __aligned(4) msg = {
-               .idx = channel,
-               .scan = scan,
-               .bw = bw,
-               .chainmask = cpu_to_le16(dev->chainmask),
-       };
-
-       /* first set the channel without the extension channel info */
-       skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
-       mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);
-
-       usleep_range(5000, 10000);
-
-       msg.ext_chan = 0xe0 + bw_index;
-       skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
-       return mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);
-}
-
-int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on)
-{
-       struct sk_buff *skb;
-       struct {
-               __le32 mode;
-               __le32 level;
-       } __packed __aligned(4) msg = {
-               .mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
-               .level = cpu_to_le32(0),
-       };
-
-       skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
-       return mt76x2_mcu_msg_send(dev, skb, CMD_POWER_SAVING_OP);
-}
-
-int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
-                        u32 param)
-{
-       struct sk_buff *skb;
-       struct {
-               __le32 id;
-               __le32 value;
-       } __packed __aligned(4) msg = {
-               .id = cpu_to_le32(type),
-               .value = cpu_to_le32(param),
-       };
-       int ret;
-
-       mt76_clear(dev, MT_MCU_COM_REG0, BIT(31));
-
-       skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
-       ret = mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
-       if (ret)
-               return ret;
-
-       if (WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
-                                   BIT(31), BIT(31), 100)))
-               return -ETIMEDOUT;
-
-       return 0;
-}
-
-int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev,
-                        struct mt76x2_tssi_comp *tssi_data)
-{
-       struct sk_buff *skb;
-       struct {
-               __le32 id;
-               struct mt76x2_tssi_comp data;
-       } __packed __aligned(4) msg = {
-               .id = cpu_to_le32(MCU_CAL_TSSI_COMP),
-               .data = *tssi_data,
-       };
-
-       skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
-       return mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
-}
-
-int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
-                        bool force)
-{
-       struct sk_buff *skb;
-       struct {
-               __le32 channel;
-               __le32 gain_val;
-       } __packed __aligned(4) msg = {
-               .channel = cpu_to_le32(channel),
-               .gain_val = cpu_to_le32(gain),
-       };
-
-       if (force)
-               msg.channel |= cpu_to_le32(BIT(31));
-
-       skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
-       return mt76x2_mcu_msg_send(dev, skb, CMD_INIT_GAIN_OP);
-}
-
-int mt76x2_mcu_init(struct mt76x2_dev *dev)
-{
-       int ret;
-
-       mutex_init(&dev->mcu.mutex);
-
-       ret = mt76pci_load_rom_patch(dev);
-       if (ret)
-               return ret;
-
-       ret = mt76pci_load_firmware(dev);
-       if (ret)
-               return ret;
-
-       mt76x2_mcu_function_select(dev, Q_SELECT, 1);
-       return 0;
-}
-
-int mt76x2_mcu_cleanup(struct mt76x2_dev *dev)
-{
-       struct sk_buff *skb;
-
-       mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
-       usleep_range(20000, 30000);
-
-       while ((skb = skb_dequeue(&dev->mcu.res_q)) != NULL)
-               dev_kfree_skb(skb);
-
-       return 0;
-}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
deleted file mode 100644
index e40293f..0000000
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef __MT76x2_MCU_H
-#define __MT76x2_MCU_H
-
-/* Register definitions */
-#define MT_MCU_CPU_CTL                 0x0704
-#define MT_MCU_CLOCK_CTL               0x0708
-#define MT_MCU_RESET_CTL               0x070C
-#define MT_MCU_INT_LEVEL               0x0718
-#define MT_MCU_COM_REG0                        0x0730
-#define MT_MCU_COM_REG1                        0x0734
-#define MT_MCU_COM_REG2                        0x0738
-#define MT_MCU_COM_REG3                        0x073C
-#define MT_MCU_PCIE_REMAP_BASE1                0x0740
-#define MT_MCU_PCIE_REMAP_BASE2                0x0744
-#define MT_MCU_PCIE_REMAP_BASE3                0x0748
-#define MT_MCU_PCIE_REMAP_BASE4                0x074C
-
-#define MT_LED_CTRL                    0x0770
-#define MT_LED_CTRL_REPLAY(_n)         BIT(0 + (8 * (_n)))
-#define MT_LED_CTRL_POLARITY(_n)       BIT(1 + (8 * (_n)))
-#define MT_LED_CTRL_TX_BLINK_MODE(_n)  BIT(2 + (8 * (_n)))
-#define MT_LED_CTRL_KICK(_n)           BIT(7 + (8 * (_n)))
-
-#define MT_LED_TX_BLINK_0              0x0774
-#define MT_LED_TX_BLINK_1              0x0778
-
-#define MT_LED_S0_BASE                 0x077C
-#define MT_LED_S0(_n)                  (MT_LED_S0_BASE + 8 * (_n))
-#define MT_LED_S1_BASE                 0x0780
-#define MT_LED_S1(_n)                  (MT_LED_S1_BASE + 8 * (_n))
-#define MT_LED_STATUS_OFF_MASK         GENMASK(31, 24)
-#define MT_LED_STATUS_OFF(_v)          (((_v) << __ffs(MT_LED_STATUS_OFF_MASK)) & \
-                                        MT_LED_STATUS_OFF_MASK)
-#define MT_LED_STATUS_ON_MASK          GENMASK(23, 16)
-#define MT_LED_STATUS_ON(_v)           (((_v) << __ffs(MT_LED_STATUS_ON_MASK)) & \
-                                        MT_LED_STATUS_ON_MASK)
-#define MT_LED_STATUS_DURATION_MASK    GENMASK(15, 8)
-#define MT_LED_STATUS_DURATION(_v)     (((_v) << __ffs(MT_LED_STATUS_DURATION_MASK)) & \
-                                        MT_LED_STATUS_DURATION_MASK)
-
-#define MT_MCU_SEMAPHORE_00            0x07B0
-#define MT_MCU_SEMAPHORE_01            0x07B4
-#define MT_MCU_SEMAPHORE_02            0x07B8
-#define MT_MCU_SEMAPHORE_03            0x07BC
-
-#define MT_MCU_ROM_PATCH_OFFSET                0x80000
-#define MT_MCU_ROM_PATCH_ADDR          0x90000
-
-#define MT_MCU_ILM_OFFSET              0x80000
-#define MT_MCU_ILM_ADDR                        0x80000
-
-#define MT_MCU_DLM_OFFSET              0x100000
-#define MT_MCU_DLM_ADDR                        0x90000
-#define MT_MCU_DLM_ADDR_E3             0x90800
-
-enum mcu_cmd {
-       CMD_FUN_SET_OP = 1,
-       CMD_LOAD_CR = 2,
-       CMD_INIT_GAIN_OP = 3,
-       CMD_DYNC_VGA_OP = 6,
-       CMD_TDLS_CH_SW = 7,
-       CMD_BURST_WRITE = 8,
-       CMD_READ_MODIFY_WRITE = 9,
-       CMD_RANDOM_READ = 10,
-       CMD_BURST_READ = 11,
-       CMD_RANDOM_WRITE = 12,
-       CMD_LED_MODE_OP = 16,
-       CMD_POWER_SAVING_OP = 20,
-       CMD_WOW_CONFIG = 21,
-       CMD_WOW_QUERY = 22,
-       CMD_WOW_FEATURE = 24,
-       CMD_CARRIER_DETECT_OP = 28,
-       CMD_RADOR_DETECT_OP = 29,
-       CMD_SWITCH_CHANNEL_OP = 30,
-       CMD_CALIBRATION_OP = 31,
-       CMD_BEACON_OP = 32,
-       CMD_ANTENNA_OP = 33,
-};
-
-enum mcu_function {
-       Q_SELECT = 1,
-       BW_SETTING = 2,
-       USB2_SW_DISCONNECT = 2,
-       USB3_SW_DISCONNECT = 3,
-       LOG_FW_DEBUG_MSG = 4,
-       GET_FW_VERSION = 5,
-};
-
-enum mcu_power_mode {
-       RADIO_OFF = 0x30,
-       RADIO_ON = 0x31,
-       RADIO_OFF_AUTO_WAKEUP = 0x32,
-       RADIO_OFF_ADVANCE = 0x33,
-       RADIO_ON_ADVANCE = 0x34,
-};
-
-enum mcu_calibration {
-       MCU_CAL_R = 1,
-       MCU_CAL_TEMP_SENSOR,
-       MCU_CAL_RXDCOC,
-       MCU_CAL_RC,
-       MCU_CAL_SX_LOGEN,
-       MCU_CAL_LC,
-       MCU_CAL_TX_LOFT,
-       MCU_CAL_TXIQ,
-       MCU_CAL_TSSI,
-       MCU_CAL_TSSI_COMP,
-       MCU_CAL_DPD,
-       MCU_CAL_RXIQC_FI,
-       MCU_CAL_RXIQC_FD,
-       MCU_CAL_PWRON,
-       MCU_CAL_TX_SHAPING,
-};
-
-enum mt76x2_mcu_cr_mode {
-       MT_RF_CR,
-       MT_BBP_CR,
-       MT_RF_BBP_CR,
-       MT_HL_TEMP_CR_UPDATE,
-};
-
-struct mt76x2_tssi_comp {
-       u8 pa_mode;
-       u8 cal_mode;
-       u16 pad;
-
-       u8 slope0;
-       u8 slope1;
-       u8 offset0;
-       u8 offset1;
-} __packed __aligned(4);
-
-struct mt76x2_fw_header {
-       __le32 ilm_len;
-       __le32 dlm_len;
-       __le16 build_ver;
-       __le16 fw_ver;
-       u8 pad[4];
-       char build_time[16];
-};
-
-struct mt76x2_patch_header {
-       char build_time[16];
-       char platform[4];
-       char hw_version[4];
-       char patch_version[4];
-       u8 pad[2];
-};
-
-int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
-                        u32 param);
-int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev, struct mt76x2_tssi_comp *tssi_data);
-int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
-                        bool force);
-
-#endif
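The MT_LED_STATUS_*() helpers above place a value at the low bit of a GENMASK() field and clamp it with the mask. A standalone check of that arithmetic, with a local userspace stand-in for GENMASK() (the lowest set bit of the 23..16 mask is bit 16, which __ffs() would return in the kernel):

#include <assert.h>
#include <stdint.h>

#define GENMASK(h, l)           (((~0u) << (l)) & (~0u >> (31 - (h))))
#define LED_STATUS_ON_MASK      GENMASK(23, 16)

int main(void)
{
        uint32_t on = ((uint32_t)0xab << 16) & LED_STATUS_ON_MASK;

        assert(LED_STATUS_ON_MASK == 0x00ff0000);
        assert(on == 0x00ab0000);
        return 0;
}
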
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c
deleted file mode 100644
index e66f047..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include "mt76x2.h"
-#include "mt76x2_trace.h"
-
-static const struct pci_device_id mt76pci_device_table[] = {
-       { PCI_DEVICE(0x14c3, 0x7662) },
-       { PCI_DEVICE(0x14c3, 0x7612) },
-       { PCI_DEVICE(0x14c3, 0x7602) },
-       { },
-};
-
-static int
-mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
-       struct mt76x2_dev *dev;
-       int ret;
-
-       ret = pcim_enable_device(pdev);
-       if (ret)
-               return ret;
-
-       ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
-       if (ret)
-               return ret;
-
-       pci_set_master(pdev);
-
-       ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-       if (ret)
-               return ret;
-
-       dev = mt76x2_alloc_device(&pdev->dev);
-       if (!dev)
-               return -ENOMEM;
-
-       mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
-
-       dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
-       dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
-
-       ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x2_irq_handler,
-                              IRQF_SHARED, KBUILD_MODNAME, dev);
-       if (ret)
-               goto error;
-
-       ret = mt76x2_register_device(dev);
-       if (ret)
-               goto error;
-
-       /* Fix up ASPM configuration */
-
-       /* RG_SSUSB_G1_CDR_BIR_LTR = 0x9 */
-       mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
-
-       /* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
-       mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
-
-       /* RG_SSUSB_CDR_BR_PE1D = 0x3 */
-       mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
-
-       return 0;
-
-error:
-       ieee80211_free_hw(mt76_hw(dev));
-       return ret;
-}
-
-static void
-mt76pci_remove(struct pci_dev *pdev)
-{
-       struct mt76_dev *mdev = pci_get_drvdata(pdev);
-       struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
-
-       mt76_unregister_device(mdev);
-       mt76x2_cleanup(dev);
-       ieee80211_free_hw(mdev->hw);
-}
-
-MODULE_DEVICE_TABLE(pci, mt76pci_device_table);
-MODULE_FIRMWARE(MT7662_FIRMWARE);
-MODULE_FIRMWARE(MT7662_ROM_PATCH);
-MODULE_LICENSE("Dual BSD/GPL");
-
-static struct pci_driver mt76pci_driver = {
-       .name           = KBUILD_MODNAME,
-       .id_table       = mt76pci_device_table,
-       .probe          = mt76pci_probe,
-       .remove         = mt76pci_remove,
-};
-
-module_pci_driver(mt76pci_driver);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
deleted file mode 100644
index 84c96c0..0000000
+++ /dev/null
@@ -1,499 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include "mt76x2.h"
-#include "mt76x2_mcu.h"
-#include "mt76x2_eeprom.h"
-
-static bool
-mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev)
-{
-       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
-       u32 flag = 0;
-
-       if (!mt76x2_tssi_enabled(dev))
-               return false;
-
-       if (mt76x2_channel_silent(dev))
-               return false;
-
-       if (chan->band == NL80211_BAND_5GHZ)
-               flag |= BIT(0);
-
-       if (mt76x2_ext_pa_enabled(dev, chan->band))
-               flag |= BIT(8);
-
-       mt76x2_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
-       dev->cal.tssi_cal_done = true;
-       return true;
-}
-
-static void
-mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped)
-{
-       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
-       bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
-
-       if (dev->cal.channel_cal_done)
-               return;
-
-       if (mt76x2_channel_silent(dev))
-               return;
-
-       if (!dev->cal.tssi_cal_done)
-               mt76x2_phy_tssi_init_cal(dev);
-
-       if (!mac_stopped)
-               mt76x2_mac_stop(dev, false);
-
-       if (is_5ghz)
-               mt76x2_mcu_calibrate(dev, MCU_CAL_LC, 0);
-
-       mt76x2_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
-       mt76x2_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
-       mt76x2_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
-       mt76x2_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);
-       mt76x2_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0);
-
-       if (!mac_stopped)
-               mt76x2_mac_resume(dev);
-
-       mt76x2_apply_gain_adj(dev);
-
-       dev->cal.channel_cal_done = true;
-}
-
-void mt76x2_phy_set_antenna(struct mt76x2_dev *dev)
-{
-       u32 val;
-
-       val = mt76_rr(dev, MT_BBP(AGC, 0));
-       val &= ~(BIT(4) | BIT(1));
-       switch (dev->mt76.antenna_mask) {
-       case 1:
-               /* disable mac DAC control */
-               mt76_clear(dev, MT_BBP(IBI, 9), BIT(11));
-               mt76_clear(dev, MT_BBP(TXBE, 5), 3);
-               mt76_rmw_field(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT, 0x3);
-               mt76_rmw_field(dev, MT_BBP(CORE, 32), GENMASK(21, 20), 2);
-               /* disable DAC 1 */
-               mt76_rmw_field(dev, MT_BBP(CORE, 33), GENMASK(12, 9), 4);
-
-               val &= ~(BIT(3) | BIT(0));
-               break;
-       case 2:
-               /* disable mac DAC control */
-               mt76_clear(dev, MT_BBP(IBI, 9), BIT(11));
-               mt76_rmw_field(dev, MT_BBP(TXBE, 5), 3, 1);
-               mt76_rmw_field(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT, 0xc);
-               mt76_rmw_field(dev, MT_BBP(CORE, 32), GENMASK(21, 20), 1);
-               /* disable DAC 0 */
-               mt76_rmw_field(dev, MT_BBP(CORE, 33), GENMASK(12, 9), 1);
-
-               val &= ~BIT(3);
-               val |= BIT(0);
-               break;
-       case 3:
-       default:
-               /* enable mac DAC control */
-               mt76_set(dev, MT_BBP(IBI, 9), BIT(11));
-               mt76_set(dev, MT_BBP(TXBE, 5), 3);
-               mt76_rmw_field(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT, 0xf);
-               mt76_clear(dev, MT_BBP(CORE, 32), GENMASK(21, 20));
-               mt76_clear(dev, MT_BBP(CORE, 33), GENMASK(12, 9));
-
-               val &= ~BIT(0);
-               val |= BIT(3);
-               break;
-       }
-       mt76_wr(dev, MT_BBP(AGC, 0), val);
-}
-
-static void
-mt76x2_get_agc_gain(struct mt76x2_dev *dev, u8 *dest)
-{
-       dest[0] = mt76_get_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN);
-       dest[1] = mt76_get_field(dev, MT_BBP(AGC, 9), MT_BBP_AGC_GAIN);
-}
-
-static int
-mt76x2_get_rssi_gain_thresh(struct mt76x2_dev *dev)
-{
-       switch (dev->mt76.chandef.width) {
-       case NL80211_CHAN_WIDTH_80:
-               return -62;
-       case NL80211_CHAN_WIDTH_40:
-               return -65;
-       default:
-               return -68;
-       }
-}
-
-static int
-mt76x2_get_low_rssi_gain_thresh(struct mt76x2_dev *dev)
-{
-       switch (dev->mt76.chandef.width) {
-       case NL80211_CHAN_WIDTH_80:
-               return -76;
-       case NL80211_CHAN_WIDTH_40:
-               return -79;
-       default:
-               return -82;
-       }
-}
-
-static void
-mt76x2_phy_set_gain_val(struct mt76x2_dev *dev)
-{
-       u32 val;
-       u8 gain_val[2];
-
-       gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
-       gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
-
-       if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
-               val = 0x1e42 << 16;
-       else
-               val = 0x1836 << 16;
-
-       val |= 0xf8;
-
-       mt76_wr(dev, MT_BBP(AGC, 8),
-               val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[0]));
-       mt76_wr(dev, MT_BBP(AGC, 9),
-               val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[1]));
-
-       if (dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR)
-               mt76x2_dfs_adjust_agc(dev);
-}
-
-static void
-mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev)
-{
-       u32 false_cca;
-       u8 limit = dev->cal.low_gain > 0 ? 16 : 4;
-
-       false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1));
-       dev->cal.false_cca = false_cca;
-       if (false_cca > 800 && dev->cal.agc_gain_adjust < limit)
-               dev->cal.agc_gain_adjust += 2;
-       else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) ||
-                (dev->cal.agc_gain_adjust >= limit && false_cca < 500))
-               dev->cal.agc_gain_adjust -= 2;
-       else
-               return;
-
-       mt76x2_phy_set_gain_val(dev);
-}
-
-static void
-mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev)
-{
-       u8 *gain = dev->cal.agc_gain_init;
-       u8 low_gain_delta, gain_delta;
-       bool gain_change;
-       int low_gain;
-       u32 val;
-
-       dev->cal.avg_rssi_all = mt76x2_phy_get_min_avg_rssi(dev);
-
-       low_gain = (dev->cal.avg_rssi_all > mt76x2_get_rssi_gain_thresh(dev)) +
-                  (dev->cal.avg_rssi_all > mt76x2_get_low_rssi_gain_thresh(dev));
-
-       gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2);
-       dev->cal.low_gain = low_gain;
-
-       if (!gain_change) {
-               mt76x2_phy_adjust_vga_gain(dev);
-               return;
-       }
-
-       if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) {
-               mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211);
-               val = mt76_rr(dev, MT_BBP(AGC, 26)) & ~0xf;
-               if (low_gain == 2)
-                       val |= 0x3;
-               else
-                       val |= 0x5;
-               mt76_wr(dev, MT_BBP(AGC, 26), val);
-       } else {
-               mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423);
-       }
-
-       if (mt76x2_has_ext_lna(dev))
-               low_gain_delta = 10;
-       else
-               low_gain_delta = 14;
-
-       if (low_gain == 2) {
-               mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
-               mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
-               mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808);
-               gain_delta = low_gain_delta;
-               dev->cal.agc_gain_adjust = 0;
-       } else {
-               mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
-               if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
-                       mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
-               else
-                       mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
-               mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
-               gain_delta = 0;
-               dev->cal.agc_gain_adjust = low_gain_delta;
-       }
-
-       dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
-       dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
-       mt76x2_phy_set_gain_val(dev);
-
-       /* clear false CCA counters */
-       mt76_rr(dev, MT_RX_STAT_1);
-}
-
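The bucketing above sums two boolean comparisons, so low_gain lands at 0 (weak), 1 (medium) or 2 (strong signal); only a flip of bit 1, i.e. entering or leaving the strong bucket, forces the full gain reprogramming path, otherwise the cheaper false-CCA-driven VGA adjustment runs. A standalone check using the 80 MHz thresholds of -62 and -76 dBm from the helpers above:

#include <assert.h>

int main(void)
{
        int rssi = -60;         /* stronger than both thresholds */
        int low_gain = (rssi > -62) + (rssi > -76);
        int prev_low_gain = 1;  /* previously in the medium bucket */

        assert(low_gain == 2);
        assert((prev_low_gain & 2) ^ (low_gain & 2));   /* bit 1 flipped */
        return 0;
}
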
-int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
-                          struct cfg80211_chan_def *chandef)
-{
-       struct ieee80211_channel *chan = chandef->chan;
-       bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
-       enum nl80211_band band = chan->band;
-       u8 channel;
-
-       u32 ext_cca_chan[4] = {
-               [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
-               [1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
-               [2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
-               [3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
-       };
-       int ch_group_index;
-       u8 bw, bw_index;
-       int freq, freq1;
-       int ret;
-
-       dev->cal.channel_cal_done = false;
-       freq = chandef->chan->center_freq;
-       freq1 = chandef->center_freq1;
-       channel = chan->hw_value;
-
-       switch (chandef->width) {
-       case NL80211_CHAN_WIDTH_40:
-               bw = 1;
-               if (freq1 > freq) {
-                       bw_index = 1;
-                       ch_group_index = 0;
-               } else {
-                       bw_index = 3;
-                       ch_group_index = 1;
-               }
-               channel += 2 - ch_group_index * 4;
-               break;
-       case NL80211_CHAN_WIDTH_80:
-               ch_group_index = (freq - freq1 + 30) / 20;
-               if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
-                       ch_group_index = 0;
-               bw = 2;
-               bw_index = ch_group_index;
-               channel += 6 - ch_group_index * 4;
-               break;
-       default:
-               bw = 0;
-               bw_index = 0;
-               ch_group_index = 0;
-               break;
-       }
-
-       mt76x2_read_rx_gain(dev);
-       mt76x2_phy_set_txpower_regs(dev, band);
-       mt76x2_configure_tx_delay(dev, band, bw);
-       mt76x2_phy_set_txpower(dev);
-
-       mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
-       mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
-
-       mt76_rmw(dev, MT_EXT_CCA_CFG,
-                (MT_EXT_CCA_CFG_CCA0 |
-                 MT_EXT_CCA_CFG_CCA1 |
-                 MT_EXT_CCA_CFG_CCA2 |
-                 MT_EXT_CCA_CFG_CCA3 |
-                 MT_EXT_CCA_CFG_CCA_MASK),
-                ext_cca_chan[ch_group_index]);
-
-       ret = mt76x2_mcu_set_channel(dev, channel, bw, bw_index, scan);
-       if (ret)
-               return ret;
-
-       mt76x2_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);
-
-       mt76x2_phy_set_antenna(dev);
-
-       /* Enable LDPC Rx */
-       if (mt76xx_rev(dev) >= MT76XX_REV_E3)
-               mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
-
-       if (!dev->cal.init_cal_done) {
-               u8 val = mt76x2_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
-
-               if (val != 0xff)
-                       mt76x2_mcu_calibrate(dev, MCU_CAL_R, 0);
-       }
-
-       mt76x2_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);
-
-       /* Rx LPF calibration */
-       if (!dev->cal.init_cal_done)
-               mt76x2_mcu_calibrate(dev, MCU_CAL_RC, 0);
-
-       dev->cal.init_cal_done = true;
-
-       mt76_wr(dev, MT_BBP(AGC, 61), 0xFF64A4E2);
-       mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010);
-       mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404);
-       mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
-       mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101B3F);
-
-       if (scan)
-               return 0;
-
-       dev->cal.low_gain = -1;
-       mt76x2_phy_channel_calibrate(dev, true);
-       mt76x2_get_agc_gain(dev, dev->cal.agc_gain_init);
-       memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
-              sizeof(dev->cal.agc_gain_cur));
-
-       /* init default values for temp compensation */
-       if (mt76x2_tssi_enabled(dev)) {
-               mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
-                              0x38);
-               mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
-                              0x38);
-       }
-
-       ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
-                                    MT_CALIBRATE_INTERVAL);
-
-       return 0;
-}
-
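For 80 MHz channels the switch above rewrites the control channel into the block-center channel the MCU expects: ch_group_index picks one of the four 20 MHz positions inside the block and channel += 6 - index * 4 lands on the center. A standalone check for control channel 36 (5180 MHz) inside the 36..48 block, whose center frequency 5210 MHz corresponds to channel 42:

#include <assert.h>

int main(void)
{
        int freq = 5180, freq1 = 5210, channel = 36;
        int ch_group_index = (freq - freq1 + 30) / 20;  /* -> 0 */

        channel += 6 - ch_group_index * 4;              /* -> 42 */
        assert(ch_group_index == 0 && channel == 42);
        return 0;
}
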
-static void
-mt76x2_phy_tssi_compensate(struct mt76x2_dev *dev)
-{
-       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
-       struct mt76x2_tx_power_info txp;
-       struct mt76x2_tssi_comp t = {};
-
-       if (!dev->cal.tssi_cal_done)
-               return;
-
-       if (!dev->cal.tssi_comp_pending) {
-               /* TSSI trigger */
-               t.cal_mode = BIT(0);
-               mt76x2_mcu_tssi_comp(dev, &t);
-               dev->cal.tssi_comp_pending = true;
-       } else {
-               if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4))
-                       return;
-
-               dev->cal.tssi_comp_pending = false;
-               mt76x2_get_power_info(dev, &txp, chan);
-
-               if (mt76x2_ext_pa_enabled(dev, chan->band))
-                       t.pa_mode = 1;
-
-               t.cal_mode = BIT(1);
-               t.slope0 = txp.chain[0].tssi_slope;
-               t.offset0 = txp.chain[0].tssi_offset;
-               t.slope1 = txp.chain[1].tssi_slope;
-               t.offset1 = txp.chain[1].tssi_offset;
-               mt76x2_mcu_tssi_comp(dev, &t);
-
-               if (t.pa_mode || dev->cal.dpd_cal_done)
-                       return;
-
-               usleep_range(10000, 20000);
-               mt76x2_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value);
-               dev->cal.dpd_cal_done = true;
-       }
-}
-
-static void
-mt76x2_phy_temp_compensate(struct mt76x2_dev *dev)
-{
-       struct mt76x2_temp_comp t;
-       int temp, db_diff;
-
-       if (mt76x2_get_temp_comp(dev, &t))
-               return;
-
-       temp = mt76_get_field(dev, MT_TEMP_SENSOR, MT_TEMP_SENSOR_VAL);
-       temp -= t.temp_25_ref;
-       temp = (temp * 1789) / 1000 + 25;
-       dev->cal.temp = temp;
-
-       if (temp > 25)
-               db_diff = (temp - 25) / t.high_slope;
-       else
-               db_diff = (25 - temp) / t.low_slope;
-
-       db_diff = min(db_diff, t.upper_bound);
-       db_diff = max(db_diff, t.lower_bound);
-
-       mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
-                      db_diff * 2);
-       mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
-                      db_diff * 2);
-}
-
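The compensation above rescales the raw sensor delta by 1789/1000 around the 25 degree EEPROM reference before deriving a slope-scaled, clamped dB correction. A standalone check of the temperature conversion: a reading 10 counts above the reference maps to 42 degrees, since the integer division truncates 17.89 to 17:

#include <assert.h>

int main(void)
{
        int raw = 110, temp_25_ref = 100;
        int temp = ((raw - temp_25_ref) * 1789) / 1000 + 25;

        assert(temp == 42);     /* 10 * 1789 / 1000 = 17, plus 25 */
        return 0;
}
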
-void mt76x2_phy_calibrate(struct work_struct *work)
-{
-       struct mt76x2_dev *dev;
-
-       dev = container_of(work, struct mt76x2_dev, cal_work.work);
-       mt76x2_phy_channel_calibrate(dev, false);
-       mt76x2_phy_tssi_compensate(dev);
-       mt76x2_phy_temp_compensate(dev);
-       mt76x2_phy_update_channel_gain(dev);
-       ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
-                                    MT_CALIBRATE_INTERVAL);
-}
-
-int mt76x2_phy_start(struct mt76x2_dev *dev)
-{
-       int ret;
-
-       ret = mt76x2_mcu_set_radio_state(dev, true);
-       if (ret)
-               return ret;
-
-       mt76x2_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
-
-       return ret;
-}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c
deleted file mode 100644
index 9fd6ab4..0000000
+++ /dev/null
@@ -1,349 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "mt76x2.h"
-#include "mt76x2_eeprom.h"
-
-static void
-mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
-{
-       s8 gain;
-
-       gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
-       gain -= offset / 2;
-       mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
-}
-
-static void
-mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
-{
-       s8 gain;
-
-       gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
-       gain += offset;
-       mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
-}
-
-void mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
-{
-       s8 *gain_adj = dev->cal.rx.high_gain;
-
-       mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
-       mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
-
-       mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
-       mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
-}
-EXPORT_SYMBOL_GPL(mt76x2_apply_gain_adj);
-
-void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
-                                enum nl80211_band band)
-{
-       u32 pa_mode[2];
-       u32 pa_mode_adj;
-
-       if (band == NL80211_BAND_2GHZ) {
-               pa_mode[0] = 0x010055ff;
-               pa_mode[1] = 0x00550055;
-
-               mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
-               mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
-
-               if (mt76x2_ext_pa_enabled(dev, band)) {
-                       mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
-                       mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
-               } else {
-                       mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
-                       mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
-               }
-       } else {
-               pa_mode[0] = 0x0000ffff;
-               pa_mode[1] = 0x00ff00ff;
-
-               if (mt76x2_ext_pa_enabled(dev, band)) {
-                       mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
-                       mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
-               } else {
-                       mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
-                       mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
-               }
-
-               if (mt76x2_ext_pa_enabled(dev, band))
-                       pa_mode_adj = 0x04000000;
-               else
-                       pa_mode_adj = 0;
-
-               mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
-               mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
-       }
-
-       mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
-       mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
-       mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
-       mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
-
-       if (mt76x2_ext_pa_enabled(dev, band)) {
-               u32 val;
-
-               if (band == NL80211_BAND_2GHZ)
-                       val = 0x3c3c023c;
-               else
-                       val = 0x363c023c;
-
-               mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
-               mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
-               mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
-       } else {
-               if (band == NL80211_BAND_2GHZ) {
-                       u32 val = 0x0f3c3c3c;
-
-                       mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
-                       mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
-                       mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
-               } else {
-                       mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
-                       mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
-                       mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
-               }
-       }
-}
-EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower_regs);
-
-static void
-mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
-{
-       int i;
-
-       for (i = 0; i < sizeof(r->all); i++)
-               if (r->all[i] > limit)
-                       r->all[i] = limit;
-}
-
-static u32
-mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
-{
-       u32 val = 0;
-
-       val |= (v1 & (BIT(6) - 1)) << 0;
-       val |= (v2 & (BIT(6) - 1)) << 8;
-       val |= (v3 & (BIT(6) - 1)) << 16;
-       val |= (v4 & (BIT(6) - 1)) << 24;
-       return val;
-}
-
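mt76x2_tx_power_mask() above packs four 6-bit per-rate power values into the four bytes of a single register word. A standalone re-implementation confirming the layout (pack_power() is a local stand-in, not a driver symbol):

#include <assert.h>
#include <stdint.h>

static uint32_t pack_power(uint8_t v1, uint8_t v2, uint8_t v3, uint8_t v4)
{
        return ((uint32_t)(v1 & 0x3f)) |
               ((uint32_t)(v2 & 0x3f) << 8) |
               ((uint32_t)(v3 & 0x3f) << 16) |
               ((uint32_t)(v4 & 0x3f) << 24);
}

int main(void)
{
        /* one 6-bit value per byte: 0x16141210 */
        assert(pack_power(0x10, 0x12, 0x14, 0x16) == 0x16141210);
        return 0;
}
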
-static void
-mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset)
-{
-       int i;
-
-       for (i = 0; i < sizeof(r->all); i++)
-               r->all[i] += offset;
-}
-
-static int
-mt76x2_get_min_rate_power(struct mt76_rate_power *r)
-{
-       int i;
-       s8 ret = 0;
-
-       for (i = 0; i < sizeof(r->all); i++) {
-               if (!r->all[i])
-                       continue;
-
-               if (ret)
-                       ret = min(ret, r->all[i]);
-               else
-                       ret = r->all[i];
-       }
-
-       return ret;
-}
-
-void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
-{
-       enum nl80211_chan_width width = dev->mt76.chandef.width;
-       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
-       struct mt76x2_tx_power_info txp;
-       int txp_0, txp_1, delta = 0;
-       struct mt76_rate_power t = {};
-       int base_power, gain;
-
-       mt76x2_get_power_info(dev, &txp, chan);
-
-       if (width == NL80211_CHAN_WIDTH_40)
-               delta = txp.delta_bw40;
-       else if (width == NL80211_CHAN_WIDTH_80)
-               delta = txp.delta_bw80;
-
-       mt76x2_get_rate_power(dev, &t, chan);
-       mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power);
-       mt76x2_limit_rate_power(&t, dev->txpower_conf);
-       dev->txpower_cur = mt76x2_get_max_rate_power(&t);
-
-       base_power = mt76x2_get_min_rate_power(&t);
-       delta += base_power - txp.chain[0].target_power;
-       txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta;
-       txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta;
-
-       gain = min(txp_0, txp_1);
-       if (gain < 0) {
-               base_power -= gain;
-               txp_0 -= gain;
-               txp_1 -= gain;
-       } else if (gain > 0x2f) {
-               base_power -= gain - 0x2f;
-               txp_0 = 0x2f;
-               txp_1 = 0x2f;
-       }
-
-       mt76x2_add_rate_power_offset(&t, -base_power);
-       dev->target_power = txp.chain[0].target_power;
-       dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
-       dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
-       dev->rate_power = t;
-
-       mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
-       mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
-
-       mt76_wr(dev, MT_TX_PWR_CFG_0,
-               mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2]));
-       mt76_wr(dev, MT_TX_PWR_CFG_1,
-               mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2]));
-       mt76_wr(dev, MT_TX_PWR_CFG_2,
-               mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10]));
-       mt76_wr(dev, MT_TX_PWR_CFG_3,
-               mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2]));
-       mt76_wr(dev, MT_TX_PWR_CFG_4,
-               mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0));
-       mt76_wr(dev, MT_TX_PWR_CFG_7,
-               mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8]));
-       mt76_wr(dev, MT_TX_PWR_CFG_8,
-               mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0));
-       mt76_wr(dev, MT_TX_PWR_CFG_9,
-               mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0));
-}
-EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower);
-
-void mt76x2_configure_tx_delay(struct mt76x2_dev *dev,
-                              enum nl80211_band band, u8 bw)
-{
-       u32 cfg0, cfg1;
-
-       if (mt76x2_ext_pa_enabled(dev, band)) {
-               cfg0 = bw ? 0x000b0c01 : 0x00101101;
-               cfg1 = 0x00011414;
-       } else {
-               cfg0 = bw ? 0x000b0b01 : 0x00101001;
-               cfg1 = 0x00021414;
-       }
-       mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
-       mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
-
-       mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15);
-}
-EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay);
-
-void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl)
-{
-       int core_val, agc_val;
-
-       switch (width) {
-       case NL80211_CHAN_WIDTH_80:
-               core_val = 3;
-               agc_val = 7;
-               break;
-       case NL80211_CHAN_WIDTH_40:
-               core_val = 2;
-               agc_val = 3;
-               break;
-       default:
-               core_val = 0;
-               agc_val = 1;
-               break;
-       }
-
-       mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
-       mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
-       mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
-       mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
-}
-EXPORT_SYMBOL_GPL(mt76x2_phy_set_bw);
-
-void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper)
-{
-       switch (band) {
-       case NL80211_BAND_2GHZ:
-               mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
-               mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
-               break;
-       case NL80211_BAND_5GHZ:
-               mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
-               mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
-               break;
-       }
-
-       mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
-                      primary_upper);
-}
-EXPORT_SYMBOL_GPL(mt76x2_phy_set_band);
-
-int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev)
-{
-       struct mt76x2_sta *sta;
-       struct mt76_wcid *wcid;
-       int i, j, min_rssi = 0;
-       s8 cur_rssi;
-
-       local_bh_disable();
-       rcu_read_lock();
-
-       for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
-               unsigned long mask = dev->wcid_mask[i];
-
-               if (!mask)
-                       continue;
-
-               for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
-                       if (!(mask & 1))
-                               continue;
-
-                       wcid = rcu_dereference(dev->wcid[j]);
-                       if (!wcid)
-                               continue;
-
-                       sta = container_of(wcid, struct mt76x2_sta, wcid);
-                       spin_lock(&dev->mt76.rx_lock);
-                       if (sta->inactive_count++ < 5)
-                               cur_rssi = ewma_signal_read(&sta->rssi);
-                       else
-                               cur_rssi = 0;
-                       spin_unlock(&dev->mt76.rx_lock);
-
-                       if (cur_rssi < min_rssi)
-                               min_rssi = cur_rssi;
-               }
-       }
-
-       rcu_read_unlock();
-       local_bh_enable();
-
-       if (!min_rssi)
-               return -75;
-
-       return min_rssi;
-}
-EXPORT_SYMBOL_GPL(mt76x2_phy_get_min_avg_rssi);
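The RSSI scan above walks the WCID allocation bitmap one word at a time: j tracks the absolute station index while a copy of the word is shifted right until it empties, so a sparse word costs only as many iterations as its highest set bit. A standalone sketch of the same walk over one 64-bit word:

#include <stdio.h>

int main(void)
{
        unsigned long mask = 0xaUL;     /* bits 1 and 3 set */
        int j;

        for (j = 64; mask; j++, mask >>= 1) {   /* word index 1 */
                if (!(mask & 1))
                        continue;
                printf("station index %d\n", j);        /* 65, then 67 */
        }
        return 0;
}
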
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
deleted file mode 100644
index 1551ea4..0000000
+++ /dev/null
@@ -1,619 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef __MT76x2_REGS_H
-#define __MT76x2_REGS_H
-
-#define MT_ASIC_VERSION                        0x0000
-
-#define MT76XX_REV_E3          0x22
-#define MT76XX_REV_E4          0x33
-
-#define MT_CMB_CTRL                    0x0020
-#define MT_CMB_CTRL_XTAL_RDY           BIT(22)
-#define MT_CMB_CTRL_PLL_LD             BIT(23)
-
-#define MT_EFUSE_CTRL                  0x0024
-#define MT_EFUSE_CTRL_AOUT             GENMASK(5, 0)
-#define MT_EFUSE_CTRL_MODE             GENMASK(7, 6)
-#define MT_EFUSE_CTRL_LDO_OFF_TIME     GENMASK(13, 8)
-#define MT_EFUSE_CTRL_LDO_ON_TIME      GENMASK(15, 14)
-#define MT_EFUSE_CTRL_AIN              GENMASK(25, 16)
-#define MT_EFUSE_CTRL_KICK             BIT(30)
-#define MT_EFUSE_CTRL_SEL              BIT(31)
-
-#define MT_EFUSE_DATA_BASE             0x0028
-#define MT_EFUSE_DATA(_n)              (MT_EFUSE_DATA_BASE + ((_n) << 2))
-
-#define MT_COEXCFG0                    0x0040
-#define MT_COEXCFG0_COEX_EN            BIT(0)
-
-#define MT_WLAN_FUN_CTRL               0x0080
-#define MT_WLAN_FUN_CTRL_WLAN_EN       BIT(0)
-#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN   BIT(1)
-#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2)
-
-#define MT_WLAN_FUN_CTRL_WLAN_RESET    BIT(3) /* MT76x0 */
-#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */
-
-#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ  BIT(4)
-#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL        BIT(5)
-#define MT_WLAN_FUN_CTRL_INV_ANT_SEL   BIT(6)
-#define MT_WLAN_FUN_CTRL_WAKE_HOST     BIT(7)
-
-#define MT_WLAN_FUN_CTRL_THERM_RST     BIT(8) /* MT76x2 */
-#define MT_WLAN_FUN_CTRL_THERM_CKEN    BIT(9) /* MT76x2 */
-
-#define MT_WLAN_FUN_CTRL_GPIO_IN       GENMASK(15, 8) /* MT76x0 */
-#define MT_WLAN_FUN_CTRL_GPIO_OUT      GENMASK(23, 16) /* MT76x0 */
-#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN   GENMASK(31, 24) /* MT76x0 */
-
-#define MT_XO_CTRL0                    0x0100
-#define MT_XO_CTRL1                    0x0104
-#define MT_XO_CTRL2                    0x0108
-#define MT_XO_CTRL3                    0x010c
-#define MT_XO_CTRL4                    0x0110
-
-#define MT_XO_CTRL5                    0x0114
-#define MT_XO_CTRL5_C2_VAL             GENMASK(14, 8)
-
-#define MT_XO_CTRL6                    0x0118
-#define MT_XO_CTRL6_C2_CTRL            GENMASK(14, 8)
-
-#define MT_XO_CTRL7                    0x011c
-
-#define MT_USB_U3DMA_CFG               0x9018
-#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT        GENMASK(7, 0)
-#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8)
-#define MT_USB_DMA_CFG_UDMA_TX_WL_DROP BIT(16)
-#define MT_USB_DMA_CFG_WAKE_UP_EN      BIT(17)
-#define MT_USB_DMA_CFG_RX_DROP_OR_PAD  BIT(18)
-#define MT_USB_DMA_CFG_TX_CLR          BIT(19)
-#define MT_USB_DMA_CFG_TXOP_HALT       BIT(20)
-#define MT_USB_DMA_CFG_RX_BULK_AGG_EN  BIT(21)
-#define MT_USB_DMA_CFG_RX_BULK_EN      BIT(22)
-#define MT_USB_DMA_CFG_TX_BULK_EN      BIT(23)
-#define MT_USB_DMA_CFG_EP_OUT_VALID    GENMASK(29, 24)
-#define MT_USB_DMA_CFG_RX_BUSY         BIT(30)
-#define MT_USB_DMA_CFG_TX_BUSY         BIT(31)
-
-#define MT_WLAN_MTC_CTRL               0x10148
-#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
-#define MT_WLAN_MTC_CTRL_PWR_ACK       BIT(12)
-#define MT_WLAN_MTC_CTRL_PWR_ACK_S     BIT(13)
-#define MT_WLAN_MTC_CTRL_BBP_MEM_PD    GENMASK(19, 16)
-#define MT_WLAN_MTC_CTRL_PBF_MEM_PD    BIT(20)
-#define MT_WLAN_MTC_CTRL_FCE_MEM_PD    BIT(21)
-#define MT_WLAN_MTC_CTRL_TSO_MEM_PD    BIT(22)
-#define MT_WLAN_MTC_CTRL_BBP_MEM_RB    BIT(24)
-#define MT_WLAN_MTC_CTRL_PBF_MEM_RB    BIT(25)
-#define MT_WLAN_MTC_CTRL_FCE_MEM_RB    BIT(26)
-#define MT_WLAN_MTC_CTRL_TSO_MEM_RB    BIT(27)
-#define MT_WLAN_MTC_CTRL_STATE_UP      BIT(28)
-
-#define MT_INT_SOURCE_CSR              0x0200
-#define MT_INT_MASK_CSR                        0x0204
-
-#define MT_INT_RX_DONE(_n)             BIT(_n)
-#define MT_INT_RX_DONE_ALL             GENMASK(1, 0)
-#define MT_INT_TX_DONE_ALL             GENMASK(13, 4)
-#define MT_INT_TX_DONE(_n)             BIT((_n) + 4)
-#define MT_INT_RX_COHERENT             BIT(16)
-#define MT_INT_TX_COHERENT             BIT(17)
-#define MT_INT_ANY_COHERENT            BIT(18)
-#define MT_INT_MCU_CMD                 BIT(19)
-#define MT_INT_TBTT                    BIT(20)
-#define MT_INT_PRE_TBTT                        BIT(21)
-#define MT_INT_TX_STAT                 BIT(22)
-#define MT_INT_AUTO_WAKEUP             BIT(23)
-#define MT_INT_GPTIMER                 BIT(24)
-#define MT_INT_RXDELAYINT              BIT(26)
-#define MT_INT_TXDELAYINT              BIT(27)
-
-#define MT_WPDMA_GLO_CFG               0x0208
-#define MT_WPDMA_GLO_CFG_TX_DMA_EN     BIT(0)
-#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY   BIT(1)
-#define MT_WPDMA_GLO_CFG_RX_DMA_EN     BIT(2)
-#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY   BIT(3)
-#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE        GENMASK(5, 4)
-#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE     BIT(6)
-#define MT_WPDMA_GLO_CFG_BIG_ENDIAN    BIT(7)
-#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN   GENMASK(15, 8)
-#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS  BIT(30)
-#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET  BIT(31)
-
-#define MT_WPDMA_RST_IDX               0x020c
-
-#define MT_WPDMA_DELAY_INT_CFG         0x0210
-
-#define MT_WMM_AIFSN           0x0214
-#define MT_WMM_AIFSN_MASK              GENMASK(3, 0)
-#define MT_WMM_AIFSN_SHIFT(_n)         ((_n) * 4)
-
-#define MT_WMM_CWMIN           0x0218
-#define MT_WMM_CWMIN_MASK              GENMASK(3, 0)
-#define MT_WMM_CWMIN_SHIFT(_n)         ((_n) * 4)
-
-#define MT_WMM_CWMAX           0x021c
-#define MT_WMM_CWMAX_MASK              GENMASK(3, 0)
-#define MT_WMM_CWMAX_SHIFT(_n)         ((_n) * 4)
-
-#define MT_WMM_TXOP_BASE               0x0220
-#define MT_WMM_TXOP(_n)                        (MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
-#define MT_WMM_TXOP_SHIFT(_n)          (((_n) & 1) * 16)
-#define MT_WMM_TXOP_MASK               GENMASK(15, 0)
-
-#define MT_TSO_CTRL                    0x0250
-#define MT_HEADER_TRANS_CTRL_REG       0x0260
-
-#define MT_TX_RING_BASE                        0x0300
-#define MT_RX_RING_BASE                        0x03c0
-
-#define MT_TX_HW_QUEUE_MCU             8
-#define MT_TX_HW_QUEUE_MGMT            9
-
-#define MT_US_CYC_CFG                  0x02a4
-#define MT_US_CYC_CNT                  GENMASK(7, 0)
-
-#define MT_PBF_SYS_CTRL                        0x0400
-#define MT_PBF_SYS_CTRL_MCU_RESET      BIT(0)
-#define MT_PBF_SYS_CTRL_DMA_RESET      BIT(1)
-#define MT_PBF_SYS_CTRL_MAC_RESET      BIT(2)
-#define MT_PBF_SYS_CTRL_PBF_RESET      BIT(3)
-#define MT_PBF_SYS_CTRL_ASY_RESET      BIT(4)
-
-#define MT_PBF_CFG                     0x0404
-#define MT_PBF_CFG_TX0Q_EN             BIT(0)
-#define MT_PBF_CFG_TX1Q_EN             BIT(1)
-#define MT_PBF_CFG_TX2Q_EN             BIT(2)
-#define MT_PBF_CFG_TX3Q_EN             BIT(3)
-#define MT_PBF_CFG_RX0Q_EN             BIT(4)
-#define MT_PBF_CFG_RX_DROP_EN          BIT(8)
-
-#define MT_PBF_TX_MAX_PCNT             0x0408
-#define MT_PBF_RX_MAX_PCNT             0x040c
-
-#define MT_BCN_OFFSET_BASE             0x041c
-#define MT_BCN_OFFSET(_n)              (MT_BCN_OFFSET_BASE + ((_n) << 2))
-
-#define MT_RF_BYPASS_0                 0x0504
-#define MT_RF_BYPASS_1                 0x0508
-#define MT_RF_SETTING_0                        0x050c
-
-#define MT_RF_DATA_WRITE               0x0524
-
-#define MT_RF_CTRL                     0x0528
-#define MT_RF_CTRL_ADDR                        GENMASK(11, 0)
-#define MT_RF_CTRL_WRITE               BIT(12)
-#define MT_RF_CTRL_BUSY                        BIT(13)
-#define MT_RF_CTRL_IDX                 BIT(16)
-
-#define MT_RF_DATA_READ                        0x052c
-
-#define MT_FCE_PSE_CTRL                        0x0800
-#define MT_FCE_PARAMETERS              0x0804
-#define MT_FCE_CSO                     0x0808
-
-#define MT_FCE_L2_STUFF                        0x080c
-#define MT_FCE_L2_STUFF_HT_L2_EN       BIT(0)
-#define MT_FCE_L2_STUFF_QOS_L2_EN      BIT(1)
-#define MT_FCE_L2_STUFF_RX_STUFF_EN    BIT(2)
-#define MT_FCE_L2_STUFF_TX_STUFF_EN    BIT(3)
-#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4)
-#define MT_FCE_L2_STUFF_MVINV_BSWAP    BIT(5)
-#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8)
-#define MT_FCE_L2_STUFF_TS_LEN_EN      GENMASK(23, 16)
-#define MT_FCE_L2_STUFF_OTHER_PORT     GENMASK(25, 24)
-
-#define MT_FCE_WLAN_FLOW_CONTROL1      0x0824
-
-#define MT_TX_CPU_FROM_FCE_BASE_PTR    0x09a0
-#define MT_TX_CPU_FROM_FCE_MAX_COUNT   0x09a4
-#define MT_FCE_PDMA_GLOBAL_CONF                0x09c4
-#define MT_FCE_SKIP_FS                 0x0a6c
-
-#define MT_PAUSE_ENABLE_CONTROL1       0x0a38
-
-#define MT_MAC_CSR0                    0x1000
-
-#define MT_MAC_SYS_CTRL                        0x1004
-#define MT_MAC_SYS_CTRL_RESET_CSR      BIT(0)
-#define MT_MAC_SYS_CTRL_RESET_BBP      BIT(1)
-#define MT_MAC_SYS_CTRL_ENABLE_TX      BIT(2)
-#define MT_MAC_SYS_CTRL_ENABLE_RX      BIT(3)
-
-#define MT_MAC_ADDR_DW0                        0x1008
-#define MT_MAC_ADDR_DW1                        0x100c
-#define MT_MAC_ADDR_DW1_U2ME_MASK      GENMASK(23, 16)
-
-#define MT_MAC_BSSID_DW0               0x1010
-#define MT_MAC_BSSID_DW1               0x1014
-#define MT_MAC_BSSID_DW1_ADDR          GENMASK(15, 0)
-#define MT_MAC_BSSID_DW1_MBSS_MODE     GENMASK(17, 16)
-#define MT_MAC_BSSID_DW1_MBEACON_N     GENMASK(20, 18)
-#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT        BIT(21)
-#define MT_MAC_BSSID_DW1_MBSS_MODE_B2  BIT(22)
-#define MT_MAC_BSSID_DW1_MBEACON_N_B3  BIT(23)
-#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24)
-
-#define MT_MAX_LEN_CFG                 0x1018
-
-#define MT_AMPDU_MAX_LEN_20M1S         0x1030
-#define MT_AMPDU_MAX_LEN_20M2S         0x1034
-#define MT_AMPDU_MAX_LEN_40M1S         0x1038
-#define MT_AMPDU_MAX_LEN_40M2S         0x103c
-#define MT_AMPDU_MAX_LEN               0x1040
-
-#define MT_WCID_DROP_BASE              0x106c
-#define MT_WCID_DROP(_n)               (MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
-#define MT_WCID_DROP_MASK(_n)          BIT((_n) % 32)
-
-#define MT_BCN_BYPASS_MASK             0x108c
-
-#define MT_MAC_APC_BSSID_BASE          0x1090
-#define MT_MAC_APC_BSSID_L(_n)         (MT_MAC_APC_BSSID_BASE + ((_n) * 8))
-#define MT_MAC_APC_BSSID_H(_n)         (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
-#define MT_MAC_APC_BSSID_H_ADDR                GENMASK(15, 0)
-#define MT_MAC_APC_BSSID0_H_EN         BIT(16)
-
-#define MT_XIFS_TIME_CFG               0x1100
-#define MT_XIFS_TIME_CFG_CCK_SIFS      GENMASK(7, 0)
-#define MT_XIFS_TIME_CFG_OFDM_SIFS     GENMASK(15, 8)
-#define MT_XIFS_TIME_CFG_OFDM_XIFS     GENMASK(19, 16)
-#define MT_XIFS_TIME_CFG_EIFS          GENMASK(28, 20)
-#define MT_XIFS_TIME_CFG_BB_RXEND_EN   BIT(29)
-
-#define MT_BKOFF_SLOT_CFG              0x1104
-#define MT_BKOFF_SLOT_CFG_SLOTTIME     GENMASK(7, 0)
-#define MT_BKOFF_SLOT_CFG_CC_DELAY     GENMASK(11, 8)
-
-#define MT_CH_TIME_CFG                 0x110c
-#define MT_CH_TIME_CFG_TIMER_EN                BIT(0)
-#define MT_CH_TIME_CFG_TX_AS_BUSY      BIT(1)
-#define MT_CH_TIME_CFG_RX_AS_BUSY      BIT(2)
-#define MT_CH_TIME_CFG_NAV_AS_BUSY     BIT(3)
-#define MT_CH_TIME_CFG_EIFS_AS_BUSY    BIT(4)
-#define MT_CH_TIME_CFG_MDRDY_CNT_EN    BIT(5)
-#define MT_CH_TIME_CFG_CH_TIMER_CLR    GENMASK(9, 8)
-#define MT_CH_TIME_CFG_MDRDY_CLR       GENMASK(11, 10)
-
-#define MT_PBF_LIFE_TIMER              0x1110
-
-#define MT_BEACON_TIME_CFG             0x1114
-#define MT_BEACON_TIME_CFG_INTVAL      GENMASK(15, 0)
-#define MT_BEACON_TIME_CFG_TIMER_EN    BIT(16)
-#define MT_BEACON_TIME_CFG_SYNC_MODE   GENMASK(18, 17)
-#define MT_BEACON_TIME_CFG_TBTT_EN     BIT(19)
-#define MT_BEACON_TIME_CFG_BEACON_TX   BIT(20)
-#define MT_BEACON_TIME_CFG_TSF_COMP    GENMASK(31, 24)
-
-#define MT_TBTT_SYNC_CFG               0x1118
-#define MT_TBTT_TIMER_CFG              0x1124
-
-#define MT_INT_TIMER_CFG               0x1128
-#define MT_INT_TIMER_CFG_PRE_TBTT      GENMASK(15, 0)
-#define MT_INT_TIMER_CFG_GP_TIMER      GENMASK(31, 16)
-
-#define MT_INT_TIMER_EN                        0x112c
-#define MT_INT_TIMER_EN_PRE_TBTT_EN    BIT(0)
-#define MT_INT_TIMER_EN_GP_TIMER_EN    BIT(1)
-
-#define MT_CH_IDLE                     0x1130
-#define MT_CH_BUSY                     0x1134
-#define MT_EXT_CH_BUSY                 0x1138
-#define MT_ED_CCA_TIMER                        0x1140
-
-#define MT_MAC_STATUS                  0x1200
-#define MT_MAC_STATUS_TX               BIT(0)
-#define MT_MAC_STATUS_RX               BIT(1)
-
-#define MT_PWR_PIN_CFG                 0x1204
-#define MT_AUX_CLK_CFG                 0x120c
-
-#define MT_BB_PA_MODE_CFG0             0x1214
-#define MT_BB_PA_MODE_CFG1             0x1218
-#define MT_RF_PA_MODE_CFG0             0x121c
-#define MT_RF_PA_MODE_CFG1             0x1220
-
-#define MT_RF_PA_MODE_ADJ0             0x1228
-#define MT_RF_PA_MODE_ADJ1             0x122c
-
-#define MT_DACCLK_EN_DLY_CFG           0x1264
-
-#define MT_EDCA_CFG_BASE               0x1300
-#define MT_EDCA_CFG_AC(_n)             (MT_EDCA_CFG_BASE + ((_n) << 2))
-#define MT_EDCA_CFG_TXOP               GENMASK(7, 0)
-#define MT_EDCA_CFG_AIFSN              GENMASK(11, 8)
-#define MT_EDCA_CFG_CWMIN              GENMASK(15, 12)
-#define MT_EDCA_CFG_CWMAX              GENMASK(19, 16)
-
-#define MT_TX_PWR_CFG_0                        0x1314
-#define MT_TX_PWR_CFG_1                        0x1318
-#define MT_TX_PWR_CFG_2                        0x131c
-#define MT_TX_PWR_CFG_3                        0x1320
-#define MT_TX_PWR_CFG_4                        0x1324
-#define MT_TX_PIN_CFG                  0x1328
-#define MT_TX_PIN_CFG_TXANT            GENMASK(3, 0)
-
-#define MT_TX_BAND_CFG                 0x132c
-#define MT_TX_BAND_CFG_UPPER_40M       BIT(0)
-#define MT_TX_BAND_CFG_5G              BIT(1)
-#define MT_TX_BAND_CFG_2G              BIT(2)
-
-#define MT_HT_FBK_TO_LEGACY            0x1384
-#define MT_TX_MPDU_ADJ_INT             0x1388
-
-#define MT_TX_PWR_CFG_7                        0x13d4
-#define MT_TX_PWR_CFG_8                        0x13d8
-#define MT_TX_PWR_CFG_9                        0x13dc
-
-#define MT_TX_SW_CFG0                  0x1330
-#define MT_TX_SW_CFG1                  0x1334
-#define MT_TX_SW_CFG2                  0x1338
-
-#define MT_TXOP_CTRL_CFG               0x1340
-
-#define MT_TX_RTS_CFG                  0x1344
-#define MT_TX_RTS_CFG_RETRY_LIMIT      GENMASK(7, 0)
-#define MT_TX_RTS_CFG_THRESH           GENMASK(23, 8)
-#define MT_TX_RTS_FALLBACK             BIT(24)
-
-#define MT_TX_TIMEOUT_CFG              0x1348
-#define MT_TX_TIMEOUT_CFG_ACKTO                GENMASK(15, 8)
-
-#define MT_TX_RETRY_CFG                        0x134c
-#define MT_TX_LINK_CFG                 0x1350
-#define MT_VHT_HT_FBK_CFG1             0x1358
-
-#define MT_PROT_CFG_RATE               GENMASK(15, 0)
-#define MT_PROT_CFG_CTRL               GENMASK(17, 16)
-#define MT_PROT_CFG_NAV                        GENMASK(19, 18)
-#define MT_PROT_CFG_TXOP_ALLOW         GENMASK(25, 20)
-#define MT_PROT_CFG_RTS_THRESH         BIT(26)
-
-#define MT_CCK_PROT_CFG                        0x1364
-#define MT_OFDM_PROT_CFG               0x1368
-#define MT_MM20_PROT_CFG               0x136c
-#define MT_MM40_PROT_CFG               0x1370
-#define MT_GF20_PROT_CFG               0x1374
-#define MT_GF40_PROT_CFG               0x1378
-
-#define MT_EXP_ACK_TIME                        0x1380
-
-#define MT_TX_PWR_CFG_0_EXT            0x1390
-#define MT_TX_PWR_CFG_1_EXT            0x1394
-
-#define MT_TX_FBK_LIMIT                        0x1398
-#define MT_TX_FBK_LIMIT_MPDU_FBK       GENMASK(7, 0)
-#define MT_TX_FBK_LIMIT_AMPDU_FBK      GENMASK(15, 8)
-#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR  BIT(16)
-#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17)
-#define MT_TX_FBK_LIMIT_RATE_LUT       BIT(18)
-
-#define MT_TX0_RF_GAIN_CORR            0x13a0
-#define MT_TX1_RF_GAIN_CORR            0x13a4
-
-#define MT_TX_ALC_CFG_0                        0x13b0
-#define MT_TX_ALC_CFG_0_CH_INIT_0      GENMASK(5, 0)
-#define MT_TX_ALC_CFG_0_CH_INIT_1      GENMASK(13, 8)
-#define MT_TX_ALC_CFG_0_LIMIT_0                GENMASK(21, 16)
-#define MT_TX_ALC_CFG_0_LIMIT_1                GENMASK(29, 24)
-
-#define MT_TX_ALC_CFG_1                        0x13b4
-#define MT_TX_ALC_CFG_1_TEMP_COMP      GENMASK(5, 0)
-
-#define MT_TX_ALC_CFG_2                        0x13a8
-#define MT_TX_ALC_CFG_2_TEMP_COMP      GENMASK(5, 0)
-
-#define MT_TX_ALC_CFG_3                        0x13ac
-#define MT_TX_ALC_CFG_4                        0x13c0
-#define MT_TX_ALC_CFG_4_LOWGAIN_CH_EN  BIT(31)
-
-#define MT_TX_ALC_VGA3                 0x13c8
-
-#define MT_TX_PROT_CFG6                        0x13e0
-#define MT_TX_PROT_CFG7                        0x13e4
-#define MT_TX_PROT_CFG8                        0x13e8
-
-#define MT_PIFS_TX_CFG                 0x13ec
-
-#define MT_RX_FILTR_CFG                        0x1400
-
-#define MT_RX_FILTR_CFG_CRC_ERR                BIT(0)
-#define MT_RX_FILTR_CFG_PHY_ERR                BIT(1)
-#define MT_RX_FILTR_CFG_PROMISC                BIT(2)
-#define MT_RX_FILTR_CFG_OTHER_BSS      BIT(3)
-#define MT_RX_FILTR_CFG_VER_ERR                BIT(4)
-#define MT_RX_FILTR_CFG_MCAST          BIT(5)
-#define MT_RX_FILTR_CFG_BCAST          BIT(6)
-#define MT_RX_FILTR_CFG_DUP            BIT(7)
-#define MT_RX_FILTR_CFG_CFACK          BIT(8)
-#define MT_RX_FILTR_CFG_CFEND          BIT(9)
-#define MT_RX_FILTR_CFG_ACK            BIT(10)
-#define MT_RX_FILTR_CFG_CTS            BIT(11)
-#define MT_RX_FILTR_CFG_RTS            BIT(12)
-#define MT_RX_FILTR_CFG_PSPOLL         BIT(13)
-#define MT_RX_FILTR_CFG_BA             BIT(14)
-#define MT_RX_FILTR_CFG_BAR            BIT(15)
-#define MT_RX_FILTR_CFG_CTRL_RSV       BIT(16)
-
-#define MT_AUTO_RSP_CFG                        0x1404
-#define MT_LEGACY_BASIC_RATE           0x1408
-#define MT_HT_BASIC_RATE               0x140c
-
-#define MT_HT_CTRL_CFG                 0x1410
-
-#define MT_EXT_CCA_CFG                 0x141c
-#define MT_EXT_CCA_CFG_CCA0            GENMASK(1, 0)
-#define MT_EXT_CCA_CFG_CCA1            GENMASK(3, 2)
-#define MT_EXT_CCA_CFG_CCA2            GENMASK(5, 4)
-#define MT_EXT_CCA_CFG_CCA3            GENMASK(7, 6)
-#define MT_EXT_CCA_CFG_CCA_MASK                GENMASK(11, 8)
-#define MT_EXT_CCA_CFG_ED_CCA_MASK     GENMASK(15, 12)
-
-#define MT_TX_SW_CFG3                  0x1478
-
-#define MT_PN_PAD_MODE                 0x150c
-
-#define MT_TXOP_HLDR_ET                        0x1608
-
-#define MT_PROT_AUTO_TX_CFG            0x1648
-#define MT_PROT_AUTO_TX_CFG_PROT_PADJ  GENMASK(11, 8)
-#define MT_PROT_AUTO_TX_CFG_AUTO_PADJ  GENMASK(27, 24)
-
-#define MT_RX_STAT_0                   0x1700
-#define MT_RX_STAT_0_CRC_ERRORS                GENMASK(15, 0)
-#define MT_RX_STAT_0_PHY_ERRORS                GENMASK(31, 16)
-
-#define MT_RX_STAT_1                   0x1704
-#define MT_RX_STAT_1_CCA_ERRORS                GENMASK(15, 0)
-#define MT_RX_STAT_1_PLCP_ERRORS       GENMASK(31, 16)
-
-#define MT_RX_STAT_2                   0x1708
-#define MT_RX_STAT_2_DUP_ERRORS                GENMASK(15, 0)
-#define MT_RX_STAT_2_OVERFLOW_ERRORS   GENMASK(31, 16)
-
-#define MT_TX_STA_0                    0x170c
-#define MT_TX_STA_1                    0x1710
-#define MT_TX_STA_2                    0x1714
-
-#define MT_TX_STAT_FIFO                        0x1718
-#define MT_TX_STAT_FIFO_VALID          BIT(0)
-#define MT_TX_STAT_FIFO_SUCCESS                BIT(5)
-#define MT_TX_STAT_FIFO_AGGR           BIT(6)
-#define MT_TX_STAT_FIFO_ACKREQ         BIT(7)
-#define MT_TX_STAT_FIFO_WCID           GENMASK(15, 8)
-#define MT_TX_STAT_FIFO_RATE           GENMASK(31, 16)
-
-#define MT_TX_AGG_CNT_BASE0            0x1720
-#define MT_TX_AGG_CNT_BASE1            0x174c
-
-#define MT_TX_AGG_CNT(_id)             ((_id) < 8 ?                    \
-                                        MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
-                                        MT_TX_AGG_CNT_BASE1 + ((_id - 8) << 2))
-
-#define MT_TX_STAT_FIFO_EXT            0x1798
-#define MT_TX_STAT_FIFO_EXT_RETRY      GENMASK(7, 0)
-#define MT_TX_STAT_FIFO_EXT_PKTID      GENMASK(15, 8)
-
-#define MT_WCID_TX_RATE_BASE           0x1c00
-#define MT_WCID_TX_RATE(_i)            (MT_WCID_TX_RATE_BASE + ((_i) << 3))
-
-#define MT_BBP_CORE_BASE               0x2000
-#define MT_BBP_IBI_BASE                        0x2100
-#define MT_BBP_AGC_BASE                        0x2300
-#define MT_BBP_TXC_BASE                        0x2400
-#define MT_BBP_RXC_BASE                        0x2500
-#define MT_BBP_TXO_BASE                        0x2600
-#define MT_BBP_TXBE_BASE               0x2700
-#define MT_BBP_RXFE_BASE               0x2800
-#define MT_BBP_RXO_BASE                        0x2900
-#define MT_BBP_DFS_BASE                        0x2a00
-#define MT_BBP_TR_BASE                 0x2b00
-#define MT_BBP_CAL_BASE                        0x2c00
-#define MT_BBP_DSC_BASE                        0x2e00
-#define MT_BBP_PFMU_BASE               0x2f00
-
-#define MT_BBP(_type, _n)              (MT_BBP_##_type##_BASE + ((_n) << 2))
-
-#define MT_BBP_CORE_R1_BW              GENMASK(4, 3)
-
-#define MT_BBP_AGC_R0_CTRL_CHAN                GENMASK(9, 8)
-#define MT_BBP_AGC_R0_BW               GENMASK(14, 12)
-
-/* AGC, R4/R5 */
-#define MT_BBP_AGC_LNA_HIGH_GAIN       GENMASK(21, 16)
-#define MT_BBP_AGC_LNA_MID_GAIN                GENMASK(13, 8)
-#define MT_BBP_AGC_LNA_LOW_GAIN                GENMASK(5, 0)
-
-/* AGC, R6/R7 */
-#define MT_BBP_AGC_LNA_ULOW_GAIN       GENMASK(5, 0)
-
-/* AGC, R8/R9 */
-#define MT_BBP_AGC_LNA_GAIN_MODE       GENMASK(7, 6)
-#define MT_BBP_AGC_GAIN                        GENMASK(14, 8)
-
-#define MT_BBP_AGC20_RSSI0             GENMASK(7, 0)
-#define MT_BBP_AGC20_RSSI1             GENMASK(15, 8)
-
-#define MT_BBP_TXBE_R0_CTRL_CHAN       GENMASK(1, 0)
-
-#define MT_WCID_ADDR_BASE              0x1800
-#define MT_WCID_ADDR(_n)               (MT_WCID_ADDR_BASE + (_n) * 8)
-
-#define MT_SRAM_BASE                   0x4000
-
-#define MT_WCID_KEY_BASE               0x8000
-#define MT_WCID_KEY(_n)                        (MT_WCID_KEY_BASE + (_n) * 32)
-
-#define MT_WCID_IV_BASE                        0xa000
-#define MT_WCID_IV(_n)                 (MT_WCID_IV_BASE + (_n) * 8)
-
-#define MT_WCID_ATTR_BASE              0xa800
-#define MT_WCID_ATTR(_n)               (MT_WCID_ATTR_BASE + (_n) * 4)
-
-#define MT_WCID_ATTR_PAIRWISE          BIT(0)
-#define MT_WCID_ATTR_PKEY_MODE         GENMASK(3, 1)
-#define MT_WCID_ATTR_BSS_IDX           GENMASK(6, 4)
-#define MT_WCID_ATTR_RXWI_UDF          GENMASK(9, 7)
-#define MT_WCID_ATTR_PKEY_MODE_EXT     BIT(10)
-#define MT_WCID_ATTR_BSS_IDX_EXT       BIT(11)
-#define MT_WCID_ATTR_WAPI_MCBC         BIT(15)
-#define MT_WCID_ATTR_WAPI_KEYID                GENMASK(31, 24)
-
-#define MT_SKEY_BASE_0                 0xac00
-#define MT_SKEY_BASE_1                 0xb400
-#define MT_SKEY_0(_bss, _idx)          (MT_SKEY_BASE_0 + (4 * (_bss) + _idx) * 32)
-#define MT_SKEY_1(_bss, _idx)          (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + _idx) * 32)
-#define MT_SKEY(_bss, _idx)            ((_bss & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
-
-#define MT_SKEY_MODE_BASE_0            0xb000
-#define MT_SKEY_MODE_BASE_1            0xb3f0
-#define MT_SKEY_MODE_0(_bss)           (MT_SKEY_MODE_BASE_0 + ((_bss / 2) << 2))
-#define MT_SKEY_MODE_1(_bss)           (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
-#define MT_SKEY_MODE(_bss)             ((_bss & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
-#define MT_SKEY_MODE_MASK              GENMASK(3, 0)
-#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * (_bss & 1)))
-
-#define MT_BEACON_BASE                 0xc000
-
-#define MT_TEMP_SENSOR                 0x1d000
-#define MT_TEMP_SENSOR_VAL             GENMASK(6, 0)
-
-struct mt76_wcid_addr {
-       u8 macaddr[6];
-       __le16 ba_mask;
-} __packed __aligned(4);
-
-struct mt76_wcid_key {
-       u8 key[16];
-       u8 tx_mic[8];
-       u8 rx_mic[8];
-} __packed __aligned(4);
-
-enum mt76x2_cipher_type {
-       MT_CIPHER_NONE,
-       MT_CIPHER_WEP40,
-       MT_CIPHER_WEP104,
-       MT_CIPHER_TKIP,
-       MT_CIPHER_AES_CCMP,
-       MT_CIPHER_CKIP40,
-       MT_CIPHER_CKIP104,
-       MT_CIPHER_CKIP128,
-       MT_CIPHER_WAPI,
-};
-
-#endif
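
The register map above encodes multi-bit fields with GENMASK() and single flags with BIT(); drivers pack and unpack them with the FIELD_PREP()/FIELD_GET() helpers from <linux/bitfield.h>. Below is a minimal userspace sketch of how the EDCA fields compose into one register word — the local BIT/GENMASK/FIELD_PREP definitions are simplified stand-ins for the kernel helpers and assume a 64-bit unsigned long; this is illustrative, not driver code.

#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins for the <linux/bits.h>/<linux/bitfield.h> helpers */
#define BIT(n)                (1UL << (n))
#define GENMASK(h, l)         (((~0UL) << (l)) & (~0UL >> (63 - (h))))
#define FIELD_PREP(mask, val) (((unsigned long)(val) << __builtin_ctzl(mask)) & (mask))

#define MT_EDCA_CFG_TXOP      GENMASK(7, 0)
#define MT_EDCA_CFG_AIFSN     GENMASK(11, 8)
#define MT_EDCA_CFG_CWMIN     GENMASK(15, 12)
#define MT_EDCA_CFG_CWMAX     GENMASK(19, 16)

int main(void)
{
	/* compose one EDCA AC word: TXOP=0x2f, AIFSN=2, CWmin=4, CWmax=10 */
	uint32_t val = FIELD_PREP(MT_EDCA_CFG_TXOP, 0x2f) |
		       FIELD_PREP(MT_EDCA_CFG_AIFSN, 2) |
		       FIELD_PREP(MT_EDCA_CFG_CWMIN, 4) |
		       FIELD_PREP(MT_EDCA_CFG_CWMAX, 10);

	printf("EDCA cfg word: 0x%08x\n", val); /* prints 0x000a422f */
	return 0;
}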
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c
deleted file mode 100644
index a09f117..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/module.h>
-
-#ifndef __CHECKER__
-#define CREATE_TRACE_POINTS
-#include "mt76x2_trace.h"
-
-#endif
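
The removed mt76x2_trace.c follows the standard kernel tracepoint idiom: exactly one translation unit defines CREATE_TRACE_POINTS before including the trace header, which expands the event declarations in mt76x2_trace.h into their single set of definitions; every other user includes the header plainly and only sees the trace_*() call stubs. The #ifndef __CHECKER__ guard simply hides the generated code from sparse.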
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h
deleted file mode 100644
index 4cd4241..0000000
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#if !defined(__MT76x2_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define __MT76x2_TRACE_H
-
-#include <linux/tracepoint.h>
-#include "mt76x2.h"
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM mt76x2
-
-#define MAXNAME                32
-#define DEV_ENTRY      __array(char, wiphy_name, 32)
-#define DEV_ASSIGN     strlcpy(__entry->wiphy_name, wiphy_name(mt76_hw(dev)->wiphy), MAXNAME)
-#define DEV_PR_FMT     "%s"
-#define DEV_PR_ARG     __entry->wiphy_name
-
-#define TXID_ENTRY     __field(u8, wcid) __field(u8, pktid)
-#define TXID_ASSIGN    __entry->wcid = wcid; __entry->pktid = pktid
-#define TXID_PR_FMT    " [%d:%d]"
-#define TXID_PR_ARG    __entry->wcid, __entry->pktid
-
-DECLARE_EVENT_CLASS(dev_evt,
-       TP_PROTO(struct mt76x2_dev *dev),
-       TP_ARGS(dev),
-       TP_STRUCT__entry(
-               DEV_ENTRY
-       ),
-       TP_fast_assign(
-               DEV_ASSIGN;
-       ),
-       TP_printk(DEV_PR_FMT, DEV_PR_ARG)
-);
-
-DECLARE_EVENT_CLASS(dev_txid_evt,
-       TP_PROTO(struct mt76x2_dev *dev, u8 wcid, u8 pktid),
-       TP_ARGS(dev, wcid, pktid),
-       TP_STRUCT__entry(
-               DEV_ENTRY
-               TXID_ENTRY
-       ),
-       TP_fast_assign(
-               DEV_ASSIGN;
-               TXID_ASSIGN;
-       ),
-       TP_printk(
-               DEV_PR_FMT TXID_PR_FMT,
-               DEV_PR_ARG, TXID_PR_ARG
-       )
-);
-
-DEFINE_EVENT(dev_evt, mac_txstat_poll,
-       TP_PROTO(struct mt76x2_dev *dev),
-       TP_ARGS(dev)
-);
-
-DEFINE_EVENT(dev_txid_evt, mac_txdone_add,
-       TP_PROTO(struct mt76x2_dev *dev, u8 wcid, u8 pktid),
-       TP_ARGS(dev, wcid, pktid)
-);
-
-TRACE_EVENT(mac_txstat_fetch,
-       TP_PROTO(struct mt76x2_dev *dev,
-                struct mt76x2_tx_status *stat),
-
-       TP_ARGS(dev, stat),
-
-       TP_STRUCT__entry(
-               DEV_ENTRY
-               TXID_ENTRY
-               __field(bool, success)
-               __field(bool, aggr)
-               __field(bool, ack_req)
-               __field(u16, rate)
-               __field(u8, retry)
-       ),
-
-       TP_fast_assign(
-               DEV_ASSIGN;
-               __entry->success = stat->success;
-               __entry->aggr = stat->aggr;
-               __entry->ack_req = stat->ack_req;
-               __entry->wcid = stat->wcid;
-               __entry->pktid = stat->pktid;
-               __entry->rate = stat->rate;
-               __entry->retry = stat->retry;
-       ),
-
-       TP_printk(
-               DEV_PR_FMT TXID_PR_FMT
-               " success:%d aggr:%d ack_req:%d"
-               " rate:%04x retry:%d",
-               DEV_PR_ARG, TXID_PR_ARG,
-               __entry->success, __entry->aggr, __entry->ack_req,
-               __entry->rate, __entry->retry
-       )
-);
-
-
-TRACE_EVENT(dev_irq,
-       TP_PROTO(struct mt76x2_dev *dev, u32 val, u32 mask),
-
-       TP_ARGS(dev, val, mask),
-
-       TP_STRUCT__entry(
-               DEV_ENTRY
-               __field(u32, val)
-               __field(u32, mask)
-       ),
-
-       TP_fast_assign(
-               DEV_ASSIGN;
-               __entry->val = val;
-               __entry->mask = mask;
-       ),
-
-       TP_printk(
-               DEV_PR_FMT " %08x & %08x",
-               DEV_PR_ARG, __entry->val, __entry->mask
-       )
-);
-
-#endif
-
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE mt76x2_trace
-
-#include <trace/define_trace.h>
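
Each TRACE_EVENT()/DEFINE_EVENT() above generates a trace_<name>() helper that the driver calls at the instrumented site. A sketch of firing the dev_irq event from an interrupt handler follows — illustrative only; the handler body and the dev->irqmask field are assumptions about the surrounding driver, not code from this patch.

/* illustrative sketch, not the driver's actual handler */
static irqreturn_t example_irq_handler(int irq, void *dev_instance)
{
	struct mt76x2_dev *dev = dev_instance;
	u32 intr = mt76_rr(dev, MT_INT_SOURCE_CSR);

	/* ack the pending sources, then emit the dev_irq tracepoint
	 * declared above; val and mask are rendered by TP_printk() as
	 * "<wiphy> %08x & %08x" in the trace buffer */
	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
	trace_dev_irq(dev, intr, dev->irqmask);

	return IRQ_HANDLED;
}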
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
deleted file mode 100644
index 4c90788..0000000
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "mt76x2.h"
-#include "mt76x2_dma.h"
-
-struct beacon_bc_data {
-       struct mt76x2_dev *dev;
-       struct sk_buff_head q;
-       struct sk_buff *tail[8];
-};
-
-int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
-                         struct sk_buff *skb, struct mt76_queue *q,
-                         struct mt76_wcid *wcid, struct ieee80211_sta *sta,
-                         u32 *tx_info)
-{
-       struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       int qsel = MT_QSEL_EDCA;
-       int ret;
-
-       if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
-               mt76x2_mac_wcid_set_drop(dev, wcid->idx, false);
-
-       mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len);
-
-       ret = mt76x2_insert_hdr_pad(skb);
-       if (ret < 0)
-               return ret;
-
-       if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
-               qsel = MT_QSEL_MGMT;
-
-       *tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
-                  MT_TXD_INFO_80211;
-
-       if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
-               *tx_info |= MT_TXD_INFO_WIV;
-
-       return 0;
-}
-
-static void
-mt76x2_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
-{
-       struct mt76x2_dev *dev = (struct mt76x2_dev *) priv;
-       struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
-       struct sk_buff *skb = NULL;
-
-       if (!(dev->beacon_mask & BIT(mvif->idx)))
-               return;
-
-       skb = ieee80211_beacon_get(mt76_hw(dev), vif);
-       if (!skb)
-               return;
-
-       mt76x2_mac_set_beacon(dev, mvif->idx, skb);
-}
-
-static void
-mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
-{
-       struct beacon_bc_data *data = priv;
-       struct mt76x2_dev *dev = data->dev;
-       struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
-       struct ieee80211_tx_info *info;
-       struct sk_buff *skb;
-
-       if (!(dev->beacon_mask & BIT(mvif->idx)))
-               return;
-
-       skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
-       if (!skb)
-               return;
-
-       info = IEEE80211_SKB_CB(skb);
-       info->control.vif = vif;
-       info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
-       mt76_skb_set_moredata(skb, true);
-       __skb_queue_tail(&data->q, skb);
-       data->tail[mvif->idx] = skb;
-}
-
-static void
-mt76x2_resync_beacon_timer(struct mt76x2_dev *dev)
-{
-       u32 timer_val = dev->beacon_int << 4;
-
-       dev->tbtt_count++;
-
-       /*
-        * Beacon timer drifts by 1us every tick, the timer is configured
-        * in 1/16 TU (64us) units.
-        */
-       if (dev->tbtt_count < 62)
-               return;
-
-       if (dev->tbtt_count >= 64) {
-               dev->tbtt_count = 0;
-               return;
-       }
-
-       /*
-        * The updated beacon interval takes effect after two TBTT, because
-        * at this point the original interval has already been loaded into
-        * the next TBTT_TIMER value
-        */
-       if (dev->tbtt_count == 62)
-               timer_val -= 1;
-
-       mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
-                      MT_BEACON_TIME_CFG_INTVAL, timer_val);
-}
-
-void mt76x2_pre_tbtt_tasklet(unsigned long arg)
-{
-       struct mt76x2_dev *dev = (struct mt76x2_dev *) arg;
-       struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
-       struct beacon_bc_data data = {};
-       struct sk_buff *skb;
-       int i, nframes;
-
-       mt76x2_resync_beacon_timer(dev);
-
-       data.dev = dev;
-       __skb_queue_head_init(&data.q);
-
-       ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
-               IEEE80211_IFACE_ITER_RESUME_ALL,
-               mt76x2_update_beacon_iter, dev);
-
-       do {
-               nframes = skb_queue_len(&data.q);
-               ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
-                       IEEE80211_IFACE_ITER_RESUME_ALL,
-                       mt76x2_add_buffered_bc, &data);
-       } while (nframes != skb_queue_len(&data.q));
-
-       if (!nframes)
-               return;
-
-       for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
-               if (!data.tail[i])
-                       continue;
-
-               mt76_skb_set_moredata(data.tail[i], false);
-       }
-
-       spin_lock_bh(&q->lock);
-       while ((skb = __skb_dequeue(&data.q)) != NULL) {
-               struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-               struct ieee80211_vif *vif = info->control.vif;
-               struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
-
-               mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
-                                     NULL);
-       }
-       spin_unlock_bh(&q->lock);
-}
-
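A note on the arithmetic in mt76x2_resync_beacon_timer() above: the << 4 converts the beacon interval from TUs into the hardware's 1/16-TU units (1 TU = 1024 us, so one unit is 64 us). With the timer drifting about 1 us per TBTT, as the comment states, 64 beacons accumulate exactly one 64-us unit of error. The tasklet therefore writes an interval shortened by one unit when tbtt_count reaches 62 (it takes effect two TBTTs later, at beacon 64), restores the nominal value at count 63, and wraps the counter at 64.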
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c
deleted file mode 100644
index 36afb16..0000000
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "mt76x2.h"
-#include "mt76x2_dma.h"
-
-void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
-              struct sk_buff *skb)
-{
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct mt76x2_dev *dev = hw->priv;
-       struct ieee80211_vif *vif = info->control.vif;
-       struct mt76_wcid *wcid = &dev->global_wcid;
-
-       if (control->sta) {
-               struct mt76x2_sta *msta;
-
-               msta = (struct mt76x2_sta *)control->sta->drv_priv;
-               wcid = &msta->wcid;
-               /* sw encrypted frames */
-               if (!info->control.hw_key && wcid->hw_key_idx != -1)
-                       control->sta = NULL;
-       }
-
-       if (vif && !control->sta) {
-               struct mt76x2_vif *mvif;
-
-               mvif = (struct mt76x2_vif *)vif->drv_priv;
-               wcid = &mvif->group_wcid;
-       }
-
-       mt76_tx(&dev->mt76, control->sta, wcid, skb);
-}
-EXPORT_SYMBOL_GPL(mt76x2_tx);
-
-int mt76x2_insert_hdr_pad(struct sk_buff *skb)
-{
-       int len = ieee80211_get_hdrlen_from_skb(skb);
-
-       if (len % 4 == 0)
-               return 0;
-
-       skb_push(skb, 2);
-       memmove(skb->data, skb->data + 2, len);
-
-       skb->data[len] = 0;
-       skb->data[len + 1] = 0;
-       return 2;
-}
-EXPORT_SYMBOL_GPL(mt76x2_insert_hdr_pad);
-
-s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
-                              const struct ieee80211_tx_rate *rate)
-{
-       s8 max_txpwr;
-
-       if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
-               u8 mcs = ieee80211_rate_get_vht_mcs(rate);
-
-               if (mcs == 8 || mcs == 9) {
-                       max_txpwr = dev->rate_power.vht[8];
-               } else {
-                       u8 nss, idx;
-
-                       nss = ieee80211_rate_get_vht_nss(rate);
-                       idx = ((nss - 1) << 3) + mcs;
-                       max_txpwr = dev->rate_power.ht[idx & 0xf];
-               }
-       } else if (rate->flags & IEEE80211_TX_RC_MCS) {
-               max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
-       } else {
-               enum nl80211_band band = dev->mt76.chandef.chan->band;
-
-               if (band == NL80211_BAND_2GHZ) {
-                       const struct ieee80211_rate *r;
-                       struct wiphy *wiphy = mt76_hw(dev)->wiphy;
-                       struct mt76_rate_power *rp = &dev->rate_power;
-
-                       r = &wiphy->bands[band]->bitrates[rate->idx];
-                       if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
-                               max_txpwr = rp->cck[r->hw_value & 0x3];
-                       else
-                               max_txpwr = rp->ofdm[r->hw_value & 0x7];
-               } else {
-                       max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
-               }
-       }
-
-       return max_txpwr;
-}
-EXPORT_SYMBOL_GPL(mt76x2_tx_get_max_txpwr_adj);
-
-s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj)
-{
-       txpwr = min_t(s8, txpwr, dev->txpower_conf);
-       txpwr -= (dev->target_power + dev->target_power_delta[0]);
-       txpwr = min_t(s8, txpwr, max_txpwr_adj);
-
-       if (!dev->enable_tpc)
-               return 0;
-       else if (txpwr >= 0)
-               return min_t(s8, txpwr, 7);
-       else
-               return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
-}
-EXPORT_SYMBOL_GPL(mt76x2_tx_get_txpwr_adj);
-
-void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr)
-{
-       s8 txpwr_adj;
-
-       txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr,
-                                           dev->rate_power.ofdm[4]);
-       mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
-                      MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
-       mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
-                      MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
-}
-EXPORT_SYMBOL_GPL(mt76x2_tx_set_txpwr_auto);
-
-void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb)
-{
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
-       if (info->flags & IEEE80211_TX_CTL_AMPDU) {
-               ieee80211_free_txskb(mt76_hw(dev), skb);
-       } else {
-               ieee80211_tx_info_clear_status(info);
-               info->status.rates[0].idx = -1;
-               info->flags |= IEEE80211_TX_STAT_ACK;
-               ieee80211_tx_status(mt76_hw(dev), skb);
-       }
-}
-EXPORT_SYMBOL_GPL(mt76x2_tx_complete);
-
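mt76x2_insert_hdr_pad() above keeps the payload 4-byte aligned for the hardware: when the 802.11 header length is not a multiple of 4, the header is shifted two bytes forward and the gap is zeroed. Below is a self-contained userspace model of the same pointer arithmetic — plain buffers stand in for the skb; illustrative only.

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[64] = { 0 };
	int hdr_len = 26;               /* 26 % 4 == 2, so padding is needed */
	unsigned char *data = buf + 2;  /* 2 bytes of headroom, as skb_push() needs */

	memset(data, 0xaa, hdr_len);     /* "802.11 header" */
	memset(data + hdr_len, 0xbb, 8); /* "payload" */

	if (hdr_len % 4) {
		data -= 2;                        /* skb_push(skb, 2) */
		memmove(data, data + 2, hdr_len); /* slide header forward */
		data[hdr_len] = 0;                /* zero the 2-byte pad */
		data[hdr_len + 1] = 0;
	}

	/* the payload now starts at offset hdr_len + 2 == 28, a multiple of 4 */
	printf("payload offset: %d\n", hdr_len + 2);
	return 0;
}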
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c
deleted file mode 100644
index 1428cfd..0000000
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include "mt76x2u.h"
-
-static const struct usb_device_id mt76x2u_device_table[] = {
-       { USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */
-       { USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */
-       { USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */
-       { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USB-AC1200 */
-       { USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */
-       { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
-       { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
-       { USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */
-       { },
-};
-
-static int mt76x2u_probe(struct usb_interface *intf,
-                        const struct usb_device_id *id)
-{
-       struct usb_device *udev = interface_to_usbdev(intf);
-       struct mt76x2_dev *dev;
-       int err;
-
-       dev = mt76x2u_alloc_device(&intf->dev);
-       if (!dev)
-               return -ENOMEM;
-
-       udev = usb_get_dev(udev);
-       usb_reset_device(udev);
-
-       err = mt76u_init(&dev->mt76, intf);
-       if (err < 0)
-               goto err;
-
-       dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
-       dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
-
-       err = mt76x2u_register_device(dev);
-       if (err < 0)
-               goto err;
-
-       return 0;
-
-err:
-       ieee80211_free_hw(mt76_hw(dev));
-       usb_set_intfdata(intf, NULL);
-       usb_put_dev(udev);
-
-       return err;
-}
-
-static void mt76x2u_disconnect(struct usb_interface *intf)
-{
-       struct usb_device *udev = interface_to_usbdev(intf);
-       struct mt76x2_dev *dev = usb_get_intfdata(intf);
-       struct ieee80211_hw *hw = mt76_hw(dev);
-
-       set_bit(MT76_REMOVED, &dev->mt76.state);
-       ieee80211_unregister_hw(hw);
-       mt76x2u_cleanup(dev);
-
-       ieee80211_free_hw(hw);
-       usb_set_intfdata(intf, NULL);
-       usb_put_dev(udev);
-}
-
-static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf,
-                                         pm_message_t state)
-{
-       struct mt76x2_dev *dev = usb_get_intfdata(intf);
-       struct mt76_usb *usb = &dev->mt76.usb;
-
-       mt76u_stop_queues(&dev->mt76);
-       mt76x2u_stop_hw(dev);
-       usb_kill_urb(usb->mcu.res.urb);
-
-       return 0;
-}
-
-static int __maybe_unused mt76x2u_resume(struct usb_interface *intf)
-{
-       struct mt76x2_dev *dev = usb_get_intfdata(intf);
-       struct mt76_usb *usb = &dev->mt76.usb;
-       int err;
-
-       reinit_completion(&usb->mcu.cmpl);
-       err = mt76u_submit_buf(&dev->mt76, USB_DIR_IN,
-                              MT_EP_IN_CMD_RESP,
-                              &usb->mcu.res, GFP_KERNEL,
-                              mt76u_mcu_complete_urb,
-                              &usb->mcu.cmpl);
-       if (err < 0)
-               return err;
-
-       err = mt76u_submit_rx_buffers(&dev->mt76);
-       if (err < 0)
-               return err;
-
-       tasklet_enable(&usb->rx_tasklet);
-       tasklet_enable(&usb->tx_tasklet);
-
-       return mt76x2u_init_hardware(dev);
-}
-
-MODULE_DEVICE_TABLE(usb, mt76x2u_device_table);
-MODULE_FIRMWARE(MT7662U_FIRMWARE);
-MODULE_FIRMWARE(MT7662U_ROM_PATCH);
-
-static struct usb_driver mt76x2u_driver = {
-       .name           = KBUILD_MODNAME,
-       .id_table       = mt76x2u_device_table,
-       .probe          = mt76x2u_probe,
-       .disconnect     = mt76x2u_disconnect,
-#ifdef CONFIG_PM
-       .suspend        = mt76x2u_suspend,
-       .resume         = mt76x2u_resume,
-       .reset_resume   = mt76x2u_resume,
-#endif /* CONFIG_PM */
-       .soft_unbind    = 1,
-       .disable_hub_initiated_lpm = 1,
-};
-module_usb_driver(mt76x2u_driver);
-
-MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
-MODULE_LICENSE("Dual BSD/GPL");
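
Supporting another MT76x2U-based dongle is normally a one-line addition to the ID table above; MODULE_DEVICE_TABLE(usb, ...) exports the list so the module is autoloaded when a matching device enumerates. For example, a single new entry before the terminator (hypothetical placeholder IDs, not a real device):

	{ USB_DEVICE(0x1234, 0xabcd) }, /* hypothetical new adapter */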
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u.h b/drivers/net/wireless/mediatek/mt76/mt76x2u.h
deleted file mode 100644
index 008092f..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef __MT76x2U_H
-#define __MT76x2U_H
-
-#include <linux/device.h>
-
-#include "mt76x2.h"
-#include "mt76x2_dma.h"
-#include "mt76x2_mcu.h"
-
-#define MT7612U_EEPROM_SIZE            512
-
-#define MT_USB_AGGR_SIZE_LIMIT         21 /* 1024B unit */
-#define MT_USB_AGGR_TIMEOUT            0x80 /* 33ns unit */
-
-extern const struct ieee80211_ops mt76x2u_ops;
-
-struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev);
-int mt76x2u_register_device(struct mt76x2_dev *dev);
-int mt76x2u_init_hardware(struct mt76x2_dev *dev);
-void mt76x2u_cleanup(struct mt76x2_dev *dev);
-void mt76x2u_stop_hw(struct mt76x2_dev *dev);
-
-void mt76x2u_mac_setaddr(struct mt76x2_dev *dev, u8 *addr);
-int mt76x2u_mac_reset(struct mt76x2_dev *dev);
-void mt76x2u_mac_resume(struct mt76x2_dev *dev);
-int mt76x2u_mac_start(struct mt76x2_dev *dev);
-int mt76x2u_mac_stop(struct mt76x2_dev *dev);
-
-int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
-                           struct cfg80211_chan_def *chandef);
-void mt76x2u_phy_calibrate(struct work_struct *work);
-void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev);
-void mt76x2u_phy_set_txdac(struct mt76x2_dev *dev);
-void mt76x2u_phy_set_rxpath(struct mt76x2_dev *dev);
-
-void mt76x2u_mcu_complete_urb(struct urb *urb);
-int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
-                           u8 bw_index, bool scan);
-int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
-                         u32 val);
-int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev,
-                         struct mt76x2_tssi_comp *tssi_data);
-int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
-                         bool force);
-int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
-                               bool ext, int rssi, u32 false_cca);
-int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val);
-int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type,
-                       u8 temp_level, u8 channel);
-int mt76x2u_mcu_init(struct mt76x2_dev *dev);
-int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev);
-void mt76x2u_mcu_deinit(struct mt76x2_dev *dev);
-
-int mt76x2u_alloc_queues(struct mt76x2_dev *dev);
-void mt76x2u_queues_deinit(struct mt76x2_dev *dev);
-void mt76x2u_stop_queues(struct mt76x2_dev *dev);
-bool mt76x2u_tx_status_data(struct mt76_dev *mdev, u8 *update);
-int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
-                          struct sk_buff *skb, struct mt76_queue *q,
-                          struct mt76_wcid *wcid, struct ieee80211_sta *sta,
-                          u32 *tx_info);
-void mt76x2u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
-                            struct mt76_queue_entry *e, bool flush);
-int mt76x2u_skb_dma_info(struct sk_buff *skb, enum dma_msg_port port,
-                        u32 flags);
-
-#endif /* __MT76x2U_H */
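
Per the unit comments above, the USB RX aggregation limit works out to 21 x 1024 B = 21504 B per aggregate, and the aggregation timeout to 0x80 x 33 ns, roughly 4.2 us.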
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c
deleted file mode 100644
index 1ca5dd0..0000000
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "mt76x2u.h"
-#include "dma.h"
-
-static void mt76x2u_remove_dma_hdr(struct sk_buff *skb)
-{
-       int hdr_len;
-
-       skb_pull(skb, sizeof(struct mt76x2_txwi) + MT_DMA_HDR_LEN);
-       hdr_len = ieee80211_get_hdrlen_from_skb(skb);
-       if (hdr_len % 4) {
-               memmove(skb->data + 2, skb->data, hdr_len);
-               skb_pull(skb, 2);
-       }
-}
-
-static int
-mt76x2u_check_skb_rooms(struct sk_buff *skb)
-{
-       int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
-       u32 need_head;
-
-       need_head = sizeof(struct mt76x2_txwi) + MT_DMA_HDR_LEN;
-       if (hdr_len % 4)
-               need_head += 2;
-       return skb_cow(skb, need_head);
-}
-
-static int
-mt76x2u_set_txinfo(struct sk_buff *skb,
-                  struct mt76_wcid *wcid, u8 ep)
-{
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       enum mt76x2_qsel qsel;
-       u32 flags;
-
-       if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
-           ep == MT_EP_OUT_HCCA)
-               qsel = MT_QSEL_MGMT;
-       else
-               qsel = MT_QSEL_EDCA;
-
-       flags = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
-               MT_TXD_INFO_80211;
-       if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
-               flags |= MT_TXD_INFO_WIV;
-
-       return mt76u_skb_dma_info(skb, WLAN_PORT, flags);
-}
-
-bool mt76x2u_tx_status_data(struct mt76_dev *mdev, u8 *update)
-{
-       struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
-       struct mt76x2_tx_status stat;
-
-       if (!mt76x2_mac_load_tx_status(dev, &stat))
-               return false;
-
-       mt76x2_send_tx_status(dev, &stat, update);
-
-       return true;
-}
-
-int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
-                          struct sk_buff *skb, struct mt76_queue *q,
-                          struct mt76_wcid *wcid, struct ieee80211_sta *sta,
-                          u32 *tx_info)
-{
-       struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
-       struct mt76x2_txwi *txwi;
-       int err, len = skb->len;
-
-       err = mt76x2u_check_skb_rooms(skb);
-       if (err < 0)
-               return -ENOMEM;
-
-       mt76x2_insert_hdr_pad(skb);
-
-       txwi = skb_push(skb, sizeof(struct mt76x2_txwi));
-       mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
-
-       return mt76x2u_set_txinfo(skb, wcid, q2ep(q->hw_idx));
-}
-
-void mt76x2u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
-                            struct mt76_queue_entry *e, bool flush)
-{
-       struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
-
-       mt76x2u_remove_dma_hdr(e->skb);
-       mt76x2_tx_complete(dev, e->skb);
-}
-
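For reference, the USB TX frame that mt76x2u_tx_prepare_skb() above builds up looks roughly like this (a sketch inferred from the code, front to back):

	| DMA info header (MT_DMA_HDR_LEN) | txwi | 802.11 header | 0-2 byte pad | payload |

mt76x2u_tx_complete_skb() undoes it symmetrically: mt76x2u_remove_dma_hdr() pulls the DMA header and txwi off the front, then removes the two pad bytes whenever the header length is not a multiple of 4.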
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c
deleted file mode 100644
index 9b81e76..0000000
+++ /dev/null
@@ -1,318 +0,0 @@
-/*
- * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/delay.h>
-
-#include "mt76x2u.h"
-#include "mt76x2_eeprom.h"
-
-static void mt76x2u_init_dma(struct mt76x2_dev *dev)
-{
-       u32 val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
-
-       val |= MT_USB_DMA_CFG_RX_DROP_OR_PAD |
-              MT_USB_DMA_CFG_RX_BULK_EN |
-              MT_USB_DMA_CFG_TX_BULK_EN;
-
-       /* disable AGGR_BULK_RX in order to receive one
-        * frame in each rx urb and avoid copies
-        */
-       val &= ~MT_USB_DMA_CFG_RX_BULK_AGG_EN;
-       mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
-}
-
-static void mt76x2u_power_on_rf_patch(struct mt76x2_dev *dev)
-{
-       mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) | BIT(16));
-       udelay(1);
-
-       mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1c), 0xff);
-       mt76_set(dev, MT_VEND_ADDR(CFG, 0x1c), 0x30);
-
-       mt76_wr(dev, MT_VEND_ADDR(CFG, 0x14), 0x484f);
-       udelay(1);
-
-       mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(17));
-       usleep_range(150, 200);
-
-       mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(16));
-       usleep_range(50, 100);
-
-       mt76_set(dev, MT_VEND_ADDR(CFG, 0x14c), BIT(19) | BIT(20));
-}
-
-static void mt76x2u_power_on_rf(struct mt76x2_dev *dev, int unit)
-{
-       int shift = unit ? 8 : 0;
-       u32 val = (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift;
-
-       /* Enable RF BG */
-       mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) << shift);
-       usleep_range(10, 20);
-
-       /* Enable RFDIG LDO/AFE/ABB/ADDA */
-       mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), val);
-       usleep_range(10, 20);
-
-       /* Switch RFDIG power to internal LDO */
-       mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(2) << shift);
-       usleep_range(10, 20);
-
-       mt76x2u_power_on_rf_patch(dev);
-
-       mt76_set(dev, 0x530, 0xf);
-}
-
-static void mt76x2u_power_on(struct mt76x2_dev *dev)
-{
-       u32 val;
-
-       /* Turn on WL MTCMOS */
-       mt76_set(dev, MT_VEND_ADDR(CFG, 0x148),
-                MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP);
-
-       val = MT_WLAN_MTC_CTRL_STATE_UP |
-             MT_WLAN_MTC_CTRL_PWR_ACK |
-             MT_WLAN_MTC_CTRL_PWR_ACK_S;
-
-       mt76_poll(dev, MT_VEND_ADDR(CFG, 0x148), val, val, 1000);
-
-       mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0x7f << 16);
-       usleep_range(10, 20);
-
-       mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
-       usleep_range(10, 20);
-
-       mt76_set(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
-       mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xfff);
-
-       /* Turn on AD/DA power down */
-       mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1204), BIT(3));
-
-       /* WLAN function enable */
-       mt76_set(dev, MT_VEND_ADDR(CFG, 0x80), BIT(0));
-
-       /* Release BBP software reset */
-       mt76_clear(dev, MT_VEND_ADDR(CFG, 0x64), BIT(18));
-
-       mt76x2u_power_on_rf(dev, 0);
-       mt76x2u_power_on_rf(dev, 1);
-}
-
-static int mt76x2u_init_eeprom(struct mt76x2_dev *dev)
-{
-       u32 val, i;
-
-       dev->mt76.eeprom.data = devm_kzalloc(dev->mt76.dev,
-                                            MT7612U_EEPROM_SIZE,
-                                            GFP_KERNEL);
-       dev->mt76.eeprom.size = MT7612U_EEPROM_SIZE;
-       if (!dev->mt76.eeprom.data)
-               return -ENOMEM;
-
-       for (i = 0; i + 4 <= MT7612U_EEPROM_SIZE; i += 4) {
-               val = mt76_rr(dev, MT_VEND_ADDR(EEPROM, i));
-               put_unaligned_le32(val, dev->mt76.eeprom.data + i);
-       }
-
-       mt76x2_eeprom_parse_hw_cap(dev);
-       return 0;
-}
-
-struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev)
-{
-       static const struct mt76_driver_ops drv_ops = {
-               .tx_prepare_skb = mt76x2u_tx_prepare_skb,
-               .tx_complete_skb = mt76x2u_tx_complete_skb,
-               .tx_status_data = mt76x2u_tx_status_data,
-               .rx_skb = mt76x2_queue_rx_skb,
-       };
-       struct mt76x2_dev *dev;
-       struct mt76_dev *mdev;
-
-       mdev = mt76_alloc_device(sizeof(*dev), &mt76x2u_ops);
-       if (!mdev)
-               return NULL;
-
-       dev = container_of(mdev, struct mt76x2_dev, mt76);
-       mdev->dev = pdev;
-       mdev->drv = &drv_ops;
-
-       mutex_init(&dev->mutex);
-
-       return dev;
-}
-
-static void mt76x2u_init_beacon_offsets(struct mt76x2_dev *dev)
-{
-       mt76_wr(dev, MT_BCN_OFFSET(0), 0x18100800);
-       mt76_wr(dev, MT_BCN_OFFSET(1), 0x38302820);
-       mt76_wr(dev, MT_BCN_OFFSET(2), 0x58504840);
-       mt76_wr(dev, MT_BCN_OFFSET(3), 0x78706860);
-}
-
-int mt76x2u_init_hardware(struct mt76x2_dev *dev)
-{
-       static const u16 beacon_offsets[] = {
-               /* 512 byte per beacon */
-               0xc000, 0xc200, 0xc400, 0xc600,
-               0xc800, 0xca00, 0xcc00, 0xce00,
-               0xd000, 0xd200, 0xd400, 0xd600,
-               0xd800, 0xda00, 0xdc00, 0xde00
-       };
-       const struct mt76_wcid_addr addr = {
-               .macaddr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
-               .ba_mask = 0,
-       };
-       int i, err;
-
-       dev->beacon_offsets = beacon_offsets;
-
-       mt76x2_reset_wlan(dev, true);
-       mt76x2u_power_on(dev);
-
-       if (!mt76x2_wait_for_mac(dev))
-               return -ETIMEDOUT;
-
-       err = mt76x2u_mcu_fw_init(dev);
-       if (err < 0)
-               return err;
-
-       if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
-                           MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
-                           MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100))
-               return -EIO;
-
-       /* wait for asic ready after fw load. */
-       if (!mt76x2_wait_for_mac(dev))
-               return -ETIMEDOUT;
-
-       mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0);
-       mt76_wr(dev, MT_TSO_CTRL, 0);
-
-       mt76x2u_init_dma(dev);
-
-       err = mt76x2u_mcu_init(dev);
-       if (err < 0)
-               return err;
-
-       err = mt76x2u_mac_reset(dev);
-       if (err < 0)
-               return err;
-
-       mt76x2u_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
-       dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
-
-       mt76x2u_init_beacon_offsets(dev);
-
-       if (!mt76x2_wait_for_bbp(dev))
-               return -ETIMEDOUT;
-
-       /* reset wcid table */
-       for (i = 0; i < 254; i++)
-               mt76_wr_copy(dev, MT_WCID_ADDR(i), &addr,
-                            sizeof(struct mt76_wcid_addr));
-
-       /* reset shared key table and pairwise key table */
-       for (i = 0; i < 4; i++)
-               mt76_wr(dev, MT_SKEY_MODE_BASE_0 + 4 * i, 0);
-       for (i = 0; i < 256; i++)
-               mt76_wr(dev, MT_WCID_ATTR(i), 1);
-
-       mt76_clear(dev, MT_BEACON_TIME_CFG,
-                  MT_BEACON_TIME_CFG_TIMER_EN |
-                  MT_BEACON_TIME_CFG_SYNC_MODE |
-                  MT_BEACON_TIME_CFG_TBTT_EN |
-                  MT_BEACON_TIME_CFG_BEACON_TX);
-
-       mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
-       mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f);
-
-       err = mt76x2u_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
-       if (err < 0)
-               return err;
-
-       mt76x2u_phy_set_rxpath(dev);
-       mt76x2u_phy_set_txdac(dev);
-
-       return mt76x2u_mac_stop(dev);
-}
-
-int mt76x2u_register_device(struct mt76x2_dev *dev)
-{
-       struct ieee80211_hw *hw = mt76_hw(dev);
-       struct wiphy *wiphy = hw->wiphy;
-       int err;
-
-       INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate);
-       mt76x2_init_device(dev);
-
-       err = mt76x2u_init_eeprom(dev);
-       if (err < 0)
-               return err;
-
-       err = mt76u_mcu_init_rx(&dev->mt76);
-       if (err < 0)
-               return err;
-
-       err = mt76u_alloc_queues(&dev->mt76);
-       if (err < 0)
-               goto fail;
-
-       err = mt76x2u_init_hardware(dev);
-       if (err < 0)
-               goto fail;
-
-       wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
-
-       err = mt76_register_device(&dev->mt76, true, mt76x2_rates,
-                                  ARRAY_SIZE(mt76x2_rates));
-       if (err)
-               goto fail;
-
-       /* check hw sg support in order to enable AMSDU */
-       if (mt76u_check_sg(&dev->mt76))
-               hw->max_tx_fragments = MT_SG_MAX_SIZE;
-       else
-               hw->max_tx_fragments = 1;
-
-       set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
-
-       mt76x2_init_debugfs(dev);
-       mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
-       mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
-
-       return 0;
-
-fail:
-       mt76x2u_cleanup(dev);
-       return err;
-}
-
-void mt76x2u_stop_hw(struct mt76x2_dev *dev)
-{
-       mt76u_stop_stat_wk(&dev->mt76);
-       cancel_delayed_work_sync(&dev->cal_work);
-       mt76x2u_mac_stop(dev);
-}
-
-void mt76x2u_cleanup(struct mt76x2_dev *dev)
-{
-       mt76x2u_mcu_set_radio_state(dev, false);
-       mt76x2u_stop_hw(dev);
-       mt76u_queues_deinit(&dev->mt76);
-       mt76x2u_mcu_deinit(dev);
-}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c
deleted file mode 100644 (file)
index eab7ab2..0000000
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "mt76x2u.h"
-#include "mt76x2_eeprom.h"
-
-static void mt76x2u_mac_reset_counters(struct mt76x2_dev *dev)
-{
-       mt76_rr(dev, MT_RX_STAT_0);
-       mt76_rr(dev, MT_RX_STAT_1);
-       mt76_rr(dev, MT_RX_STAT_2);
-       mt76_rr(dev, MT_TX_STA_0);
-       mt76_rr(dev, MT_TX_STA_1);
-       mt76_rr(dev, MT_TX_STA_2);
-}
-
-static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev)
-{
-       s8 offset = 0;
-       u16 eep_val;
-
-       eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
-
-       offset = eep_val & 0x7f;
-       if ((eep_val & 0xff) == 0xff)
-               offset = 0;
-       else if (eep_val & 0x80)
-               offset = 0 - offset;
-
-       eep_val >>= 8;
-       if (eep_val == 0x00 || eep_val == 0xff) {
-               eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
-               eep_val &= 0xff;
-
-               if (eep_val == 0x00 || eep_val == 0xff)
-                       eep_val = 0x14;
-       }
-
-       eep_val &= 0x7f;
-       mt76_rmw_field(dev, MT_VEND_ADDR(CFG, MT_XO_CTRL5),
-                      MT_XO_CTRL5_C2_VAL, eep_val + offset);
-       mt76_set(dev, MT_VEND_ADDR(CFG, MT_XO_CTRL6), MT_XO_CTRL6_C2_CTRL);
-
-       mt76_wr(dev, 0x504, 0x06000000);
-       mt76_wr(dev, 0x50c, 0x08800000);
-       mdelay(5);
-       mt76_wr(dev, 0x504, 0x0);
-
-       /* decrease SIFS from 16us to 13us */
-       mt76_rmw_field(dev, MT_XIFS_TIME_CFG,
-                      MT_XIFS_TIME_CFG_OFDM_SIFS, 0xd);
-       mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, MT_BKOFF_SLOT_CFG_CC_DELAY, 1);
-
-       /* init fce */
-       mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
-
-       eep_val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
-       switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
-       case 0:
-               mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
-               break;
-       case 1:
-               mt76_wr(dev, MT_XO_CTRL7, 0x5c1feed0);
-               break;
-       default:
-               break;
-       }
-}
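
The crystal-trim fixup above decodes a compact EEPROM encoding: the low byte of MT_EE_XTAL_TRIM_2 is a sign-magnitude offset (bit 7 negative, 0xff unprogrammed) and the high byte is the base trim, falling back to MT_EE_XTAL_TRIM_1 and then to a 0x14 default. A minimal userspace sketch of just that decode, with the interpretation read off the code rather than a datasheet:

#include <stdint.h>
#include <stdio.h>

static uint8_t xtal_trim(uint16_t trim2, uint16_t trim1)
{
        int8_t offset = trim2 & 0x7f;          /* magnitude in bits 0-6 */
        uint16_t base;

        if ((trim2 & 0xff) == 0xff)            /* 0xff = unprogrammed */
                offset = 0;
        else if (trim2 & 0x80)                 /* bit 7 = negative */
                offset = -offset;

        base = trim2 >> 8;                     /* base trim in the high byte */
        if (base == 0x00 || base == 0xff) {
                base = trim1 & 0xff;           /* fall back to TRIM_1 ... */
                if (base == 0x00 || base == 0xff)
                        base = 0x14;           /* ... then to the default */
        }

        return (base & 0x7f) + offset;
}

int main(void)
{
        /* base 0x14 with a -5 offset -> 0x0f */
        printf("trim=0x%02x\n", xtal_trim(0x1485, 0x0000));
        return 0;
}
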
-
-int mt76x2u_mac_reset(struct mt76x2_dev *dev)
-{
-       mt76_wr(dev, MT_WPDMA_GLO_CFG, BIT(4) | BIT(5));
-
-       /* init pbf regs */
-       mt76_wr(dev, MT_PBF_TX_MAX_PCNT, 0xefef3f1f);
-       mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf);
-
-       mt76_write_mac_initvals(dev);
-
-       mt76_wr(dev, MT_TX_LINK_CFG, 0x1020);
-       mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13);
-       mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00);
-       mt76_wr(dev, MT_TX_RTS_CFG, 0x92b20);
-
-       mt76_wr(dev, MT_WMM_AIFSN, 0x2273);
-       mt76_wr(dev, MT_WMM_CWMIN, 0x2344);
-       mt76_wr(dev, MT_WMM_CWMAX, 0x34aa);
-
-       mt76_clear(dev, MT_MAC_SYS_CTRL,
-                  MT_MAC_SYS_CTRL_RESET_CSR |
-                  MT_MAC_SYS_CTRL_RESET_BBP);
-
-       if (is_mt7612(dev))
-               mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN);
-
-       mt76_set(dev, MT_EXT_CCA_CFG, 0xf000);
-       mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31));
-
-       mt76x2u_mac_fixup_xtal(dev);
-
-       return 0;
-}
-
-int mt76x2u_mac_start(struct mt76x2_dev *dev)
-{
-       mt76x2u_mac_reset_counters(dev);
-
-       mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
-       wait_for_wpdma(dev);
-       usleep_range(50, 100);
-
-       mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
-
-       mt76_wr(dev, MT_MAC_SYS_CTRL,
-               MT_MAC_SYS_CTRL_ENABLE_TX |
-               MT_MAC_SYS_CTRL_ENABLE_RX);
-
-       return 0;
-}
-
-int mt76x2u_mac_stop(struct mt76x2_dev *dev)
-{
-       int i, count = 0, val;
-       bool stopped = false;
-       u32 rts_cfg;
-
-       if (test_bit(MT76_REMOVED, &dev->mt76.state))
-               return -EIO;
-
-       rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
-       mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
-
-       mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20));
-       mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1));
-
-       /* wait for tx dma to stop */
-       for (i = 0; i < 2000; i++) {
-               val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
-               if (!(val & MT_USB_DMA_CFG_TX_BUSY) && i > 10)
-                       break;
-               usleep_range(50, 100);
-       }
-
-       /* page count on TxQ */
-       for (i = 0; i < 200; i++) {
-               if (!(mt76_rr(dev, 0x0438) & 0xffffffff) &&
-                   !(mt76_rr(dev, 0x0a30) & 0x000000ff) &&
-                   !(mt76_rr(dev, 0x0a34) & 0xff00ff00))
-                       break;
-               usleep_range(10, 20);
-       }
-
-       /* disable tx-rx */
-       mt76_clear(dev, MT_MAC_SYS_CTRL,
-                  MT_MAC_SYS_CTRL_ENABLE_RX |
-                  MT_MAC_SYS_CTRL_ENABLE_TX);
-
-       /* Wait for MAC to become idle */
-       for (i = 0; i < 1000; i++) {
-               if (!(mt76_rr(dev, MT_MAC_STATUS) & MT_MAC_STATUS_TX) &&
-                   !mt76_rr(dev, MT_BBP(IBI, 12))) {
-                       stopped = true;
-                       break;
-               }
-               usleep_range(10, 20);
-       }
-
-       if (!stopped) {
-               mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
-               mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
-
-               mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
-               mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
-       }
-
-       /* page count on RxQ */
-       for (i = 0; i < 200; i++) {
-               if (!(mt76_rr(dev, 0x0430) & 0x00ff0000) &&
-                   !(mt76_rr(dev, 0x0a30) & 0xffffffff) &&
-                   !(mt76_rr(dev, 0x0a34) & 0xffffffff) &&
-                   ++count > 10)
-                       break;
-               msleep(50);
-       }
-
-       if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 2000))
-               dev_warn(dev->mt76.dev, "MAC RX failed to stop\n");
-
-       /* wait for rx dma to stop */
-       for (i = 0; i < 2000; i++) {
-               val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
-               if (!(val & MT_USB_DMA_CFG_RX_BUSY) && i > 10)
-                       break;
-               usleep_range(50, 100);
-       }
-
-       mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
-
-       return 0;
-}
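
The stop path above leans on one recurring idiom: bounded polling of busy bits with a short sleep between reads, plus a minimum number of iterations before an idle reading is trusted (the "i > 10" checks). A self-contained sketch of the idiom, with reg_read() as a stub standing in for mt76_rr():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_busy = 3;        /* stand-in for the DMA busy bits */

static uint32_t reg_read(void)
{
        return fake_busy ? fake_busy-- : 0;
}

static bool poll_clear(uint32_t mask, int tries)
{
        while (tries--) {
                if (!(reg_read() & mask))
                        return true;  /* busy bits cleared in time */
                /* the driver sleeps 10-100us between reads here */
        }
        return false;                 /* caller falls back to a BBP reset */
}

int main(void)
{
        printf("stopped=%d\n", poll_clear(0x3, 2000));
        return 0;
}
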
-
-void mt76x2u_mac_resume(struct mt76x2_dev *dev)
-{
-       mt76_wr(dev, MT_MAC_SYS_CTRL,
-               MT_MAC_SYS_CTRL_ENABLE_TX |
-               MT_MAC_SYS_CTRL_ENABLE_RX);
-       mt76_set(dev, MT_TXOP_CTRL_CFG, BIT(20));
-       mt76_set(dev, MT_TXOP_HLDR_ET, BIT(1));
-}
-
-void mt76x2u_mac_setaddr(struct mt76x2_dev *dev, u8 *addr)
-{
-       ether_addr_copy(dev->mt76.macaddr, addr);
-
-       if (!is_valid_ether_addr(dev->mt76.macaddr)) {
-               eth_random_addr(dev->mt76.macaddr);
-               dev_info(dev->mt76.dev,
-                        "Invalid MAC address, using random address %pM\n",
-                        dev->mt76.macaddr);
-       }
-
-       mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr));
-       mt76_wr(dev, MT_MAC_ADDR_DW1,
-               get_unaligned_le16(dev->mt76.macaddr + 4) |
-               FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
-}
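
The two writes above split the 6-byte MAC across MT_MAC_ADDR_DW0/DW1 in little-endian order; DW1 additionally carries the unicast-to-me mask in its upper bits. Checking just the byte packing standalone:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

        /* DW0 carries octets 0-3, DW1 octets 4-5, both little-endian,
         * matching the get_unaligned_le32()/le16() calls above.
         */
        uint32_t dw0 = mac[0] | mac[1] << 8 | mac[2] << 16 |
                       (uint32_t)mac[3] << 24;
        uint32_t dw1 = mac[4] | mac[5] << 8;

        printf("DW0=0x%08x DW1=0x%04x\n", dw0, dw1); /* 0x33221100 0x5544 */
        return 0;
}
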
-
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c
deleted file mode 100644 (file)
index 7367ba1..0000000
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "mt76x2u.h"
-
-static int mt76x2u_start(struct ieee80211_hw *hw)
-{
-       struct mt76x2_dev *dev = hw->priv;
-       int ret;
-
-       mutex_lock(&dev->mutex);
-
-       ret = mt76x2u_mac_start(dev);
-       if (ret)
-               goto out;
-
-       set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
-
-out:
-       mutex_unlock(&dev->mutex);
-       return ret;
-}
-
-static void mt76x2u_stop(struct ieee80211_hw *hw)
-{
-       struct mt76x2_dev *dev = hw->priv;
-
-       mutex_lock(&dev->mutex);
-       clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
-       mt76x2u_stop_hw(dev);
-       mutex_unlock(&dev->mutex);
-}
-
-static int mt76x2u_add_interface(struct ieee80211_hw *hw,
-                                struct ieee80211_vif *vif)
-{
-       struct mt76x2_dev *dev = hw->priv;
-       struct mt76x2_vif *mvif = (struct mt76x2_vif *)vif->drv_priv;
-       unsigned int idx = 0;
-
-       if (!ether_addr_equal(dev->mt76.macaddr, vif->addr))
-               mt76x2u_mac_setaddr(dev, vif->addr);
-
-       mvif->idx = idx;
-       mvif->group_wcid.idx = MT_VIF_WCID(idx);
-       mvif->group_wcid.hw_key_idx = -1;
-       mt76x2_txq_init(dev, vif->txq);
-
-       return 0;
-}
-
-static int
-mt76x2u_set_channel(struct mt76x2_dev *dev,
-                   struct cfg80211_chan_def *chandef)
-{
-       int err;
-
-       cancel_delayed_work_sync(&dev->cal_work);
-       set_bit(MT76_RESET, &dev->mt76.state);
-
-       mt76_set_channel(&dev->mt76);
-
-       mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20));
-       mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1));
-       mt76x2_mac_stop(dev, false);
-
-       err = mt76x2u_phy_set_channel(dev, chandef);
-
-       mt76x2u_mac_resume(dev);
-
-       clear_bit(MT76_RESET, &dev->mt76.state);
-       mt76_txq_schedule_all(&dev->mt76);
-
-       return err;
-}
-
-static void
-mt76x2u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                        struct ieee80211_bss_conf *info, u32 changed)
-{
-       struct mt76x2_dev *dev = hw->priv;
-
-       mutex_lock(&dev->mutex);
-
-       if (changed & BSS_CHANGED_ASSOC) {
-               mt76x2u_phy_channel_calibrate(dev);
-               mt76x2_apply_gain_adj(dev);
-       }
-
-       if (changed & BSS_CHANGED_BSSID) {
-               mt76_wr(dev, MT_MAC_BSSID_DW0,
-                       get_unaligned_le32(info->bssid));
-               mt76_wr(dev, MT_MAC_BSSID_DW1,
-                       get_unaligned_le16(info->bssid + 4));
-       }
-
-       mutex_unlock(&dev->mutex);
-}
-
-static int
-mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
-{
-       struct mt76x2_dev *dev = hw->priv;
-       int err = 0;
-
-       mutex_lock(&dev->mutex);
-
-       if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
-               if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
-                       dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
-               else
-                       dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
-               mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
-       }
-
-       if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
-               ieee80211_stop_queues(hw);
-               err = mt76x2u_set_channel(dev, &hw->conf.chandef);
-               ieee80211_wake_queues(hw);
-       }
-
-       if (changed & IEEE80211_CONF_CHANGE_POWER) {
-               dev->txpower_conf = hw->conf.power_level * 2;
-
-               /* convert to per-chain power for 2x2 devices */
-               dev->txpower_conf -= 6;
-
-               if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
-                       mt76x2_phy_set_txpower(dev);
-       }
-
-       mutex_unlock(&dev->mutex);
-
-       return err;
-}
-
-static void
-mt76x2u_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-               const u8 *mac)
-{
-       struct mt76x2_dev *dev = hw->priv;
-
-       set_bit(MT76_SCANNING, &dev->mt76.state);
-}
-
-static void
-mt76x2u_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
-       struct mt76x2_dev *dev = hw->priv;
-
-       clear_bit(MT76_SCANNING, &dev->mt76.state);
-}
-
-const struct ieee80211_ops mt76x2u_ops = {
-       .tx = mt76x2_tx,
-       .start = mt76x2u_start,
-       .stop = mt76x2u_stop,
-       .add_interface = mt76x2u_add_interface,
-       .remove_interface = mt76x2_remove_interface,
-       .sta_add = mt76x2_sta_add,
-       .sta_remove = mt76x2_sta_remove,
-       .set_key = mt76x2_set_key,
-       .ampdu_action = mt76x2_ampdu_action,
-       .config = mt76x2u_config,
-       .wake_tx_queue = mt76_wake_tx_queue,
-       .bss_info_changed = mt76x2u_bss_info_changed,
-       .configure_filter = mt76x2_configure_filter,
-       .conf_tx = mt76x2_conf_tx,
-       .sw_scan_start = mt76x2u_sw_scan,
-       .sw_scan_complete = mt76x2u_sw_scan_complete,
-       .sta_rate_tbl_update = mt76x2_sta_rate_tbl_update,
-};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c
deleted file mode 100644 (file)
index 22c16d6..0000000
+++ /dev/null
@@ -1,463 +0,0 @@
-/*
- * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/firmware.h>
-
-#include "mt76x2u.h"
-#include "mt76x2_eeprom.h"
-
-#define MT_CMD_HDR_LEN                 4
-#define MT_INBAND_PACKET_MAX_LEN       192
-#define MT_MCU_MEMMAP_WLAN             0x410000
-
-#define MCU_FW_URB_MAX_PAYLOAD         0x3900
-#define MCU_ROM_PATCH_MAX_PAYLOAD      2048
-
-#define MT76U_MCU_ILM_OFFSET           0x80000
-#define MT76U_MCU_DLM_OFFSET           0x110000
-#define MT76U_MCU_ROM_PATCH_OFFSET     0x90000
-
-static int
-mt76x2u_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
-                           u32 val)
-{
-       struct {
-               __le32 id;
-               __le32 value;
-       } __packed __aligned(4) msg = {
-               .id = cpu_to_le32(func),
-               .value = cpu_to_le32(val),
-       };
-       struct sk_buff *skb;
-
-       skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
-       if (!skb)
-               return -ENOMEM;
-       return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_FUN_SET_OP,
-                                 func != Q_SELECT);
-}
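
Every command in this file follows the same shape: a small __packed struct of little-endian words, copied into an skb and pushed through mt76u_mcu_send_msg(). A sketch of what the id/value pair above looks like on the wire (the Q_SELECT value of 1 is assumed purely for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t id = 1 /* e.g. Q_SELECT */, value = 1;
        uint8_t payload[8];
        int i;

        for (i = 0; i < 4; i++) {
                payload[i] = id >> (8 * i);        /* cpu_to_le32(id) */
                payload[4 + i] = value >> (8 * i); /* cpu_to_le32(value) */
        }

        for (i = 0; i < 8; i++)
                printf("%02x ", payload[i]);       /* 01 00 00 00 01 00 00 00 */
        printf("\n");
        return 0;
}
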
-
-int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val)
-{
-       struct {
-               __le32 mode;
-               __le32 level;
-       } __packed __aligned(4) msg = {
-               .mode = cpu_to_le32(val ? RADIO_ON : RADIO_OFF),
-               .level = cpu_to_le32(0),
-       };
-       struct sk_buff *skb;
-
-       skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
-       if (!skb)
-               return -ENOMEM;
-       return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_POWER_SAVING_OP,
-                                 false);
-}
-
-int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
-                       u8 channel)
-{
-       struct {
-               u8 cr_mode;
-               u8 temp;
-               u8 ch;
-               u8 _pad0;
-               __le32 cfg;
-       } __packed __aligned(4) msg = {
-               .cr_mode = type,
-               .temp = temp_level,
-               .ch = channel,
-       };
-       struct sk_buff *skb;
-       u32 val;
-
-       val = BIT(31);
-       val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
-       val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
-       msg.cfg = cpu_to_le32(val);
-
-       /* send the CR configuration to the MCU */
-       skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
-       if (!skb)
-               return -ENOMEM;
-       return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_LOAD_CR, true);
-}
-
-int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
-                           u8 bw_index, bool scan)
-{
-       struct {
-               u8 idx;
-               u8 scan;
-               u8 bw;
-               u8 _pad0;
-
-               __le16 chainmask;
-               u8 ext_chan;
-               u8 _pad1;
-
-       } __packed __aligned(4) msg = {
-               .idx = channel,
-               .scan = scan,
-               .bw = bw,
-               .chainmask = cpu_to_le16(dev->chainmask),
-       };
-       struct sk_buff *skb;
-
-       /* first set the channel without the extension channel info */
-       skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
-       if (!skb)
-               return -ENOMEM;
-
-       mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true);
-
-       usleep_range(5000, 10000);
-
-       msg.ext_chan = 0xe0 + bw_index;
-       skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
-       if (!skb)
-               return -ENOMEM;
-
-       return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true);
-}
-
-int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
-                         u32 val)
-{
-       struct {
-               __le32 id;
-               __le32 value;
-       } __packed __aligned(4) msg = {
-               .id = cpu_to_le32(type),
-               .value = cpu_to_le32(val),
-       };
-       struct sk_buff *skb;
-
-       skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
-       if (!skb)
-               return -ENOMEM;
-       return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true);
-}
-
-int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
-                         bool force)
-{
-       struct {
-               __le32 channel;
-               __le32 gain_val;
-       } __packed __aligned(4) msg = {
-               .channel = cpu_to_le32(channel),
-               .gain_val = cpu_to_le32(gain),
-       };
-       struct sk_buff *skb;
-
-       if (force)
-               msg.channel |= cpu_to_le32(BIT(31));
-
-       skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
-       if (!skb)
-               return -ENOMEM;
-       return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_INIT_GAIN_OP, true);
-}
-
-int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
-                               bool ext, int rssi, u32 false_cca)
-{
-       struct {
-               __le32 channel;
-               __le32 rssi_val;
-               __le32 false_cca_val;
-       } __packed __aligned(4) msg = {
-               .rssi_val = cpu_to_le32(rssi),
-               .false_cca_val = cpu_to_le32(false_cca),
-       };
-       struct sk_buff *skb;
-       u32 val = channel;
-
-       if (ap)
-               val |= BIT(31);
-       if (ext)
-               val |= BIT(30);
-       msg.channel = cpu_to_le32(val);
-
-       skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
-       if (!skb)
-               return -ENOMEM;
-       return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_DYNC_VGA_OP, true);
-}
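
The dynamic-VGA message overloads its channel word with mode flags, bit 31 for AP mode and bit 30 for the extension channel, leaving the low bits for the channel number. A quick standalone check of the packed word:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t val = 36;        /* channel number */

        val |= 1u << 31;          /* ap */
        val |= 1u << 30;          /* ext */

        printf("channel word = 0x%08x\n", val);  /* 0xc0000024 */
        return 0;
}
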
-
-int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev,
-                         struct mt76x2_tssi_comp *tssi_data)
-{
-       struct {
-               __le32 id;
-               struct mt76x2_tssi_comp data;
-       } __packed __aligned(4) msg = {
-               .id = cpu_to_le32(MCU_CAL_TSSI_COMP),
-               .data = *tssi_data,
-       };
-       struct sk_buff *skb;
-
-       skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
-       if (!skb)
-               return -ENOMEM;
-       return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true);
-}
-
-static void mt76x2u_mcu_load_ivb(struct mt76x2_dev *dev)
-{
-       mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
-                            USB_DIR_OUT | USB_TYPE_VENDOR,
-                            0x12, 0, NULL, 0);
-}
-
-static void mt76x2u_mcu_enable_patch(struct mt76x2_dev *dev)
-{
-       struct mt76_usb *usb = &dev->mt76.usb;
-       const u8 data[] = {
-               0x6f, 0xfc, 0x08, 0x01,
-               0x20, 0x04, 0x00, 0x00,
-               0x00, 0x09, 0x00,
-       };
-
-       memcpy(usb->data, data, sizeof(data));
-       mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
-                            USB_DIR_OUT | USB_TYPE_CLASS,
-                            0x12, 0, usb->data, sizeof(data));
-}
-
-static void mt76x2u_mcu_reset_wmt(struct mt76x2_dev *dev)
-{
-       struct mt76_usb *usb = &dev->mt76.usb;
-       u8 data[] = {
-               0x6f, 0xfc, 0x05, 0x01,
-               0x07, 0x01, 0x00, 0x04
-       };
-
-       memcpy(usb->data, data, sizeof(data));
-       mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
-                            USB_DIR_OUT | USB_TYPE_CLASS,
-                            0x12, 0, usb->data, sizeof(data));
-}
-
-static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev)
-{
-       bool rom_protect = !is_mt7612(dev);
-       struct mt76x2_patch_header *hdr;
-       u32 val, patch_mask, patch_reg;
-       const struct firmware *fw;
-       int err;
-
-       if (rom_protect &&
-           !mt76_poll_msec(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
-               dev_err(dev->mt76.dev,
-                       "could not get hardware semaphore for ROM PATCH\n");
-               return -ETIMEDOUT;
-       }
-
-       if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
-               patch_mask = BIT(0);
-               patch_reg = MT_MCU_CLOCK_CTL;
-       } else {
-               patch_mask = BIT(1);
-               patch_reg = MT_MCU_COM_REG0;
-       }
-
-       if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
-               dev_info(dev->mt76.dev, "ROM patch already applied\n");
-               return 0;
-       }
-
-       err = request_firmware(&fw, MT7662U_ROM_PATCH, dev->mt76.dev);
-       if (err < 0)
-               return err;
-
-       if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
-               dev_err(dev->mt76.dev, "failed to load firmware\n");
-               err = -EIO;
-               goto out;
-       }
-
-       hdr = (struct mt76x2_patch_header *)fw->data;
-       dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
-
-       /* enable USB_DMA_CFG */
-       val = MT_USB_DMA_CFG_RX_BULK_EN |
-             MT_USB_DMA_CFG_TX_BULK_EN |
-             FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
-       mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
-
-       /* vendor reset */
-       mt76u_mcu_fw_reset(&dev->mt76);
-       usleep_range(5000, 10000);
-
-       /* enable FCE to send in-band cmd */
-       mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
-       /* FCE tx_fs_base_ptr */
-       mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
-       /* FCE tx_fs_max_cnt */
-       mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
-       /* FCE pdma enable */
-       mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
-       /* FCE skip_fs_en */
-       mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
-
-       err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
-                                    fw->size - sizeof(*hdr),
-                                    MCU_ROM_PATCH_MAX_PAYLOAD,
-                                    MT76U_MCU_ROM_PATCH_OFFSET);
-       if (err < 0) {
-               err = -EIO;
-               goto out;
-       }
-
-       mt76x2u_mcu_enable_patch(dev);
-       mt76x2u_mcu_reset_wmt(dev);
-       mdelay(20);
-
-       if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 100)) {
-               dev_err(dev->mt76.dev, "failed to load ROM patch\n");
-               err = -ETIMEDOUT;
-       }
-
-out:
-       if (rom_protect)
-               mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
-       release_firmware(fw);
-       return err;
-}
-
-static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev)
-{
-       u32 val, dlm_offset = MT76U_MCU_DLM_OFFSET;
-       const struct mt76x2_fw_header *hdr;
-       int err, len, ilm_len, dlm_len;
-       const struct firmware *fw;
-
-       err = request_firmware(&fw, MT7662U_FIRMWARE, dev->mt76.dev);
-       if (err < 0)
-               return err;
-
-       if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
-               err = -EINVAL;
-               goto out;
-       }
-
-       hdr = (const struct mt76x2_fw_header *)fw->data;
-       ilm_len = le32_to_cpu(hdr->ilm_len);
-       dlm_len = le32_to_cpu(hdr->dlm_len);
-       len = sizeof(*hdr) + ilm_len + dlm_len;
-       if (fw->size != len) {
-               err = -EINVAL;
-               goto out;
-       }
-
-       val = le16_to_cpu(hdr->fw_ver);
-       dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
-                (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
-
-       val = le16_to_cpu(hdr->build_ver);
-       dev_info(dev->mt76.dev, "Build: %x\n", val);
-       dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
-
-       /* vendor reset */
-       mt76u_mcu_fw_reset(&dev->mt76);
-       usleep_range(5000, 10000);
-
-       /* enable USB_DMA_CFG */
-       val = MT_USB_DMA_CFG_RX_BULK_EN |
-             MT_USB_DMA_CFG_TX_BULK_EN |
-             FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
-       mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
-       /* enable FCE to send in-band cmd */
-       mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
-       /* FCE tx_fs_base_ptr */
-       mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
-       /* FCE tx_fs_max_cnt */
-       mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
-       /* FCE pdma enable */
-       mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
-       /* FCE skip_fs_en */
-       mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
-
-       /* load ILM */
-       err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
-                                    ilm_len, MCU_FW_URB_MAX_PAYLOAD,
-                                    MT76U_MCU_ILM_OFFSET);
-       if (err < 0) {
-               err = -EIO;
-               goto out;
-       }
-
-       /* load DLM */
-       if (mt76xx_rev(dev) >= MT76XX_REV_E3)
-               dlm_offset += 0x800;
-       err = mt76u_mcu_fw_send_data(&dev->mt76,
-                                    fw->data + sizeof(*hdr) + ilm_len,
-                                    dlm_len, MCU_FW_URB_MAX_PAYLOAD,
-                                    dlm_offset);
-       if (err < 0) {
-               err = -EIO;
-               goto out;
-       }
-
-       mt76x2u_mcu_load_ivb(dev);
-       if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 100)) {
-               dev_err(dev->mt76.dev, "firmware failed to start\n");
-               err = -ETIMEDOUT;
-               goto out;
-       }
-
-       mt76_set(dev, MT_MCU_COM_REG0, BIT(1));
-       /* enable FCE to send in-band cmd */
-       mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
-       dev_dbg(dev->mt76.dev, "firmware running\n");
-
-out:
-       release_firmware(fw);
-       return err;
-}
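
The firmware image parsed above is a header followed by the ILM and DLM sections, with fw->size cross-checked against the two length fields before anything is sent. The version banner is plain nibble arithmetic on hdr->fw_ver; worked through with a made-up version word:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t fw_ver = 0x1209;        /* illustrative value, not a real build */

        printf("Firmware Version: %d.%d.%02d\n",
               (fw_ver >> 12) & 0xf,     /* major -> 1 */
               (fw_ver >> 8) & 0xf,      /* minor -> 2 */
               fw_ver & 0xf);            /* patch -> 09 */
        return 0;
}
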
-
-int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev)
-{
-       int err;
-
-       err = mt76x2u_mcu_load_rom_patch(dev);
-       if (err < 0)
-               return err;
-
-       return mt76x2u_mcu_load_firmware(dev);
-}
-
-int mt76x2u_mcu_init(struct mt76x2_dev *dev)
-{
-       int err;
-
-       err = mt76x2u_mcu_function_select(dev, Q_SELECT, 1);
-       if (err < 0)
-               return err;
-
-       return mt76x2u_mcu_set_radio_state(dev, true);
-}
-
-void mt76x2u_mcu_deinit(struct mt76x2_dev *dev)
-{
-       struct mt76_usb *usb = &dev->mt76.usb;
-
-       usb_kill_urb(usb->mcu.res.urb);
-       mt76u_buf_free(&usb->mcu.res);
-}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c
deleted file mode 100644 (file)
index 5158063..0000000
+++ /dev/null
@@ -1,303 +0,0 @@
-/*
- * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "mt76x2u.h"
-#include "mt76x2_eeprom.h"
-
-void mt76x2u_phy_set_rxpath(struct mt76x2_dev *dev)
-{
-       u32 val;
-
-       val = mt76_rr(dev, MT_BBP(AGC, 0));
-       val &= ~BIT(4);
-
-       switch (dev->chainmask & 0xf) {
-       case 2:
-               val |= BIT(3);
-               break;
-       default:
-               val &= ~BIT(3);
-               break;
-       }
-       mt76_wr(dev, MT_BBP(AGC, 0), val);
-}
-
-void mt76x2u_phy_set_txdac(struct mt76x2_dev *dev)
-{
-       int txpath;
-
-       txpath = (dev->chainmask >> 8) & 0xf;
-       switch (txpath) {
-       case 2:
-               mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
-               break;
-       default:
-               mt76_clear(dev, MT_BBP(TXBE, 5), 0x3);
-               break;
-       }
-}
-
-void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev)
-{
-       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
-       bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
-
-       if (mt76x2_channel_silent(dev))
-               return;
-
-       mt76x2u_mac_stop(dev);
-
-       if (is_5ghz)
-               mt76x2u_mcu_calibrate(dev, MCU_CAL_LC, 0);
-
-       mt76x2u_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
-       mt76x2u_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
-       mt76x2u_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
-       mt76x2u_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);
-
-       mt76x2u_mac_resume(dev);
-}
-
-static void
-mt76x2u_phy_tssi_compensate(struct mt76x2_dev *dev)
-{
-       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
-       struct mt76x2_tx_power_info txp;
-       struct mt76x2_tssi_comp t = {};
-
-       if (!dev->cal.tssi_cal_done)
-               return;
-
-       if (!dev->cal.tssi_comp_pending) {
-               /* TSSI trigger */
-               t.cal_mode = BIT(0);
-               mt76x2u_mcu_tssi_comp(dev, &t);
-               dev->cal.tssi_comp_pending = true;
-       } else {
-               if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4))
-                       return;
-
-               dev->cal.tssi_comp_pending = false;
-               mt76x2_get_power_info(dev, &txp, chan);
-
-               if (mt76x2_ext_pa_enabled(dev, chan->band))
-                       t.pa_mode = 1;
-
-               t.cal_mode = BIT(1);
-               t.slope0 = txp.chain[0].tssi_slope;
-               t.offset0 = txp.chain[0].tssi_offset;
-               t.slope1 = txp.chain[1].tssi_slope;
-               t.offset1 = txp.chain[1].tssi_offset;
-               mt76x2u_mcu_tssi_comp(dev, &t);
-
-               if (t.pa_mode || dev->cal.dpd_cal_done)
-                       return;
-
-               usleep_range(10000, 20000);
-               mt76x2u_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value);
-               dev->cal.dpd_cal_done = true;
-       }
-}
-
-static void
-mt76x2u_phy_update_channel_gain(struct mt76x2_dev *dev)
-{
-       u8 channel = dev->mt76.chandef.chan->hw_value;
-       int freq, freq1;
-       u32 false_cca;
-
-       freq = dev->mt76.chandef.chan->center_freq;
-       freq1 = dev->mt76.chandef.center_freq1;
-
-       switch (dev->mt76.chandef.width) {
-       case NL80211_CHAN_WIDTH_80: {
-               int ch_group_index;
-
-               ch_group_index = (freq - freq1 + 30) / 20;
-               if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
-                       ch_group_index = 0;
-               channel += 6 - ch_group_index * 4;
-               break;
-       }
-       case NL80211_CHAN_WIDTH_40:
-               if (freq1 > freq)
-                       channel += 2;
-               else
-                       channel -= 2;
-               break;
-       default:
-               break;
-       }
-
-       dev->cal.avg_rssi_all = mt76x2_phy_get_min_avg_rssi(dev);
-       false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS,
-                             mt76_rr(dev, MT_RX_STAT_1));
-
-       mt76x2u_mcu_set_dynamic_vga(dev, channel, false, false,
-                                   dev->cal.avg_rssi_all, false_cca);
-}
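
Both this function and mt76x2u_phy_set_channel() below nudge the nominal channel number toward the centre of the 40/80 MHz block before handing it to the MCU. The 80 MHz case, worked through standalone for channel 36 in the 36-48 block:

#include <stdio.h>

int main(void)
{
        int freq = 5180, freq1 = 5210;   /* ch 36, 80 MHz centred on ch 42 */
        int channel = 36;

        int ch_group_index = (freq - freq1 + 30) / 20;  /* 20 MHz slot: 0..3 */
        channel += 6 - ch_group_index * 4;              /* -> centre channel */

        printf("group=%d centre=%d\n", ch_group_index, channel); /* 0, 42 */
        return 0;
}
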
-
-void mt76x2u_phy_calibrate(struct work_struct *work)
-{
-       struct mt76x2_dev *dev;
-
-       dev = container_of(work, struct mt76x2_dev, cal_work.work);
-       mt76x2u_phy_tssi_compensate(dev);
-       mt76x2u_phy_update_channel_gain(dev);
-
-       ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
-                                    MT_CALIBRATE_INTERVAL);
-}
-
-int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
-                           struct cfg80211_chan_def *chandef)
-{
-       u32 ext_cca_chan[4] = {
-               [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
-               [1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
-               [2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
-               [3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
-                     FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
-       };
-       bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
-       struct ieee80211_channel *chan = chandef->chan;
-       u8 channel = chan->hw_value, bw, bw_index;
-       int ch_group_index, freq, freq1, ret;
-
-       dev->cal.channel_cal_done = false;
-       freq = chandef->chan->center_freq;
-       freq1 = chandef->center_freq1;
-
-       switch (chandef->width) {
-       case NL80211_CHAN_WIDTH_40:
-               bw = 1;
-               if (freq1 > freq) {
-                       bw_index = 1;
-                       ch_group_index = 0;
-               } else {
-                       bw_index = 3;
-                       ch_group_index = 1;
-               }
-               channel += 2 - ch_group_index * 4;
-               break;
-       case NL80211_CHAN_WIDTH_80:
-               ch_group_index = (freq - freq1 + 30) / 20;
-               if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
-                       ch_group_index = 0;
-               bw = 2;
-               bw_index = ch_group_index;
-               channel += 6 - ch_group_index * 4;
-               break;
-       default:
-               bw = 0;
-               bw_index = 0;
-               ch_group_index = 0;
-               break;
-       }
-
-       mt76x2_read_rx_gain(dev);
-       mt76x2_phy_set_txpower_regs(dev, chan->band);
-       mt76x2_configure_tx_delay(dev, chan->band, bw);
-       mt76x2_phy_set_txpower(dev);
-
-       mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
-       mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
-
-       mt76_rmw(dev, MT_EXT_CCA_CFG,
-                (MT_EXT_CCA_CFG_CCA0 |
-                 MT_EXT_CCA_CFG_CCA1 |
-                 MT_EXT_CCA_CFG_CCA2 |
-                 MT_EXT_CCA_CFG_CCA3 |
-                 MT_EXT_CCA_CFG_CCA_MASK),
-                ext_cca_chan[ch_group_index]);
-
-       ret = mt76x2u_mcu_set_channel(dev, channel, bw, bw_index, scan);
-       if (ret)
-               return ret;
-
-       mt76x2u_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);
-
-       /* Enable LDPC Rx */
-       if (mt76xx_rev(dev) >= MT76XX_REV_E3)
-               mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
-
-       if (!dev->cal.init_cal_done) {
-               u8 val = mt76x2_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
-
-               if (val != 0xff)
-                       mt76x2u_mcu_calibrate(dev, MCU_CAL_R, 0);
-       }
-
-       mt76x2u_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);
-
-       /* Rx LPF calibration */
-       if (!dev->cal.init_cal_done)
-               mt76x2u_mcu_calibrate(dev, MCU_CAL_RC, 0);
-       dev->cal.init_cal_done = true;
-
-       mt76_wr(dev, MT_BBP(AGC, 61), 0xff64a4e2);
-       mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010);
-       mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404);
-       mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
-       mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101b3f);
-
-       mt76_set(dev, MT_BBP(TXO, 4), BIT(25));
-       mt76_set(dev, MT_BBP(RXO, 13), BIT(8));
-
-       if (scan)
-               return 0;
-
-       if (mt76x2_tssi_enabled(dev)) {
-               /* init default values for temp compensation */
-               mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
-                              0x38);
-               mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
-                              0x38);
-
-               /* init tssi calibration */
-               if (!mt76x2_channel_silent(dev)) {
-                       struct ieee80211_channel *chan;
-                       u32 flag = 0;
-
-                       chan = dev->mt76.chandef.chan;
-                       if (chan->band == NL80211_BAND_5GHZ)
-                               flag |= BIT(0);
-                       if (mt76x2_ext_pa_enabled(dev, chan->band))
-                               flag |= BIT(8);
-                       mt76x2u_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
-                       dev->cal.tssi_cal_done = true;
-               }
-       }
-
-       ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
-                                    MT_CALIBRATE_INTERVAL);
-       return 0;
-}
index af48d43bb7dca0ee7a104fa71b6c194aab1788c7..bf0e9e666bc4979efc4ccefebd257f0fd4f7a807 100644 (file)
@@ -91,11 +91,23 @@ mt76_txq_get_qid(struct ieee80211_txq *txq)
        return txq->ac;
 }
 
+static void
+mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+
+       if (!ieee80211_is_data_qos(hdr->frame_control))
+               return;
+
+       mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
+}
+
 void
 mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
        struct mt76_wcid *wcid, struct sk_buff *skb)
 {
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct mt76_queue *q;
        int qid = skb_get_queue_mapping(skb);
 
@@ -108,6 +120,19 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
                ieee80211_get_tx_rates(info->control.vif, sta, skb,
                                       info->control.rates, 1);
 
+       if (sta && ieee80211_is_data_qos(hdr->frame_control)) {
+               struct ieee80211_txq *txq;
+               struct mt76_txq *mtxq;
+               u8 tid;
+
+               tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+               txq = sta->txq[tid];
+               mtxq = (struct mt76_txq *) txq->drv_priv;
+
+               if (mtxq->aggr)
+                       mt76_check_agg_ssn(mtxq, skb);
+       }
+
        q = &dev->q_tx[qid];
 
        spin_lock_bh(&q->lock);
@@ -143,17 +168,6 @@ mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
        return skb;
 }
 
-static void
-mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
-{
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-
-       if (!ieee80211_is_data_qos(hdr->frame_control))
-               return;
-
-       mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
-}
-
 static void
 mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
                  struct sk_buff *skb, bool last)
@@ -442,3 +456,19 @@ void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
        mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)];
 }
 EXPORT_SYMBOL_GPL(mt76_txq_init);
+
+u8 mt76_ac_to_hwq(u8 ac)
+{
+       static const u8 wmm_queue_map[] = {
+               [IEEE80211_AC_BE] = 0,
+               [IEEE80211_AC_BK] = 1,
+               [IEEE80211_AC_VI] = 2,
+               [IEEE80211_AC_VO] = 3,
+       };
+
+       if (WARN_ON(ac >= IEEE80211_NUM_ACS))
+               return 0;
+
+       return wmm_queue_map[ac];
+}
+EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);
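
mt76_ac_to_hwq() centralizes what was previously a per-driver q2hwq() helper (see the usb.c hunk below). mac80211 numbers its access categories VO=0 through BK=3, while the WMM hardware queues count BE=0, BK=1, VI=2, VO=3; a userspace rendering of the same table:

#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK, NUM_ACS };   /* mac80211 ordering */

int main(void)
{
        /* same table as mt76_ac_to_hwq() above */
        static const unsigned char wmm_queue_map[NUM_ACS] = {
                [AC_BE] = 0, [AC_BK] = 1, [AC_VI] = 2, [AC_VO] = 3,
        };
        static const char *name[NUM_ACS] = { "VO", "VI", "BE", "BK" };
        int ac;

        for (ac = 0; ac < NUM_ACS; ac++)
                printf("AC_%s -> hwq %d\n", name[ac], wmm_queue_map[ac]);
        return 0;
}
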
index 7780b07543bb8d2bf247268f456e44efed517eab..6b643ea701e3875bee4eea1cb11a8846e1c1171f 100644 (file)
@@ -14,6 +14,7 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#include <linux/module.h>
 #include "mt76.h"
 #include "usb_trace.h"
 #include "dma.h"
@@ -109,6 +110,7 @@ u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(mt76u_rr);
 
 /* should be called with usb_ctrl_mtx locked */
 static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
@@ -140,6 +142,7 @@ void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
        __mt76u_wr(dev, addr, val);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);
 }
+EXPORT_SYMBOL_GPL(mt76u_wr);
 
 static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
                     u32 mask, u32 val)
@@ -186,6 +189,60 @@ void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
 }
 EXPORT_SYMBOL_GPL(mt76u_single_wr);
 
+static int
+mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
+               const struct mt76_reg_pair *data, int len)
+{
+       struct mt76_usb *usb = &dev->usb;
+
+       mutex_lock(&usb->usb_ctrl_mtx);
+       while (len > 0) {
+               __mt76u_wr(dev, base + data->reg, data->value);
+               len--;
+               data++;
+       }
+       mutex_unlock(&usb->usb_ctrl_mtx);
+
+       return 0;
+}
+
+static int
+mt76u_wr_rp(struct mt76_dev *dev, u32 base,
+           const struct mt76_reg_pair *data, int n)
+{
+       if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
+               return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
+       else
+               return mt76u_req_wr_rp(dev, base, data, n);
+}
+
+static int
+mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
+               int len)
+{
+       struct mt76_usb *usb = &dev->usb;
+
+       mutex_lock(&usb->usb_ctrl_mtx);
+       while (len > 0) {
+               data->value = __mt76u_rr(dev, base + data->reg);
+               len--;
+               data++;
+       }
+       mutex_unlock(&usb->usb_ctrl_mtx);
+
+       return 0;
+}
+
+static int
+mt76u_rd_rp(struct mt76_dev *dev, u32 base,
+           struct mt76_reg_pair *data, int n)
+{
+       if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
+               return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
+       else
+               return mt76u_req_rd_rp(dev, base, data, n);
+}
+
 static int
 mt76u_set_endpoints(struct usb_interface *intf,
                    struct mt76_usb *usb)
@@ -219,15 +276,17 @@ static int
 mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
                 int nsgs, int len, int sglen)
 {
+       struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        struct urb *urb = buf->urb;
        int i;
 
+       spin_lock_bh(&q->rx_page_lock);
        for (i = 0; i < nsgs; i++) {
                struct page *page;
                void *data;
                int offset;
 
-               data = netdev_alloc_frag(len);
+               data = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC);
                if (!data)
                        break;
 
@@ -235,6 +294,7 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
                offset = data - page_address(page);
                sg_set_page(&urb->sg[i], page, sglen, offset);
        }
+       spin_unlock_bh(&q->rx_page_lock);
 
        if (i < nsgs) {
                int j;
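
With this hunk, RX fragments are carved out of a shared per-queue page under rx_page_lock instead of each going through netdev_alloc_frag(). A toy analogue of the carving, with the parts page_frag_alloc() really handles (page refcounting and replacing a full page) deliberately elided:

#include <stdio.h>
#include <stdlib.h>

struct frag_cache { char *page; size_t size, offset; };

static void *frag_alloc(struct frag_cache *fc, size_t len)
{
        if (fc->offset + len > fc->size)
                return NULL;   /* the real allocator grabs a fresh page here */
        fc->offset += len;
        return fc->page + fc->offset - len;
}

int main(void)
{
        struct frag_cache fc = { malloc(1 << 15), 1 << 15, 0 };
        char *a = frag_alloc(&fc, 2048), *b = frag_alloc(&fc, 2048);

        printf("contiguous fragments: %d\n", (int)(b - a) == 2048);
        free(fc.page);
        return 0;
}
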
@@ -326,9 +386,9 @@ static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
        min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
                  MT_FCE_INFO_LEN;
 
-       if (data_len < min_len || WARN_ON(!dma_len) ||
-           WARN_ON(dma_len + MT_DMA_HDR_LEN > data_len) ||
-           WARN_ON(dma_len & 0x3))
+       if (data_len < min_len || !dma_len ||
+           dma_len + MT_DMA_HDR_LEN > data_len ||
+           (dma_len & 0x3))
                return -EINVAL;
        return dma_len;
 }
@@ -463,6 +523,7 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        int i, err, nsgs;
 
+       spin_lock_init(&q->rx_page_lock);
        spin_lock_init(&q->lock);
        q->entry = devm_kzalloc(dev->dev,
                                MT_NUM_RX_ENTRIES * sizeof(*q->entry),
@@ -494,10 +555,21 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
 static void mt76u_free_rx(struct mt76_dev *dev)
 {
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+       struct page *page;
        int i;
 
        for (i = 0; i < q->ndesc; i++)
                mt76u_buf_free(&q->entry[i].ubuf);
+
+       spin_lock_bh(&q->rx_page_lock);
+       if (!q->rx_page.va)
+               goto out;
+
+       page = virt_to_page(q->rx_page.va);
+       __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+       memset(&q->rx_page, 0, sizeof(q->rx_page));
+out:
+       spin_unlock_bh(&q->rx_page_lock);
 }
 
 static void mt76u_stop_rx(struct mt76_dev *dev)
@@ -509,40 +581,6 @@ static void mt76u_stop_rx(struct mt76_dev *dev)
                usb_kill_urb(q->entry[i].ubuf.urb);
 }
 
-int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
-{
-       struct sk_buff *iter, *last = skb;
-       u32 info, pad;
-
-       /* Buffer layout:
-        *      |   4B   | xfer len |      pad       |  4B  |
-        *      | TXINFO | pkt/cmd  | zero pad to 4B | zero |
-        *
-        * length field of TXINFO should be set to 'xfer len'.
-        */
-       info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
-              FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
-       put_unaligned_le32(info, skb_push(skb, sizeof(info)));
-
-       pad = round_up(skb->len, 4) + 4 - skb->len;
-       skb_walk_frags(skb, iter) {
-               last = iter;
-               if (!iter->next) {
-                       skb->data_len += pad;
-                       skb->len += pad;
-                       break;
-               }
-       }
-
-       if (unlikely(pad)) {
-               if (__skb_pad(last, pad, true))
-                       return -ENOMEM;
-               __skb_put(last, pad);
-       }
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mt76u_skb_dma_info);
-
 static void mt76u_tx_tasklet(unsigned long data)
 {
        struct mt76_dev *dev = (struct mt76_dev *)data;
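
The mt76u_skb_dma_info() helper removed above framed every USB TX buffer as a 4-byte TXINFO word, the payload rounded up to a 4-byte boundary, and a 4-byte zero trailer. Its padding arithmetic, checked standalone:

#include <stdio.h>

int main(void)
{
        unsigned int len;

        /* pad = round_up(len, 4) + 4 - len, as in the removed helper */
        for (len = 60; len <= 64; len++) {
                unsigned int pad = ((len + 3) & ~3u) + 4 - len;
                printf("len=%u pad=%u frame=%u\n", len, pad, 4 + len + pad);
        }
        return 0;
}
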
@@ -715,7 +753,7 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
                q = &dev->q_tx[i];
                spin_lock_init(&q->lock);
                INIT_LIST_HEAD(&q->swq);
-               q->hw_idx = q2hwq(i);
+               q->hw_idx = mt76_ac_to_hwq(i);
 
                q->entry = devm_kzalloc(dev->dev,
                                        MT_NUM_TX_ENTRIES * sizeof(*q->entry),
@@ -822,6 +860,8 @@ int mt76u_init(struct mt76_dev *dev,
                .wr = mt76u_wr,
                .rmw = mt76u_rmw,
                .copy = mt76u_copy,
+               .wr_rp = mt76u_wr_rp,
+               .rd_rp = mt76u_rd_rp,
        };
        struct mt76_usb *usb = &dev->usb;
 
index 070be803d4637aee4b538467c359cc5d0857f0c6..036be4163e69fcea8d0349a1f0316dbe6ffcd0aa 100644 (file)
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#include <linux/firmware.h>
-
 #include "mt76.h"
-#include "dma.h"
-
-#define MT_CMD_HDR_LEN                 4
-
-#define MT_FCE_DMA_ADDR                        0x0230
-#define MT_FCE_DMA_LEN                 0x0234
-
-#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX        0x09a8
-
-struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len)
-{
-       struct sk_buff *skb;
-
-       skb = alloc_skb(MT_CMD_HDR_LEN + len + 8, GFP_KERNEL);
-       if (!skb)
-               return NULL;
-
-       skb_reserve(skb, MT_CMD_HDR_LEN);
-       skb_put_data(skb, data, len);
-
-       return skb;
-}
-EXPORT_SYMBOL_GPL(mt76u_mcu_msg_alloc);
 
 void mt76u_mcu_complete_urb(struct urb *urb)
 {
@@ -49,176 +24,6 @@ void mt76u_mcu_complete_urb(struct urb *urb)
 }
 EXPORT_SYMBOL_GPL(mt76u_mcu_complete_urb);
 
-static int mt76u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
-{
-       struct mt76_usb *usb = &dev->usb;
-       struct mt76u_buf *buf = &usb->mcu.res;
-       int i, ret;
-       u32 rxfce;
-
-       for (i = 0; i < 5; i++) {
-               if (!wait_for_completion_timeout(&usb->mcu.cmpl,
-                                                msecs_to_jiffies(300)))
-                       continue;
-
-               if (buf->urb->status)
-                       return -EIO;
-
-               rxfce = get_unaligned_le32(sg_virt(&buf->urb->sg[0]));
-               ret = mt76u_submit_buf(dev, USB_DIR_IN,
-                                      MT_EP_IN_CMD_RESP,
-                                      buf, GFP_KERNEL,
-                                      mt76u_mcu_complete_urb,
-                                      &usb->mcu.cmpl);
-               if (ret)
-                       return ret;
-
-               if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce))
-                       return 0;
-
-               dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n",
-                       FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
-                       seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
-       }
-
-       dev_err(dev->dev, "error: %s timed out\n", __func__);
-       return -ETIMEDOUT;
-}
-
-int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
-                      int cmd, bool wait_resp)
-{
-       struct usb_interface *intf = to_usb_interface(dev->dev);
-       struct usb_device *udev = interface_to_usbdev(intf);
-       struct mt76_usb *usb = &dev->usb;
-       unsigned int pipe;
-       int ret, sent;
-       u8 seq = 0;
-       u32 info;
-
-       if (test_bit(MT76_REMOVED, &dev->state))
-               return 0;
-
-       mutex_lock(&usb->mcu.mutex);
-
-       pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
-       if (wait_resp) {
-               seq = ++usb->mcu.msg_seq & 0xf;
-               if (!seq)
-                       seq = ++usb->mcu.msg_seq & 0xf;
-       }
-
-       info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
-              FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
-              MT_MCU_MSG_TYPE_CMD;
-       ret = mt76u_skb_dma_info(skb, CPU_TX_PORT, info);
-       if (ret)
-               goto out;
-
-       ret = usb_bulk_msg(udev, pipe, skb->data, skb->len, &sent, 500);
-       if (ret)
-               goto out;
-
-       if (wait_resp)
-               ret = mt76u_mcu_wait_resp(dev, seq);
-
-out:
-       mutex_unlock(&usb->mcu.mutex);
-
-       consume_skb(skb);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(mt76u_mcu_send_msg);
-
-void mt76u_mcu_fw_reset(struct mt76_dev *dev)
-{
-       mt76u_vendor_request(dev, MT_VEND_DEV_MODE,
-                            USB_DIR_OUT | USB_TYPE_VENDOR,
-                            0x1, 0, NULL, 0);
-}
-EXPORT_SYMBOL_GPL(mt76u_mcu_fw_reset);
-
-static int
-__mt76u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf,
-                        const void *fw_data, int len, u32 dst_addr)
-{
-       u8 *data = sg_virt(&buf->urb->sg[0]);
-       DECLARE_COMPLETION_ONSTACK(cmpl);
-       __le32 info;
-       u32 val;
-       int err;
-
-       info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
-                          FIELD_PREP(MT_MCU_MSG_LEN, len) |
-                          MT_MCU_MSG_TYPE_CMD);
-
-       memcpy(data, &info, sizeof(info));
-       memcpy(data + sizeof(info), fw_data, len);
-       memset(data + sizeof(info) + len, 0, 4);
-
-       mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
-                       MT_FCE_DMA_ADDR, dst_addr);
-       len = roundup(len, 4);
-       mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
-                       MT_FCE_DMA_LEN, len << 16);
-
-       buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
-       err = mt76u_submit_buf(dev, USB_DIR_OUT,
-                              MT_EP_OUT_INBAND_CMD,
-                              buf, GFP_KERNEL,
-                              mt76u_mcu_complete_urb, &cmpl);
-       if (err < 0)
-               return err;
-
-       if (!wait_for_completion_timeout(&cmpl,
-                                        msecs_to_jiffies(1000))) {
-               dev_err(dev->dev, "firmware upload timed out\n");
-               usb_kill_urb(buf->urb);
-               return -ETIMEDOUT;
-       }
-
-       if (mt76u_urb_error(buf->urb)) {
-               dev_err(dev->dev, "firmware upload failed: %d\n",
-                       buf->urb->status);
-               return buf->urb->status;
-       }
-
-       val = mt76u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
-       val++;
-       mt76u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
-
-       return 0;
-}
-
-int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
-                          int data_len, u32 max_payload, u32 offset)
-{
-       int err, len, pos = 0, max_len = max_payload - 8;
-       struct mt76u_buf buf;
-
-       err = mt76u_buf_alloc(dev, &buf, 1, max_payload, max_payload,
-                             GFP_KERNEL);
-       if (err < 0)
-               return err;
-
-       while (data_len > 0) {
-               len = min_t(int, data_len, max_len);
-               err = __mt76u_mcu_fw_send_data(dev, &buf, data + pos,
-                                              len, offset + pos);
-               if (err < 0)
-                       break;
-
-               data_len -= len;
-               pos += len;
-               usleep_range(5000, 10000);
-       }
-       mt76u_buf_free(&buf);
-
-       return err;
-}
-EXPORT_SYMBOL_GPL(mt76u_mcu_fw_send_data);
-
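
In the removed upload helper, each bulk transfer carries a 4-byte DMA info word ahead of the firmware chunk and 4 bytes of zero padding behind it, which is where the max_payload - 8 bound in mt76u_mcu_fw_send_data() comes from. A sketch of the per-chunk capacity math (fw_chunk_capacity is a hypothetical helper; the 4+4 split is inferred from the memcpy/memset sequence in __mt76u_mcu_fw_send_data()):

static int fw_chunk_capacity(int max_payload)
{
        int hdr = sizeof(__le32);       /* leading DMA info word */
        int pad = 4;                    /* trailing zero padding */

        return max_payload - hdr - pad; /* == max_payload - 8 */
}
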
 int mt76u_mcu_init_rx(struct mt76_dev *dev)
 {
        struct mt76_usb *usb = &dev->usb;
@@ -240,3 +45,12 @@ int mt76u_mcu_init_rx(struct mt76_dev *dev)
        return err;
 }
 EXPORT_SYMBOL_GPL(mt76u_mcu_init_rx);
+
+void mt76u_mcu_deinit(struct mt76_dev *dev)
+{
+       struct mt76_usb *usb = &dev->usb;
+
+       usb_kill_urb(usb->mcu.res.urb);
+       mt76u_buf_free(&usb->mcu.res);
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_deinit);
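
The new mt76u_mcu_deinit() encodes an ordering contract: usb_kill_urb() cancels any in-flight transfer and waits for its completion handler to return, so only afterwards is it safe to release the buffer the URB was reading into. A hedged restatement of that invariant (example_mcu_stop is an illustrative name, not driver API):

static void example_mcu_stop(struct mt76_usb *usb)
{
        usb_kill_urb(usb->mcu.res.urb); /* cancel I/O, wait for handler */
        mt76u_buf_free(&usb->mcu.res);  /* buffer no longer referenced */
}
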
index 97f760a3d59901fb18a0e24e8a1b91a01428ecd4..17cd7adb410910e7a147b5d20623635639be956f 100644 (file)
@@ -23,6 +23,7 @@ obj-$(CONFIG_QTNFMAC_PEARL_PCIE) += qtnfmac_pearl_pcie.o
 
 qtnfmac_pearl_pcie-objs += \
        shm_ipc.o \
-       pearl/pcie.o
+       pcie/pcie.o \
+       pcie/pearl_pcie.o
 
 qtnfmac_pearl_pcie-$(CONFIG_DEBUG_FS) += debug.o
index 323e47cea1e2ff579f8f0b392b44536d74b8d5db..528ca7f5e07074c8a09c14279435aa3431bdc203 100644 (file)
@@ -20,6 +20,9 @@
 #include <linux/netdevice.h>
 #include <linux/workqueue.h>
 
+#include "trans.h"
+#include "core.h"
+
 #define QTNF_MAX_MAC           3
 
 enum qtnf_fw_state {
@@ -57,10 +60,8 @@ struct qtnf_bus {
        struct qtnf_wmac *mac[QTNF_MAX_MAC];
        struct qtnf_qlink_transport trans;
        struct qtnf_hw_info hw_info;
-       char fwname[32];
        struct napi_struct mux_napi;
        struct net_device mux_dev;
-       struct completion firmware_init_complete;
        struct workqueue_struct *workqueue;
        struct work_struct fw_work;
        struct work_struct event_work;
index 4aa332f4646b1b79396b55ca6c3c2d2b552debdf..51b33ec78facf4f9758ad383820a7a2ed7a2d097 100644 (file)
@@ -141,8 +141,8 @@ qtnf_change_virtual_intf(struct wiphy *wiphy,
 
        ret = qtnf_cmd_send_change_intf_type(vif, type, mac_addr);
        if (ret) {
-               pr_err("VIF%u.%u: failed to change VIF type: %d\n",
-                      vif->mac->macid, vif->vifid, ret);
+               pr_err("VIF%u.%u: failed to change type to %d\n",
+                      vif->mac->macid, vif->vifid, type);
                return ret;
        }
 
@@ -216,7 +216,6 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
                eth_zero_addr(vif->mac_addr);
                eth_zero_addr(vif->bssid);
                vif->bss_priority = QTNF_DEF_BSS_PRIORITY;
-               vif->sta_state = QTNF_STA_DISCONNECTED;
                memset(&vif->wdev, 0, sizeof(vif->wdev));
                vif->wdev.wiphy = wiphy;
                vif->wdev.iftype = type;
@@ -229,18 +228,22 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
        if (params)
                mac_addr = params->macaddr;
 
-       if (qtnf_cmd_send_add_intf(vif, type, mac_addr)) {
-               pr_err("VIF%u.%u: failed to add VIF\n", mac->macid, vif->vifid);
+       ret = qtnf_cmd_send_add_intf(vif, type, mac_addr);
+       if (ret) {
+               pr_err("VIF%u.%u: failed to add VIF %pM\n",
+                      mac->macid, vif->vifid, mac_addr);
                goto err_cmd;
        }
 
        if (!is_valid_ether_addr(vif->mac_addr)) {
                pr_err("VIF%u.%u: FW reported bad MAC: %pM\n",
                       mac->macid, vif->vifid, vif->mac_addr);
+               ret = -EINVAL;
                goto err_mac;
        }
 
-       if (qtnf_core_net_attach(mac, vif, name, name_assign_t)) {
+       ret = qtnf_core_net_attach(mac, vif, name, name_assign_t);
+       if (ret) {
                pr_err("VIF%u.%u: failed to attach netdev\n", mac->macid,
                       vif->vifid);
                goto err_net;
@@ -256,7 +259,7 @@ err_mac:
 err_cmd:
        vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
 
-       return ERR_PTR(-EFAULT);
+       return ERR_PTR(ret);
 }
 
 static int qtnf_mgmt_set_appie(struct qtnf_vif *vif,
@@ -335,12 +338,11 @@ static int qtnf_stop_ap(struct wiphy *wiphy, struct net_device *dev)
        qtnf_scan_done(vif->mac, true);
 
        ret = qtnf_cmd_send_stop_ap(vif);
-       if (ret) {
+       if (ret)
                pr_err("VIF%u.%u: failed to stop AP operation in FW\n",
                       vif->mac->macid, vif->vifid);
 
-               netif_carrier_off(vif->netdev);
-       }
+       netif_carrier_off(vif->netdev);
 
        return ret;
 }
@@ -478,19 +480,31 @@ qtnf_dump_station(struct wiphy *wiphy, struct net_device *dev,
        const struct qtnf_sta_node *sta_node;
        int ret;
 
-       sta_node = qtnf_sta_list_lookup_index(&vif->sta_list, idx);
+       switch (vif->wdev.iftype) {
+       case NL80211_IFTYPE_STATION:
+               if (idx != 0 || !vif->wdev.current_bss)
+                       return -ENOENT;
 
-       if (unlikely(!sta_node))
-               return -ENOENT;
+               ether_addr_copy(mac, vif->bssid);
+               break;
+       case NL80211_IFTYPE_AP:
+               sta_node = qtnf_sta_list_lookup_index(&vif->sta_list, idx);
+               if (unlikely(!sta_node))
+                       return -ENOENT;
 
-       ether_addr_copy(mac, sta_node->mac_addr);
+               ether_addr_copy(mac, sta_node->mac_addr);
+               break;
+       default:
+               return -ENOTSUPP;
+       }
 
-       ret = qtnf_cmd_get_sta_info(vif, sta_node->mac_addr, sinfo);
+       ret = qtnf_cmd_get_sta_info(vif, mac, sinfo);
 
-       if (unlikely(ret == -ENOENT)) {
-               qtnf_sta_list_del(vif, mac);
-               cfg80211_del_sta(vif->netdev, mac, GFP_KERNEL);
-               sinfo->filled = 0;
+       if (vif->wdev.iftype == NL80211_IFTYPE_AP) {
+               if (ret == -ENOENT) {
+                       cfg80211_del_sta(vif->netdev, mac, GFP_KERNEL);
+                       sinfo->filled = 0;
+               }
        }
 
        sinfo->generation = vif->generation;
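
For context on the -ENOENT returns: nl80211 drives .dump_station with idx = 0, 1, 2, ... until the callback errors out, so a station interface now reports exactly one entry, its current BSS. Rough shape of the core-side loop (a simplified sketch, not the actual nl80211 code; example_dump_all_stations is hypothetical):

static void example_dump_all_stations(const struct cfg80211_ops *ops,
                                      struct wiphy *wiphy,
                                      struct net_device *dev)
{
        struct station_info sinfo;
        u8 mac[ETH_ALEN];
        int idx;

        for (idx = 0; ; idx++) {
                memset(&sinfo, 0, sizeof(sinfo));
                if (ops->dump_station(wiphy, dev, idx, mac, &sinfo) < 0)
                        break;          /* -ENOENT terminates the dump */
                /* ... emit one nl80211 station entry for mac/sinfo ... */
        }
}
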
@@ -521,9 +535,16 @@ static int qtnf_del_key(struct wiphy *wiphy, struct net_device *dev,
        int ret;
 
        ret = qtnf_cmd_send_del_key(vif, key_index, pairwise, mac_addr);
-       if (ret)
-               pr_err("VIF%u.%u: failed to delete key: idx=%u pw=%u\n",
-                      vif->mac->macid, vif->vifid, key_index, pairwise);
+       if (ret) {
+               if (ret == -ENOENT) {
+                       pr_debug("VIF%u.%u: key index %d out of bounds\n",
+                                vif->mac->macid, vif->vifid, key_index);
+               } else {
+                       pr_err("VIF%u.%u: failed to delete key: idx=%u pw=%u\n",
+                              vif->mac->macid, vif->vifid,
+                              key_index, pairwise);
+               }
+       }
 
        return ret;
 }
@@ -590,6 +611,7 @@ qtnf_del_station(struct wiphy *wiphy, struct net_device *dev,
        if (ret)
                pr_err("VIF%u.%u: failed to delete STA %pM\n",
                       vif->mac->macid, vif->vifid, params->mac);
+
        return ret;
 }
 
@@ -597,21 +619,25 @@ static int
 qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
 {
        struct qtnf_wmac *mac = wiphy_priv(wiphy);
+       int ret;
 
        cancel_delayed_work_sync(&mac->scan_timeout);
 
        mac->scan_req = request;
 
-       if (qtnf_cmd_send_scan(mac)) {
+       ret = qtnf_cmd_send_scan(mac);
+       if (ret) {
                pr_err("MAC%u: failed to start scan\n", mac->macid);
                mac->scan_req = NULL;
-               return -EFAULT;
+               goto out;
        }
 
+       pr_debug("MAC%u: scan started\n", mac->macid);
        queue_delayed_work(mac->bus->workqueue, &mac->scan_timeout,
                           QTNF_SCAN_TIMEOUT_SEC * HZ);
 
-       return 0;
+out:
+       return ret;
 }
 
 static int
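
The cancel_delayed_work_sync()/queue_delayed_work() pair in qtnf_scan() turns mac->scan_timeout into a scan watchdog: if the firmware never reports completion, the delayed work aborts the pending cfg80211 request. A sketch of the handler it arms (the driver's real handler is qtnf_mac_scan_timeout; the body here is assumed):

static void example_scan_timeout(struct work_struct *work)
{
        struct qtnf_wmac *mac =
                container_of(work, struct qtnf_wmac, scan_timeout.work);

        pr_warn("MAC%u: scan timed out\n", mac->macid);
        qtnf_scan_done(mac, true);      /* finish the request as aborted */
}
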
@@ -624,9 +650,6 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev,
        if (vif->wdev.iftype != NL80211_IFTYPE_STATION)
                return -EOPNOTSUPP;
 
-       if (vif->sta_state != QTNF_STA_DISCONNECTED)
-               return -EBUSY;
-
        if (sme->bssid)
                ether_addr_copy(vif->bssid, sme->bssid);
        else
@@ -634,13 +657,13 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev,
 
        ret = qtnf_cmd_send_connect(vif, sme);
        if (ret) {
-               pr_err("VIF%u.%u: failed to connect\n", vif->mac->macid,
-                      vif->vifid);
-               return ret;
+               pr_err("VIF%u.%u: failed to connect\n",
+                      vif->mac->macid, vif->vifid);
+               goto out;
        }
 
-       vif->sta_state = QTNF_STA_CONNECTING;
-       return 0;
+out:
+       return ret;
 }
 
 static int
@@ -662,22 +685,18 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev,
                goto out;
        }
 
-       qtnf_scan_done(mac, true);
-
-       if (vif->sta_state == QTNF_STA_DISCONNECTED)
-               goto out;
-
        ret = qtnf_cmd_send_disconnect(vif, reason_code);
-       if (ret) {
-               pr_err("VIF%u.%u: failed to disconnect\n", mac->macid,
-                      vif->vifid);
-               goto out;
+       if (ret)
+               pr_err("VIF%u.%u: failed to disconnect\n",
+                      mac->macid, vif->vifid);
+
+       if (vif->wdev.current_bss) {
+               netif_carrier_off(vif->netdev);
+               cfg80211_disconnected(vif->netdev, reason_code,
+                                     NULL, 0, true, GFP_KERNEL);
        }
 
 out:
-       if (vif->sta_state == QTNF_STA_CONNECTING)
-               vif->sta_state = QTNF_STA_DISCONNECTED;
-
        return ret;
 }
 
@@ -691,11 +710,8 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev,
        const struct cfg80211_chan_def *chandef = &wdev->chandef;
        struct ieee80211_channel *chan;
        struct qtnf_chan_stats stats;
-       struct qtnf_vif *vif;
        int ret;
 
-       vif = qtnf_netdev_get_priv(dev);
-
        sband = wiphy->bands[NL80211_BAND_2GHZ];
        if (sband && idx >= sband->n_channels) {
                idx -= sband->n_channels;
@@ -750,7 +766,6 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev,
        default:
                pr_debug("failed to get chan(%d) stats from card\n",
                         chan->hw_value);
-               ret = -EINVAL;
                break;
        }
 
@@ -773,6 +788,7 @@ qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
        ret = qtnf_cmd_get_channel(vif, chandef);
        if (ret) {
                pr_err("%s: failed to get channel: %d\n", ndev->name, ret);
+               ret = -ENODATA;
                goto out;
        }
 
@@ -782,6 +798,7 @@ qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
                       chandef->center_freq1, chandef->center_freq2,
                       chandef->width);
                ret = -ENODATA;
+               goto out;
        }
 
 out:
@@ -851,10 +868,8 @@ static int qtnf_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
 
        ret = qtnf_cmd_send_pm_set(vif, enabled ? QLINK_PM_AUTO_STANDBY :
                                   QLINK_PM_OFF, timeout);
-       if (ret) {
+       if (ret)
                pr_err("%s: failed to set PM mode ret=%d\n", dev->name, ret);
-               return ret;
-       }
 
        return ret;
 }
@@ -974,9 +989,16 @@ static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in,
 
        ret = qtnf_cmd_reg_notify(bus, req);
        if (ret) {
-               if (ret != -EOPNOTSUPP && ret != -EALREADY)
+               if (ret == -EOPNOTSUPP) {
+                       pr_warn("reg update not supported\n");
+               } else if (ret == -EALREADY) {
+                       pr_info("regulatory domain is already set to %c%c\n",

+                               req->alpha2[0], req->alpha2[1]);
+               } else {
                        pr_err("failed to update reg domain to %c%c\n",
                               req->alpha2[0], req->alpha2[1]);
+               }
+
                return;
        }
 
@@ -1091,6 +1113,10 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
        if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
                wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD);
 
+       if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_DWELL)
+               wiphy_ext_feature_set(wiphy,
+                                     NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+
        wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
                                    NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2;
 
@@ -1109,6 +1135,9 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
        if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR)
                wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
 
+       if (!(hw_info->hw_capab & QLINK_HW_CAPAB_OBSS_SCAN))
+               wiphy->features |= NL80211_FEATURE_NEED_OBSS_SCAN;
+
 #ifdef CONFIG_PM
        if (macinfo->wowlan)
                wiphy->wowlan = macinfo->wowlan;
@@ -1123,6 +1152,15 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
                wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
        }
 
+       if (mac->macinfo.extended_capabilities_len) {
+               wiphy->extended_capabilities =
+                       mac->macinfo.extended_capabilities;
+               wiphy->extended_capabilities_mask =
+                       mac->macinfo.extended_capabilities_mask;
+               wiphy->extended_capabilities_len =
+                       mac->macinfo.extended_capabilities_len;
+       }
+
        strlcpy(wiphy->fw_version, hw_info->fw_version,
                sizeof(wiphy->fw_version));
        wiphy->hw_version = hw_info->hw_version;
@@ -1146,7 +1184,8 @@ void qtnf_netdev_updown(struct net_device *ndev, bool up)
        struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
 
        if (qtnf_cmd_send_updown_intf(vif, up))
-               pr_err("failed to send up/down command to FW\n");
+               pr_err("failed to send %s command to VIF%u.%u\n",
+                      up ? "UP" : "DOWN", vif->mac->macid, vif->vifid);
 }
 
 void qtnf_virtual_intf_cleanup(struct net_device *ndev)
@@ -1154,57 +1193,20 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev)
        struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
        struct qtnf_wmac *mac = wiphy_priv(vif->wdev.wiphy);
 
-       if (vif->wdev.iftype == NL80211_IFTYPE_STATION) {
-               switch (vif->sta_state) {
-               case QTNF_STA_DISCONNECTED:
-                       break;
-               case QTNF_STA_CONNECTING:
-                       cfg80211_connect_result(vif->netdev,
-                                               vif->bssid, NULL, 0,
-                                               NULL, 0,
-                                               WLAN_STATUS_UNSPECIFIED_FAILURE,
-                                               GFP_KERNEL);
-                       qtnf_disconnect(vif->wdev.wiphy, ndev,
-                                       WLAN_REASON_DEAUTH_LEAVING);
-                       break;
-               case QTNF_STA_CONNECTED:
-                       cfg80211_disconnected(vif->netdev,
-                                             WLAN_REASON_DEAUTH_LEAVING,
-                                             NULL, 0, 1, GFP_KERNEL);
-                       qtnf_disconnect(vif->wdev.wiphy, ndev,
-                                       WLAN_REASON_DEAUTH_LEAVING);
-                       break;
-               }
-
-               vif->sta_state = QTNF_STA_DISCONNECTED;
-       }
+       if (vif->wdev.iftype == NL80211_IFTYPE_STATION)
+               qtnf_disconnect(vif->wdev.wiphy, ndev,
+                               WLAN_REASON_DEAUTH_LEAVING);
 
        qtnf_scan_done(mac, true);
 }
 
 void qtnf_cfg80211_vif_reset(struct qtnf_vif *vif)
 {
-       if (vif->wdev.iftype == NL80211_IFTYPE_STATION) {
-               switch (vif->sta_state) {
-               case QTNF_STA_CONNECTING:
-                       cfg80211_connect_result(vif->netdev,
-                                               vif->bssid, NULL, 0,
-                                               NULL, 0,
-                                               WLAN_STATUS_UNSPECIFIED_FAILURE,
-                                               GFP_KERNEL);
-                       break;
-               case QTNF_STA_CONNECTED:
-                       cfg80211_disconnected(vif->netdev,
-                                             WLAN_REASON_DEAUTH_LEAVING,
-                                             NULL, 0, 1, GFP_KERNEL);
-                       break;
-               case QTNF_STA_DISCONNECTED:
-                       break;
-               }
-       }
+       if (vif->wdev.iftype == NL80211_IFTYPE_STATION)
+               cfg80211_disconnected(vif->netdev, WLAN_REASON_DEAUTH_LEAVING,
+                                     NULL, 0, 1, GFP_KERNEL);
 
        cfg80211_shutdown_all_interfaces(vif->wdev.wiphy);
-       vif->sta_state = QTNF_STA_DISCONNECTED;
 }
 
 void qtnf_band_init_rates(struct ieee80211_supported_band *band)
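
With enum qtnf_sta_state gone, the driver no longer mirrors connection state; both cleanup paths above lean on cfg80211's own bookkeeping instead. A hedged helper showing the invariant they assume, namely that wdev->current_bss is non-NULL exactly while a STA is associated (example_sta_connected is illustrative, not driver API):

static bool example_sta_connected(const struct qtnf_vif *vif)
{
        return vif->wdev.iftype == NL80211_IFTYPE_STATION &&
               vif->wdev.current_bss;
}
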
index ae9e773005339b5ef22f23d5593297ffcbe04a7a..bfdc1ad30c13a7ffea95d95ead5b7961024999c6 100644 (file)
@@ -80,7 +80,6 @@ static int qtnf_cmd_resp_result_decode(enum qlink_cmd_result qcode)
 static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
                                    struct sk_buff *cmd_skb,
                                    struct sk_buff **response_skb,
-                                   u16 *result_code,
                                    size_t const_resp_size,
                                    size_t *var_resp_size)
 {
@@ -88,7 +87,8 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
        const struct qlink_resp *resp;
        struct sk_buff *resp_skb = NULL;
        u16 cmd_id;
-       u8 mac_id, vif_id;
+       u8 mac_id;
+       u8 vif_id;
        int ret;
 
        cmd = (struct qlink_cmd *)cmd_skb->data;
@@ -97,8 +97,11 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
        vif_id = cmd->vifid;
        cmd->mhdr.len = cpu_to_le16(cmd_skb->len);
 
-       if (unlikely(bus->fw_state != QTNF_FW_STATE_ACTIVE &&
-                    le16_to_cpu(cmd->cmd_id) != QLINK_CMD_FW_INIT)) {
+       pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id,
+                le16_to_cpu(cmd->cmd_id));
+
+       if (bus->fw_state != QTNF_FW_STATE_ACTIVE &&
+           le16_to_cpu(cmd->cmd_id) != QLINK_CMD_FW_INIT) {
                pr_warn("VIF%u.%u: drop cmd 0x%.4X in fw state %d\n",
                        mac_id, vif_id, le16_to_cpu(cmd->cmd_id),
                        bus->fw_state);
@@ -106,24 +109,16 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
                return -ENODEV;
        }
 
-       pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id,
-                le16_to_cpu(cmd->cmd_id));
-
        ret = qtnf_trans_send_cmd_with_resp(bus, cmd_skb, &resp_skb);
-
-       if (unlikely(ret))
+       if (ret)
                goto out;
 
        resp = (const struct qlink_resp *)resp_skb->data;
        ret = qtnf_cmd_check_reply_header(resp, cmd_id, mac_id, vif_id,
                                          const_resp_size);
-
-       if (unlikely(ret))
+       if (ret)
                goto out;
 
-       if (likely(result_code))
-               *result_code = le16_to_cpu(resp->result);
-
        /* Return length of variable part of response */
        if (response_skb && var_resp_size)
                *var_resp_size = le16_to_cpu(resp->mhdr.len) - const_resp_size;
@@ -134,14 +129,18 @@ out:
        else
                consume_skb(resp_skb);
 
+       if (!ret && resp)
+               return qtnf_cmd_resp_result_decode(le16_to_cpu(resp->result));
+
+       pr_warn("VIF%u.%u: cmd 0x%.4X failed: %d\n",
+               mac_id, vif_id, le16_to_cpu(cmd->cmd_id), ret);
+
        return ret;
 }
 
-static inline int qtnf_cmd_send(struct qtnf_bus *bus,
-                               struct sk_buff *cmd_skb,
-                               u16 *result_code)
+static inline int qtnf_cmd_send(struct qtnf_bus *bus, struct sk_buff *cmd_skb)
 {
-       return qtnf_cmd_send_with_reply(bus, cmd_skb, NULL, result_code,
+       return qtnf_cmd_send_with_reply(bus, cmd_skb, NULL,
                                        sizeof(struct qlink_resp), NULL);
 }
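
With the result_code out-parameter dropped, every command path now receives a ready-made errno from qtnf_cmd_send_with_reply(), which decodes the firmware's qlink result word internally. Reconstructed from the switch statements this patch deletes (get_sta_info, reg_notify, chan_switch, set_mac_acl), the mapping is approximately the sketch below; the authoritative version is qtnf_cmd_resp_result_decode() in commands.c:

static int example_result_decode(enum qlink_cmd_result qcode)
{
        switch (qcode) {
        case QLINK_CMD_RESULT_OK:
                return 0;
        case QLINK_CMD_RESULT_INVALID:
                return -EINVAL;
        case QLINK_CMD_RESULT_ENOTSUPP:
                return -EOPNOTSUPP;     /* matches the reg_notifier caller */
        case QLINK_CMD_RESULT_ENOTFOUND:
                return -ENOENT;         /* matches the del_key caller */
        case QLINK_CMD_RESULT_EALREADY:
                return -EALREADY;
        default:
                return -EFAULT;
        }
}
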
 
@@ -228,7 +227,6 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
        struct sk_buff *cmd_skb;
        struct qlink_cmd_start_ap *cmd;
        struct qlink_auth_encr *aen;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret;
        int i;
 
@@ -329,30 +327,21 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
        }
 
        qtnf_bus_lock(vif->mac->bus);
-
-       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
-       if (unlikely(ret))
-               goto out;
-
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
-                      vif->vifid, res_code);
-               ret = -EFAULT;
+       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+       if (ret)
                goto out;
-       }
 
        netif_carrier_on(vif->netdev);
 
 out:
        qtnf_bus_unlock(vif->mac->bus);
+
        return ret;
 }
 
 int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif)
 {
        struct sk_buff *cmd_skb;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -362,23 +351,13 @@ int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif)
                return -ENOMEM;
 
        qtnf_bus_lock(vif->mac->bus);
-
-       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
-       if (unlikely(ret))
-               goto out;
-
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
-                      vif->vifid, res_code);
-               ret = -EFAULT;
+       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+       if (ret)
                goto out;
-       }
-
-       netif_carrier_off(vif->netdev);
 
 out:
        qtnf_bus_unlock(vif->mac->bus);
+
        return ret;
 }
 
@@ -386,7 +365,6 @@ int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg)
 {
        struct sk_buff *cmd_skb;
        struct qlink_cmd_mgmt_frame_register *cmd;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -401,20 +379,13 @@ int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg)
        cmd->frame_type = cpu_to_le16(frame_type);
        cmd->do_register = reg;
 
-       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
-       if (unlikely(ret))
-               goto out;
-
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
-                      vif->vifid, res_code);
-               ret = -EFAULT;
+       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+       if (ret)
                goto out;
-       }
 
 out:
        qtnf_bus_unlock(vif->mac->bus);
+
        return ret;
 }
 
@@ -423,7 +394,6 @@ int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
 {
        struct sk_buff *cmd_skb;
        struct qlink_cmd_mgmt_frame_tx *cmd;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret;
 
        if (sizeof(*cmd) + len > QTNF_MAX_CMD_BUF_SIZE) {
@@ -448,20 +418,13 @@ int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
        if (len && buf)
                qtnf_cmd_skb_put_buffer(cmd_skb, buf, len);
 
-       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
-       if (unlikely(ret))
+       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+       if (ret)
                goto out;
 
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
-                      vif->vifid, res_code);
-               ret = -EFAULT;
-               goto out;
-       }
-
 out:
        qtnf_bus_unlock(vif->mac->bus);
+
        return ret;
 }
 
@@ -469,7 +432,6 @@ int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type,
                                 const u8 *buf, size_t len)
 {
        struct sk_buff *cmd_skb;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret;
 
        if (len > QTNF_MAX_CMD_BUF_SIZE) {
@@ -487,21 +449,13 @@ int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type,
        qtnf_cmd_tlv_ie_set_add(cmd_skb, frame_type, buf, len);
 
        qtnf_bus_lock(vif->mac->bus);
-
-       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
-       if (unlikely(ret))
-               goto out;
-
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("VIF%u.%u frame %u: CMD failed: %u\n", vif->mac->macid,
-                      vif->vifid, frame_type, res_code);
-               ret = -EFAULT;
+       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+       if (ret)
                goto out;
-       }
 
 out:
        qtnf_bus_unlock(vif->mac->bus);
+
        return ret;
 }
 
@@ -544,6 +498,9 @@ qtnf_sta_info_parse_rate(struct rate_info *rate_dst,
                rate_dst->flags |= RATE_INFO_FLAGS_MCS;
        else if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_VHT_MCS)
                rate_dst->flags |= RATE_INFO_FLAGS_VHT_MCS;
+
+       if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_SHORT_GI)
+               rate_dst->flags |= RATE_INFO_FLAGS_SHORT_GI;
 }
 
 static void
@@ -730,7 +687,6 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
        struct qlink_cmd_get_sta_info *cmd;
        const struct qlink_resp_get_sta_info *resp;
        size_t var_resp_len;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret = 0;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -745,31 +701,13 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
        ether_addr_copy(cmd->sta_addr, sta_mac);
 
        ret = qtnf_cmd_send_with_reply(vif->mac->bus, cmd_skb, &resp_skb,
-                                      &res_code, sizeof(*resp),
-                                      &var_resp_len);
-
-       if (unlikely(ret))
-               goto out;
-
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               switch (res_code) {
-               case QLINK_CMD_RESULT_ENOTFOUND:
-                       pr_warn("VIF%u.%u: %pM STA not found\n",
-                               vif->mac->macid, vif->vifid, sta_mac);
-                       ret = -ENOENT;
-                       break;
-               default:
-                       pr_err("VIF%u.%u: can't get info for %pM: %u\n",
-                              vif->mac->macid, vif->vifid, sta_mac, res_code);
-                       ret = -EFAULT;
-                       break;
-               }
+                                      sizeof(*resp), &var_resp_len);
+       if (ret)
                goto out;
-       }
 
        resp = (const struct qlink_resp_get_sta_info *)resp_skb->data;
 
-       if (unlikely(!ether_addr_equal(sta_mac, resp->sta_addr))) {
+       if (!ether_addr_equal(sta_mac, resp->sta_addr)) {
                pr_err("VIF%u.%u: wrong mac in reply: %pM != %pM\n",
                       vif->mac->macid, vif->vifid, resp->sta_addr, sta_mac);
                ret = -EINVAL;
@@ -795,7 +733,6 @@ static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif,
        struct sk_buff *cmd_skb, *resp_skb = NULL;
        struct qlink_cmd_manage_intf *cmd;
        const struct qlink_resp_manage_intf *resp;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret = 0;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -828,17 +765,9 @@ static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif,
                eth_zero_addr(cmd->intf_info.mac_addr);
 
        ret = qtnf_cmd_send_with_reply(vif->mac->bus, cmd_skb, &resp_skb,
-                                      &res_code, sizeof(*resp), NULL);
-
-       if (unlikely(ret))
-               goto out;
-
-       ret = qtnf_cmd_resp_result_decode(res_code);
-       if (ret) {
-               pr_err("VIF%u.%u: CMD %d failed: %u\n", vif->mac->macid,
-                      vif->vifid, cmd_type, res_code);
+                                      sizeof(*resp), NULL);
+       if (ret)
                goto out;
-       }
 
        resp = (const struct qlink_resp_manage_intf *)resp_skb->data;
        ether_addr_copy(vif->mac_addr, resp->intf_info.mac_addr);
@@ -868,7 +797,6 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
 {
        struct sk_buff *cmd_skb;
        struct qlink_cmd_manage_intf *cmd;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret = 0;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -897,17 +825,9 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
 
        eth_zero_addr(cmd->intf_info.mac_addr);
 
-       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
-       if (unlikely(ret))
-               goto out;
-
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
-                      vif->vifid, res_code);
-               ret = -EFAULT;
+       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+       if (ret)
                goto out;
-       }
 
 out:
        qtnf_bus_unlock(vif->mac->bus);
@@ -1353,8 +1273,7 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
                ext_capa_mask = NULL;
        }
 
-       kfree(mac->macinfo.extended_capabilities);
-       kfree(mac->macinfo.extended_capabilities_mask);
+       qtnf_mac_ext_caps_free(mac);
        mac->macinfo.extended_capabilities = ext_capa;
        mac->macinfo.extended_capabilities_mask = ext_capa_mask;
        mac->macinfo.extended_capabilities_len = ext_capa_len;
@@ -1732,7 +1651,6 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac)
        struct sk_buff *cmd_skb, *resp_skb = NULL;
        const struct qlink_resp_get_mac_info *resp;
        size_t var_data_len;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret = 0;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
@@ -1742,18 +1660,11 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac)
                return -ENOMEM;
 
        qtnf_bus_lock(mac->bus);
-
-       ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code,
+       ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb,
                                       sizeof(*resp), &var_data_len);
-       if (unlikely(ret))
+       if (ret)
                goto out;
 
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code);
-               ret = -EFAULT;
-               goto out;
-       }
-
        resp = (const struct qlink_resp_get_mac_info *)resp_skb->data;
        qtnf_cmd_resp_proc_mac_info(mac, resp);
        ret = qtnf_parse_variable_mac_info(mac, resp->var_info, var_data_len);
@@ -1769,7 +1680,6 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus)
 {
        struct sk_buff *cmd_skb, *resp_skb = NULL;
        const struct qlink_resp_get_hw_info *resp;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret = 0;
        size_t info_len;
 
@@ -1780,18 +1690,10 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus)
                return -ENOMEM;
 
        qtnf_bus_lock(bus);
-
-       ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb, &res_code,
+       ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb,
                                       sizeof(*resp), &info_len);
-
-       if (unlikely(ret))
-               goto out;
-
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("cmd exec failed: 0x%.4X\n", res_code);
-               ret = -EFAULT;
+       if (ret)
                goto out;
-       }
 
        resp = (const struct qlink_resp_get_hw_info *)resp_skb->data;
        ret = qtnf_cmd_resp_proc_hw_info(bus, resp, info_len);
@@ -1810,7 +1712,6 @@ int qtnf_cmd_band_info_get(struct qtnf_wmac *mac,
        size_t info_len;
        struct qlink_cmd_band_info_get *cmd;
        struct qlink_resp_band_info_get *resp;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret = 0;
        u8 qband;
 
@@ -1838,18 +1739,10 @@ int qtnf_cmd_band_info_get(struct qtnf_wmac *mac,
        cmd->band = qband;
 
        qtnf_bus_lock(mac->bus);
-
-       ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code,
+       ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb,
                                       sizeof(*resp), &info_len);
-
-       if (unlikely(ret))
-               goto out;
-
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code);
-               ret = -EFAULT;
+       if (ret)
                goto out;
-       }
 
        resp = (struct qlink_resp_band_info_get *)resp_skb->data;
        if (resp->band != qband) {
@@ -1873,7 +1766,6 @@ int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac)
        struct sk_buff *cmd_skb, *resp_skb = NULL;
        size_t response_size;
        struct qlink_resp_phy_params *resp;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret = 0;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
@@ -1883,19 +1775,11 @@ int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac)
                return -ENOMEM;
 
        qtnf_bus_lock(mac->bus);
-
-       ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code,
+       ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb,
                                       sizeof(*resp), &response_size);
-
-       if (unlikely(ret))
+       if (ret)
                goto out;
 
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code);
-               ret = -EFAULT;
-               goto out;
-       }
-
        resp = (struct qlink_resp_phy_params *)resp_skb->data;
        ret = qtnf_cmd_resp_proc_phy_params(mac, resp->info, response_size);
 
@@ -1910,7 +1794,6 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed)
 {
        struct wiphy *wiphy = priv_to_wiphy(mac);
        struct sk_buff *cmd_skb;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret = 0;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
@@ -1931,26 +1814,19 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed)
                qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_COVERAGE_CLASS,
                                        wiphy->coverage_class);
 
-       ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code);
-
-       if (unlikely(ret))
-               goto out;
-
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code);
-               ret = -EFAULT;
+       ret = qtnf_cmd_send(mac->bus, cmd_skb);
+       if (ret)
                goto out;
-       }
 
 out:
        qtnf_bus_unlock(mac->bus);
+
        return ret;
 }
 
 int qtnf_cmd_send_init_fw(struct qtnf_bus *bus)
 {
        struct sk_buff *cmd_skb;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret = 0;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
@@ -1960,20 +1836,13 @@ int qtnf_cmd_send_init_fw(struct qtnf_bus *bus)
                return -ENOMEM;
 
        qtnf_bus_lock(bus);
-
-       ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
-
-       if (unlikely(ret))
+       ret = qtnf_cmd_send(bus, cmd_skb);
+       if (ret)
                goto out;
 
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("cmd exec failed: 0x%.4X\n", res_code);
-               ret = -EFAULT;
-               goto out;
-       }
-
 out:
        qtnf_bus_unlock(bus);
+
        return ret;
 }
 
@@ -1988,9 +1857,7 @@ void qtnf_cmd_send_deinit_fw(struct qtnf_bus *bus)
                return;
 
        qtnf_bus_lock(bus);
-
-       qtnf_cmd_send(bus, cmd_skb, NULL);
-
+       qtnf_cmd_send(bus, cmd_skb);
        qtnf_bus_unlock(bus);
 }
 
@@ -1999,7 +1866,6 @@ int qtnf_cmd_send_add_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
 {
        struct sk_buff *cmd_skb;
        struct qlink_cmd_add_key *cmd;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret = 0;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2031,19 +1897,13 @@ int qtnf_cmd_send_add_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
                                         params->seq,
                                         params->seq_len);
 
-       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-       if (unlikely(ret))
+       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+       if (ret)
                goto out;
 
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("VIF%u.%u: CMD failed: %u\n",
-                      vif->mac->macid, vif->vifid, res_code);
-               ret = -EFAULT;
-               goto out;
-       }
-
 out:
        qtnf_bus_unlock(vif->mac->bus);
+
        return ret;
 }
 
@@ -2052,7 +1912,6 @@ int qtnf_cmd_send_del_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
 {
        struct sk_buff *cmd_skb;
        struct qlink_cmd_del_key *cmd;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret = 0;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2072,19 +1931,14 @@ int qtnf_cmd_send_del_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
 
        cmd->key_index = key_index;
        cmd->pairwise = pairwise;
-       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-       if (unlikely(ret))
-               goto out;
 
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("VIF%u.%u: CMD failed: %u\n",
-                      vif->mac->macid, vif->vifid, res_code);
-               ret = -EFAULT;
+       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+       if (ret)
                goto out;
-       }
 
 out:
        qtnf_bus_unlock(vif->mac->bus);
+
        return ret;
 }
 
@@ -2093,7 +1947,6 @@ int qtnf_cmd_send_set_default_key(struct qtnf_vif *vif, u8 key_index,
 {
        struct sk_buff *cmd_skb;
        struct qlink_cmd_set_def_key *cmd;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret = 0;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2108,19 +1961,14 @@ int qtnf_cmd_send_set_default_key(struct qtnf_vif *vif, u8 key_index,
        cmd->key_index = key_index;
        cmd->unicast = unicast;
        cmd->multicast = multicast;
-       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-       if (unlikely(ret))
-               goto out;
 
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
-                      vif->vifid, res_code);
-               ret = -EFAULT;
+       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+       if (ret)
                goto out;
-       }
 
 out:
        qtnf_bus_unlock(vif->mac->bus);
+
        return ret;
 }
 
@@ -2128,7 +1976,6 @@ int qtnf_cmd_send_set_default_mgmt_key(struct qtnf_vif *vif, u8 key_index)
 {
        struct sk_buff *cmd_skb;
        struct qlink_cmd_set_def_mgmt_key *cmd;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret = 0;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2141,19 +1988,14 @@ int qtnf_cmd_send_set_default_mgmt_key(struct qtnf_vif *vif, u8 key_index)
 
        cmd = (struct qlink_cmd_set_def_mgmt_key *)cmd_skb->data;
        cmd->key_index = key_index;
-       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-       if (unlikely(ret))
-               goto out;
 
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
-                      vif->vifid, res_code);
-               ret = -EFAULT;
+       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+       if (ret)
                goto out;
-       }
 
 out:
        qtnf_bus_unlock(vif->mac->bus);
+
        return ret;
 }
 
@@ -2183,7 +2025,6 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
 {
        struct sk_buff *cmd_skb;
        struct qlink_cmd_change_sta *cmd;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret = 0;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2214,19 +2055,13 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
                goto out;
        }
 
-       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-       if (unlikely(ret))
-               goto out;
-
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
-                      vif->vifid, res_code);
-               ret = -EFAULT;
+       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+       if (ret)
                goto out;
-       }
 
 out:
        qtnf_bus_unlock(vif->mac->bus);
+
        return ret;
 }
 
@@ -2235,7 +2070,6 @@ int qtnf_cmd_send_del_sta(struct qtnf_vif *vif,
 {
        struct sk_buff *cmd_skb;
        struct qlink_cmd_del_sta *cmd;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret = 0;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2256,19 +2090,13 @@ int qtnf_cmd_send_del_sta(struct qtnf_vif *vif,
        cmd->subtype = params->subtype;
        cmd->reason_code = cpu_to_le16(params->reason_code);
 
-       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-       if (unlikely(ret))
+       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+       if (ret)
                goto out;
 
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
-                      vif->vifid, res_code);
-               ret = -EFAULT;
-               goto out;
-       }
-
 out:
        qtnf_bus_unlock(vif->mac->bus);
+
        return ret;
 }
 
@@ -2312,7 +2140,6 @@ static void qtnf_cmd_randmac_tlv_add(struct sk_buff *cmd_skb,
 int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
 {
        struct sk_buff *cmd_skb;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        struct ieee80211_channel *sc;
        struct cfg80211_scan_request *scan_req = mac->scan_req;
        int n_channels;
@@ -2370,20 +2197,28 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
                                         scan_req->mac_addr_mask);
        }
 
-       ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code);
+       if (scan_req->flags & NL80211_SCAN_FLAG_FLUSH) {
+               pr_debug("MAC%u: flush cache before scan\n", mac->macid);
 
-       if (unlikely(ret))
-               goto out;
+               qtnf_cmd_skb_put_tlv_tag(cmd_skb, QTN_TLV_ID_SCAN_FLUSH);
+       }
 
-       pr_debug("MAC%u: scan started\n", mac->macid);
+       if (scan_req->duration) {
+               pr_debug("MAC%u: %s scan duration %u\n", mac->macid,
+                        scan_req->duration_mandatory ? "mandatory" : "max",
+                        scan_req->duration);
 
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code);
-               ret = -EFAULT;
-               goto out;
+               qtnf_cmd_skb_put_tlv_u16(cmd_skb, QTN_TLV_ID_SCAN_DWELL,
+                                        scan_req->duration);
        }
+
+       ret = qtnf_cmd_send(mac->bus, cmd_skb);
+       if (ret)
+               goto out;
+
 out:
        qtnf_bus_unlock(mac->bus);
+
        return ret;
 }
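
The two new TLVs forward optional nl80211 scan attributes to the firmware: QTN_TLV_ID_SCAN_FLUSH mirrors NL80211_SCAN_FLAG_FLUSH, and QTN_TLV_ID_SCAN_DWELL carries the dwell time userspace may request once NL80211_EXT_FEATURE_SET_SCAN_DWELL is advertised (see the cfg80211.c hunk above). A sketch of the u16 TLV helper these calls rely on, assuming the usual qlink layout of a 16-bit type, 16-bit length and little-endian payload (example_put_tlv_u16 is hypothetical):

static void example_put_tlv_u16(struct sk_buff *skb, u16 tlv_id, u16 value)
{
        struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr) + sizeof(__le16));

        hdr->type = cpu_to_le16(tlv_id);
        hdr->len = cpu_to_le16(sizeof(__le16));
        *(__le16 *)hdr->val = cpu_to_le16(value);
}
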
 
@@ -2393,7 +2228,6 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif,
        struct sk_buff *cmd_skb;
        struct qlink_cmd_connect *cmd;
        struct qlink_auth_encr *aen;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret;
        int i;
        u32 connect_flags = 0;
@@ -2474,20 +2308,13 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif,
                qtnf_cmd_channel_tlv_add(cmd_skb, sme->channel);
 
        qtnf_bus_lock(vif->mac->bus);
-
-       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
-       if (unlikely(ret))
+       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+       if (ret)
                goto out;
 
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
-                      vif->vifid, res_code);
-               ret = -EFAULT;
-               goto out;
-       }
 out:
        qtnf_bus_unlock(vif->mac->bus);
+
        return ret;
 }
 
@@ -2495,7 +2322,6 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code)
 {
        struct sk_buff *cmd_skb;
        struct qlink_cmd_disconnect *cmd;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2509,19 +2335,13 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code)
        cmd = (struct qlink_cmd_disconnect *)cmd_skb->data;
        cmd->reason = cpu_to_le16(reason_code);
 
-       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
-       if (unlikely(ret))
+       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+       if (ret)
                goto out;
 
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
-                      vif->vifid, res_code);
-               ret = -EFAULT;
-               goto out;
-       }
 out:
        qtnf_bus_unlock(vif->mac->bus);
+
        return ret;
 }
 
@@ -2529,7 +2349,6 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up)
 {
        struct sk_buff *cmd_skb;
        struct qlink_cmd_updown *cmd;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2542,20 +2361,13 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up)
        cmd->if_up = !!up;
 
        qtnf_bus_lock(vif->mac->bus);
-
-       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
-       if (unlikely(ret))
+       ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+       if (ret)
                goto out;
 
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
-                      vif->vifid, res_code);
-               ret = -EFAULT;
-               goto out;
-       }
 out:
        qtnf_bus_unlock(vif->mac->bus);
+
        return ret;
 }
 
@@ -2563,7 +2375,6 @@ int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req)
 {
        struct sk_buff *cmd_skb;
        int ret;
-       u16 res_code;
        struct qlink_cmd_reg_notify *cmd;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
@@ -2604,29 +2415,10 @@ int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req)
        }
 
        qtnf_bus_lock(bus);
-
-       ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
+       ret = qtnf_cmd_send(bus, cmd_skb);
        if (ret)
                goto out;
 
-       switch (res_code) {
-       case QLINK_CMD_RESULT_ENOTSUPP:
-               pr_warn("reg update not supported\n");
-               ret = -EOPNOTSUPP;
-               break;
-       case QLINK_CMD_RESULT_EALREADY:
-               pr_info("regulatory domain is already set to %c%c",
-                       req->alpha2[0], req->alpha2[1]);
-               ret = -EALREADY;
-               break;
-       case QLINK_CMD_RESULT_OK:
-               ret = 0;
-               break;
-       default:
-               ret = -EFAULT;
-               break;
-       }
-
 out:
        qtnf_bus_unlock(bus);
 
@@ -2640,7 +2432,6 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
        struct qlink_cmd_get_chan_stats *cmd;
        struct qlink_resp_get_chan_stats *resp;
        size_t var_data_len;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret = 0;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
@@ -2654,25 +2445,10 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
        cmd = (struct qlink_cmd_get_chan_stats *)cmd_skb->data;
        cmd->channel = cpu_to_le16(channel);
 
-       ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code,
+       ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb,
                                       sizeof(*resp), &var_data_len);
-       if (unlikely(ret)) {
-               qtnf_bus_unlock(mac->bus);
-               return ret;
-       }
-
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               switch (res_code) {
-               case QLINK_CMD_RESULT_ENOTFOUND:
-                       ret = -ENOENT;
-                       break;
-               default:
-                       pr_err("cmd exec failed: 0x%.4X\n", res_code);
-                       ret = -EFAULT;
-                       break;
-               }
+       if (ret)
                goto out;
-       }
 
        resp = (struct qlink_resp_get_chan_stats *)resp_skb->data;
        ret = qtnf_cmd_resp_proc_chan_stat_info(stats, resp->info,
@@ -2681,6 +2457,7 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
 out:
        qtnf_bus_unlock(mac->bus);
        consume_skb(resp_skb);
+
        return ret;
 }
 
@@ -2690,7 +2467,6 @@ int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
        struct qtnf_wmac *mac = vif->mac;
        struct qlink_cmd_chan_switch *cmd;
        struct sk_buff *cmd_skb;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, vif->vifid,
@@ -2707,32 +2483,13 @@ int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
        cmd->block_tx = params->block_tx;
        cmd->beacon_count = params->count;
 
-       ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code);
-
-       if (unlikely(ret))
+       ret = qtnf_cmd_send(mac->bus, cmd_skb);
+       if (ret)
                goto out;
 
-       switch (res_code) {
-       case QLINK_CMD_RESULT_OK:
-               ret = 0;
-               break;
-       case QLINK_CMD_RESULT_ENOTFOUND:
-               ret = -ENOENT;
-               break;
-       case QLINK_CMD_RESULT_ENOTSUPP:
-               ret = -EOPNOTSUPP;
-               break;
-       case QLINK_CMD_RESULT_EALREADY:
-               ret = -EALREADY;
-               break;
-       case QLINK_CMD_RESULT_INVALID:
-       default:
-               ret = -EFAULT;
-               break;
-       }
-
 out:
        qtnf_bus_unlock(mac->bus);
+
        return ret;
 }
 
@@ -2742,7 +2499,6 @@ int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef)
        const struct qlink_resp_channel_get *resp;
        struct sk_buff *cmd_skb;
        struct sk_buff *resp_skb = NULL;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        int ret;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2752,25 +2508,18 @@ int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef)
                return -ENOMEM;
 
        qtnf_bus_lock(bus);
-
-       ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb, &res_code,
+       ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb,
                                       sizeof(*resp), NULL);
-
-       qtnf_bus_unlock(bus);
-
-       if (unlikely(ret))
+       if (ret)
                goto out;
 
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               ret = -ENODATA;
-               goto out;
-       }
-
        resp = (const struct qlink_resp_channel_get *)resp_skb->data;
        qlink_chandef_q2cfg(priv_to_wiphy(vif->mac), &resp->chan, chdef);
 
 out:
+       qtnf_bus_unlock(bus);
        consume_skb(resp_skb);
+
        return ret;
 }
 
@@ -2782,7 +2531,6 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
        struct sk_buff *cmd_skb;
        struct qlink_cmd_start_cac *cmd;
        int ret;
-       u16 res_code;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
                                            QLINK_CMD_START_CAC,
@@ -2795,19 +2543,12 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
        qlink_chandef_cfg2q(chdef, &cmd->chan);
 
        qtnf_bus_lock(bus);
-       ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
-       qtnf_bus_unlock(bus);
-
+       ret = qtnf_cmd_send(bus, cmd_skb);
        if (ret)
-               return ret;
+               goto out;
 
-       switch (res_code) {
-       case QLINK_CMD_RESULT_OK:
-               break;
-       default:
-               ret = -EOPNOTSUPP;
-               break;
-       }
+out:
+       qtnf_bus_unlock(bus);
 
        return ret;
 }
@@ -2819,7 +2560,6 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
        struct sk_buff *cmd_skb;
        struct qlink_tlv_hdr *tlv;
        size_t acl_size = qtnf_cmd_acl_data_size(params);
-       u16 res_code;
        int ret;
 
        cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2834,22 +2574,12 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
        qlink_acl_data_cfg2q(params, (struct qlink_acl_data *)tlv->val);
 
        qtnf_bus_lock(bus);
-       ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
-       qtnf_bus_unlock(bus);
-
-       if (unlikely(ret))
-               return ret;
+       ret = qtnf_cmd_send(bus, cmd_skb);
+       if (ret)
+               goto out;
 
-       switch (res_code) {
-       case QLINK_CMD_RESULT_OK:
-               break;
-       case QLINK_CMD_RESULT_INVALID:
-               ret = -EINVAL;
-               break;
-       default:
-               ret = -EOPNOTSUPP;
-               break;
-       }
+out:
+       qtnf_bus_unlock(bus);
 
        return ret;
 }
@@ -2858,7 +2588,6 @@ int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout)
 {
        struct qtnf_bus *bus = vif->mac->bus;
        struct sk_buff *cmd_skb;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        struct qlink_cmd_pm_set *cmd;
        int ret = 0;
 
@@ -2873,18 +2602,13 @@ int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout)
 
        qtnf_bus_lock(bus);
 
-       ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
-
-       if (unlikely(ret))
+       ret = qtnf_cmd_send(bus, cmd_skb);
+       if (ret)
                goto out;
 
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("cmd exec failed: 0x%.4X\n", res_code);
-               ret = -EFAULT;
-       }
-
 out:
        qtnf_bus_unlock(bus);
+
        return ret;
 }
 
@@ -2893,7 +2617,6 @@ int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
 {
        struct qtnf_bus *bus = vif->mac->bus;
        struct sk_buff *cmd_skb;
-       u16 res_code = QLINK_CMD_RESULT_OK;
        struct qlink_cmd_wowlan_set *cmd;
        u32 triggers = 0;
        int count = 0;
@@ -2929,16 +2652,10 @@ int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
 
        cmd->triggers = cpu_to_le32(triggers);
 
-       ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
-
-       if (unlikely(ret))
+       ret = qtnf_cmd_send(bus, cmd_skb);
+       if (ret)
                goto out;
 
-       if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
-               pr_err("cmd exec failed: 0x%.4X\n", res_code);
-               ret = -EFAULT;
-       }
-
 out:
        qtnf_bus_unlock(bus);
        return ret;
index 19abbc4e23e068498118b562833197c053e999c4..5d18a4a917c9041cbb3ad93596128092840642f8 100644 (file)
@@ -304,6 +304,19 @@ void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac)
        }
 }
 
+void qtnf_mac_ext_caps_free(struct qtnf_wmac *mac)
+{
+       if (mac->macinfo.extended_capabilities_len) {
+               kfree(mac->macinfo.extended_capabilities);
+               mac->macinfo.extended_capabilities = NULL;
+
+               kfree(mac->macinfo.extended_capabilities_mask);
+               mac->macinfo.extended_capabilities_mask = NULL;
+
+               mac->macinfo.extended_capabilities_len = 0;
+       }
+}
+
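
Since qtnf_mac_ext_caps_free() NULLs the pointers it releases and zeroes the stored length, its two call sites (the mac-info parser and qtnf_core_mac_detach()) can run in any order without risking a double free. Illustrative only:

static void example_detach_twice(struct qtnf_wmac *mac)
{
        qtnf_mac_ext_caps_free(mac);    /* frees and resets the fields */
        qtnf_mac_ext_caps_free(mac);    /* second call is a safe no-op */
}
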
 static void qtnf_vif_reset_handler(struct work_struct *work)
 {
        struct qtnf_vif *vif = container_of(work, struct qtnf_vif, reset_work);
@@ -370,6 +383,7 @@ static void qtnf_mac_scan_timeout(struct work_struct *work)
 static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
                                             unsigned int macid)
 {
+       struct qtnf_vif *vif;
        struct wiphy *wiphy;
        struct qtnf_wmac *mac;
        unsigned int i;
@@ -382,18 +396,20 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
 
        mac->macid = macid;
        mac->bus = bus;
+       mutex_init(&mac->mac_lock);
+       INIT_DELAYED_WORK(&mac->scan_timeout, qtnf_mac_scan_timeout);
 
        for (i = 0; i < QTNF_MAX_INTF; i++) {
-               memset(&mac->iflist[i], 0, sizeof(struct qtnf_vif));
-               mac->iflist[i].wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
-               mac->iflist[i].mac = mac;
-               mac->iflist[i].vifid = i;
-               qtnf_sta_list_init(&mac->iflist[i].sta_list);
-               mutex_init(&mac->mac_lock);
-               INIT_DELAYED_WORK(&mac->scan_timeout, qtnf_mac_scan_timeout);
-               mac->iflist[i].stats64 =
-                       netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
-               if (!mac->iflist[i].stats64)
+               vif = &mac->iflist[i];
+
+               memset(vif, 0, sizeof(*vif));
+               vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
+               vif->mac = mac;
+               vif->vifid = i;
+               qtnf_sta_list_init(&vif->sta_list);
+
+               vif->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+               if (!vif->stats64)
                        pr_warn("VIF%u.%u: per cpu stats allocation failed\n",
                                macid, i);
        }
@@ -493,8 +509,7 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
        }
 
        qtnf_mac_iface_comb_free(mac);
-       kfree(mac->macinfo.extended_capabilities);
-       kfree(mac->macinfo.extended_capabilities_mask);
+       qtnf_mac_ext_caps_free(mac);
        kfree(mac->macinfo.wowlan);
        wiphy_free(wiphy);
        bus->mac[macid] = NULL;
index a1e338a1f055a7ea906c5fc8ecfd979db84efe03..293055049caab8d5d11eff97edde4152fe38c7ac 100644 (file)
@@ -64,12 +64,6 @@ struct qtnf_sta_list {
        atomic_t size;
 };
 
-enum qtnf_sta_state {
-       QTNF_STA_DISCONNECTED,
-       QTNF_STA_CONNECTING,
-       QTNF_STA_CONNECTED
-};
-
 struct qtnf_vif {
        struct wireless_dev wdev;
        u8 bssid[ETH_ALEN];
@@ -77,7 +71,6 @@ struct qtnf_vif {
        u8 vifid;
        u8 bss_priority;
        u8 bss_status;
-       enum qtnf_sta_state sta_state;
        u16 mgmt_frames_bitmask;
        struct net_device *netdev;
        struct qtnf_wmac *mac;
@@ -151,6 +144,7 @@ struct qtnf_hw_info {
 struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
 struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac);
 void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac);
+void qtnf_mac_ext_caps_free(struct qtnf_wmac *mac);
 struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus);
 int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *priv,
                         const char *name, unsigned char name_assign_type);
index 68da81bec4e995b89272262ac3a3359fbef82c52..8b542b431b75af32a374666f20d2383526bdc85d 100644 (file)
@@ -171,24 +171,14 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
                return -EPROTO;
        }
 
-       if (vif->sta_state != QTNF_STA_CONNECTING) {
-               pr_err("VIF%u.%u: BSS_JOIN event when STA is not connecting\n",
-                      vif->mac->macid, vif->vifid);
-               return -EPROTO;
-       }
-
        pr_debug("VIF%u.%u: BSSID:%pM\n", vif->mac->macid, vif->vifid,
                 join_info->bssid);
 
        cfg80211_connect_result(vif->netdev, join_info->bssid, NULL, 0, NULL,
                                0, le16_to_cpu(join_info->status), GFP_KERNEL);
 
-       if (le16_to_cpu(join_info->status) == WLAN_STATUS_SUCCESS) {
-               vif->sta_state = QTNF_STA_CONNECTED;
+       if (le16_to_cpu(join_info->status) == WLAN_STATUS_SUCCESS)
                netif_carrier_on(vif->netdev);
-       } else {
-               vif->sta_state = QTNF_STA_DISCONNECTED;
-       }
 
        return 0;
 }
@@ -211,16 +201,10 @@ qtnf_event_handle_bss_leave(struct qtnf_vif *vif,
                return -EPROTO;
        }
 
-       if (vif->sta_state != QTNF_STA_CONNECTED)
-               pr_warn("VIF%u.%u: BSS_LEAVE event when STA is not connected\n",
-                       vif->mac->macid, vif->vifid);
-
        pr_debug("VIF%u.%u: disconnected\n", vif->mac->macid, vif->vifid);
 
        cfg80211_disconnected(vif->netdev, le16_to_cpu(leave_info->reason),
                              NULL, 0, 0, GFP_KERNEL);
-
-       vif->sta_state = QTNF_STA_DISCONNECTED;
        netif_carrier_off(vif->netdev);
 
        return 0;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
new file mode 100644 (file)
index 0000000..16795db
--- /dev/null
@@ -0,0 +1,392 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018 Quantenna Communications, Inc. All rights reserved. */
+
+#include <linux/printk.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/seq_file.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+
+#include "pcie_priv.h"
+#include "bus.h"
+#include "shm_ipc.h"
+#include "core.h"
+#include "debug.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt)    "qtnf_pcie: %s: " fmt, __func__
+
+#define QTN_SYSCTL_BAR 0
+#define QTN_SHMEM_BAR  2
+#define QTN_DMA_BAR    3
+
+int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb)
+{
+       struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
+       int ret;
+
+       ret = qtnf_shm_ipc_send(&priv->shm_ipc_ep_in, skb->data, skb->len);
+
+       if (ret == -ETIMEDOUT) {
+               pr_err("EP firmware is dead\n");
+               bus->fw_state = QTNF_FW_STATE_EP_DEAD;
+       }
+
+       return ret;
+}
+
+int qtnf_pcie_alloc_skb_array(struct qtnf_pcie_bus_priv *priv)
+{
+       struct sk_buff **vaddr;
+       int len;
+
+       len = priv->tx_bd_num * sizeof(*priv->tx_skb) +
+               priv->rx_bd_num * sizeof(*priv->rx_skb);
+       vaddr = devm_kzalloc(&priv->pdev->dev, len, GFP_KERNEL);
+
+       if (!vaddr)
+               return -ENOMEM;
+
+       priv->tx_skb = vaddr;
+
+       vaddr += priv->tx_bd_num;
+       priv->rx_skb = vaddr;
+
+       return 0;
+}
+
+void qtnf_pcie_bringup_fw_async(struct qtnf_bus *bus)
+{
+       struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
+       struct pci_dev *pdev = priv->pdev;
+
+       get_device(&pdev->dev);
+       schedule_work(&bus->fw_work);
+}
+
+static int qtnf_dbg_mps_show(struct seq_file *s, void *data)
+{
+       struct qtnf_bus *bus = dev_get_drvdata(s->private);
+       struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
+
+       seq_printf(s, "%d\n", priv->mps);
+
+       return 0;
+}
+
+static int qtnf_dbg_msi_show(struct seq_file *s, void *data)
+{
+       struct qtnf_bus *bus = dev_get_drvdata(s->private);
+       struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
+
+       seq_printf(s, "%u\n", priv->msi_enabled);
+
+       return 0;
+}
+
+static int qtnf_dbg_shm_stats(struct seq_file *s, void *data)
+{
+       struct qtnf_bus *bus = dev_get_drvdata(s->private);
+       struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
+
+       seq_printf(s, "shm_ipc_ep_in.tx_packet_count(%zu)\n",
+                  priv->shm_ipc_ep_in.tx_packet_count);
+       seq_printf(s, "shm_ipc_ep_in.rx_packet_count(%zu)\n",
+                  priv->shm_ipc_ep_in.rx_packet_count);
+       seq_printf(s, "shm_ipc_ep_out.tx_packet_count(%zu)\n",
+                  priv->shm_ipc_ep_out.tx_timeout_count);
+       seq_printf(s, "shm_ipc_ep_out.rx_packet_count(%zu)\n",
+                  priv->shm_ipc_ep_out.rx_packet_count);
+
+       return 0;
+}
+
+void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success,
+                           const char *drv_name)
+{
+       struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
+       struct pci_dev *pdev = priv->pdev;
+       int ret;
+
+       if (boot_success) {
+               bus->fw_state = QTNF_FW_STATE_FW_DNLD_DONE;
+
+               ret = qtnf_core_attach(bus);
+               if (ret) {
+                       pr_err("failed to attach core\n");
+                       boot_success = false;
+               }
+       }
+
+       if (boot_success) {
+               qtnf_debugfs_init(bus, drv_name);
+               qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show);
+               qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show);
+               qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats);
+       } else {
+               bus->fw_state = QTNF_FW_STATE_DETACHED;
+       }
+
+       put_device(&pdev->dev);
+}
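
One detail worth calling out in the two helpers above: the asynchronous firmware bring-up pins the PCI device for its whole lifetime. A short annotated recap of the pairing as it appears in this file (no new behavior, comments only):

/* Reference pairing used by the async firmware bring-up:
 *
 *   qtnf_pcie_bringup_fw_async():  get_device(&pdev->dev);
 *                                  schedule_work(&bus->fw_work);
 *   qtnf_pcie_fw_boot_done():      ... put_device(&pdev->dev);
 *
 * The worker therefore holds the device alive until bring-up
 * completes, and the reference is dropped on success and failure
 * alike.
 */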
+
+static void qtnf_tune_pcie_mps(struct qtnf_pcie_bus_priv *priv)
+{
+       struct pci_dev *pdev = priv->pdev;
+       struct pci_dev *parent;
+       int mps_p, mps_o, mps_m, mps;
+       int ret;
+
+       /* current mps */
+       mps_o = pcie_get_mps(pdev);
+
+       /* maximum supported mps */
+       mps_m = 128 << pdev->pcie_mpss;
+
+       /* suggested new mps value */
+       mps = mps_m;
+
+       if (pdev->bus && pdev->bus->self) {
+               /* parent (bus) mps */
+               parent = pdev->bus->self;
+
+               if (pci_is_pcie(parent)) {
+                       mps_p = pcie_get_mps(parent);
+                       mps = min(mps_m, mps_p);
+               }
+       }
+
+       ret = pcie_set_mps(pdev, mps);
+       if (ret) {
+               pr_err("failed to set mps to %d, keep using current %d\n",
+                      mps, mps_o);
+               priv->mps = mps_o;
+               return;
+       }
+
+       pr_debug("set mps to %d (was %d, max %d)\n", mps, mps_o, mps_m);
+       priv->mps = mps;
+}
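
For readers unfamiliar with the PCIe capability encoding: `pcie_mpss` expresses the maximum payload size as a power-of-two multiple of 128 bytes, which is what the `128 << pdev->pcie_mpss` above computes. A hedged, purely illustrative walk-through (the values are made up, not taken from any particular board):

/* Illustrative values only: a device advertising pcie_mpss == 2
 * supports payloads up to 128 << 2 == 512 bytes. If its upstream
 * bridge is currently configured for 256-byte payloads, then:
 *
 *   mps_m = 512;               // device maximum
 *   mps_p = 256;               // parent bridge setting
 *   mps   = min(mps_m, mps_p); // 256, passed to pcie_set_mps()
 */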
+
+static void qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv *priv, bool use_msi)
+{
+       struct pci_dev *pdev = priv->pdev;
+
+       /* fall back to legacy INTx interrupts by default */
+       priv->msi_enabled = 0;
+
+       /* check if MSI capability is available */
+       if (use_msi) {
+               if (!pci_enable_msi(pdev)) {
+                       pr_debug("enabled MSI interrupt\n");
+                       priv->msi_enabled = 1;
+               } else {
+                       pr_warn("failed to enable MSI interrupts");
+               }
+       }
+
+       if (!priv->msi_enabled) {
+               pr_warn("legacy PCIE interrupts enabled\n");
+               pci_intx(pdev, 1);
+       }
+}
+
+static void __iomem *qtnf_map_bar(struct qtnf_pcie_bus_priv *priv, u8 index)
+{
+       void __iomem *vaddr;
+       dma_addr_t busaddr;
+       size_t len;
+       int ret;
+
+       ret = pcim_iomap_regions(priv->pdev, 1 << index, "qtnfmac_pcie");
+       if (ret)
+               return IOMEM_ERR_PTR(ret);
+
+       busaddr = pci_resource_start(priv->pdev, index);
+       len = pci_resource_len(priv->pdev, index);
+       vaddr = pcim_iomap_table(priv->pdev)[index];
+       if (!vaddr)
+               return IOMEM_ERR_PTR(-ENOMEM);
+
+       pr_debug("BAR%u vaddr=0x%p busaddr=%pad len=%u\n",
+                index, vaddr, &busaddr, (int)len);
+
+       return vaddr;
+}
+
+static int qtnf_pcie_init_memory(struct qtnf_pcie_bus_priv *priv)
+{
+       int ret = -ENOMEM;
+
+       priv->sysctl_bar = qtnf_map_bar(priv, QTN_SYSCTL_BAR);
+       if (IS_ERR(priv->sysctl_bar)) {
+               pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR);
+               return ret;
+       }
+
+       priv->dmareg_bar = qtnf_map_bar(priv, QTN_DMA_BAR);
+       if (IS_ERR(priv->dmareg_bar)) {
+               pr_err("failed to map BAR%u\n", QTN_DMA_BAR);
+               return ret;
+       }
+
+       priv->epmem_bar = qtnf_map_bar(priv, QTN_SHMEM_BAR);
+       if (IS_ERR(priv->epmem_bar)) {
+               pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void qtnf_pcie_control_rx_callback(void *arg, const u8 __iomem *buf,
+                                         size_t len)
+{
+       struct qtnf_pcie_bus_priv *priv = arg;
+       struct qtnf_bus *bus = pci_get_drvdata(priv->pdev);
+       struct sk_buff *skb;
+
+       if (unlikely(len == 0)) {
+               pr_warn("zero length packet received\n");
+               return;
+       }
+
+       skb = __dev_alloc_skb(len, GFP_KERNEL);
+
+       if (unlikely(!skb)) {
+               pr_err("failed to allocate skb\n");
+               return;
+       }
+
+       memcpy_fromio(skb_put(skb, len), buf, len);
+
+       qtnf_trans_handle_rx_ctl_packet(bus, skb);
+}
+
+void qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv,
+                           struct qtnf_shm_ipc_region __iomem *ipc_tx_reg,
+                           struct qtnf_shm_ipc_region __iomem *ipc_rx_reg,
+                           const struct qtnf_shm_ipc_int *ipc_int)
+{
+       const struct qtnf_shm_ipc_rx_callback rx_callback = {
+                                       qtnf_pcie_control_rx_callback, priv };
+
+       qtnf_shm_ipc_init(&priv->shm_ipc_ep_in, QTNF_SHM_IPC_OUTBOUND,
+                         ipc_tx_reg, priv->workqueue,
+                         ipc_int, &rx_callback);
+       qtnf_shm_ipc_init(&priv->shm_ipc_ep_out, QTNF_SHM_IPC_INBOUND,
+                         ipc_rx_reg, priv->workqueue,
+                         ipc_int, &rx_callback);
+}
+
+int qtnf_pcie_probe(struct pci_dev *pdev, size_t priv_size,
+                   const struct qtnf_bus_ops *bus_ops, u64 dma_mask,
+                   bool use_msi)
+{
+       struct qtnf_pcie_bus_priv *pcie_priv;
+       struct qtnf_bus *bus;
+       int ret;
+
+       bus = devm_kzalloc(&pdev->dev,
+                          sizeof(*bus) + priv_size, GFP_KERNEL);
+       if (!bus)
+               return -ENOMEM;
+
+       pcie_priv = get_bus_priv(bus);
+
+       pci_set_drvdata(pdev, bus);
+       bus->bus_ops = bus_ops;
+       bus->dev = &pdev->dev;
+       bus->fw_state = QTNF_FW_STATE_RESET;
+       pcie_priv->pdev = pdev;
+       pcie_priv->tx_stopped = 0;
+
+       mutex_init(&bus->bus_lock);
+       spin_lock_init(&pcie_priv->tx_lock);
+       spin_lock_init(&pcie_priv->tx_reclaim_lock);
+
+       pcie_priv->tx_full_count = 0;
+       pcie_priv->tx_done_count = 0;
+       pcie_priv->pcie_irq_count = 0;
+       pcie_priv->tx_reclaim_done = 0;
+       pcie_priv->tx_reclaim_req = 0;
+
+       pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PCIE");
+       if (!pcie_priv->workqueue) {
+               pr_err("failed to alloc bus workqueue\n");
+               ret = -ENODEV;
+               goto err_init;
+       }
+
+       init_dummy_netdev(&bus->mux_dev);
+
+       if (!pci_is_pcie(pdev)) {
+               pr_err("device %s is not PCI Express\n", pci_name(pdev));
+               ret = -EIO;
+               goto err_base;
+       }
+
+       qtnf_tune_pcie_mps(pcie_priv);
+
+       ret = pcim_enable_device(pdev);
+       if (ret) {
+               pr_err("failed to init PCI device %x\n", pdev->device);
+               goto err_base;
+       }
+
+       pr_debug("successful init of PCI device %x\n", pdev->device);
+
+       ret = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
+       if (ret) {
+               pr_err("PCIE DMA coherent mask init failed\n");
+               goto err_base;
+       }
+
+       pci_set_master(pdev);
+       qtnf_pcie_init_irq(pcie_priv, use_msi);
+
+       ret = qtnf_pcie_init_memory(pcie_priv);
+       if (ret < 0) {
+               pr_err("PCIE memory init failed\n");
+               goto err_base;
+       }
+
+       pci_save_state(pdev);
+
+       return 0;
+
+err_base:
+       flush_workqueue(pcie_priv->workqueue);
+       destroy_workqueue(pcie_priv->workqueue);
+err_init:
+       pci_set_drvdata(pdev, NULL);
+
+       return ret;
+}
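
Taken together, `qtnf_pcie_probe()` is the first half of any backend's probe: it allocates the combined bus plus private area, tunes MPS, enables the device, selects the IRQ mode and maps the BARs. A minimal hedged sketch of how a hypothetical backend would build on it (the `foo` names are invented for illustration; `qtnf_pcie_pearl_probe()` in pearl_pcie.c below is the real example):

/* Hypothetical backend probe built on the shared helpers above. */
static int qtnf_pcie_foo_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct qtnf_bus *bus;
	int ret;

	/* common part: bus allocation, MPS tuning, IRQs, BAR mapping */
	ret = qtnf_pcie_probe(pdev, sizeof(struct qtnf_pcie_foo_state),
			      &qtnf_pcie_foo_bus_ops, DMA_BIT_MASK(32),
			      true /* use_msi */);
	if (ret)
		return ret;

	bus = pci_get_drvdata(pdev);

	/* ...chip-specific ring/IPC setup and IRQ handler would go here... */

	qtnf_pcie_bringup_fw_async(bus);
	return 0;
}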
+
+static void qtnf_pcie_free_shm_ipc(struct qtnf_pcie_bus_priv *priv)
+{
+       qtnf_shm_ipc_free(&priv->shm_ipc_ep_in);
+       qtnf_shm_ipc_free(&priv->shm_ipc_ep_out);
+}
+
+void qtnf_pcie_remove(struct qtnf_bus *bus, struct qtnf_pcie_bus_priv *priv)
+{
+       cancel_work_sync(&bus->fw_work);
+
+       if (bus->fw_state == QTNF_FW_STATE_ACTIVE ||
+           bus->fw_state == QTNF_FW_STATE_EP_DEAD)
+               qtnf_core_detach(bus);
+
+       netif_napi_del(&bus->mux_napi);
+       flush_workqueue(priv->workqueue);
+       destroy_workqueue(priv->workqueue);
+       tasklet_kill(&priv->reclaim_tq);
+
+       qtnf_pcie_free_shm_ipc(priv);
+       qtnf_debugfs_remove(bus);
+       pci_set_drvdata(priv->pdev, NULL);
+}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
new file mode 100644 (file)
index 0000000..5c70fb4
--- /dev/null
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018 Quantenna Communications, Inc. All rights reserved. */
+
+#ifndef _QTN_FMAC_PCIE_H_
+#define _QTN_FMAC_PCIE_H_
+
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+
+#include "shm_ipc.h"
+#include "bus.h"
+
+#define SKB_BUF_SIZE           2048
+
+#define QTN_FW_DL_TIMEOUT_MS   3000
+#define QTN_FW_QLINK_TIMEOUT_MS        30000
+#define QTN_EP_RESET_WAIT_MS   1000
+
+struct qtnf_pcie_bus_priv {
+       struct pci_dev *pdev;
+
+       spinlock_t tx_reclaim_lock;
+       spinlock_t tx_lock;
+       int mps;
+
+       struct workqueue_struct *workqueue;
+       struct tasklet_struct reclaim_tq;
+
+       void __iomem *sysctl_bar;
+       void __iomem *epmem_bar;
+       void __iomem *dmareg_bar;
+
+       struct qtnf_shm_ipc shm_ipc_ep_in;
+       struct qtnf_shm_ipc shm_ipc_ep_out;
+
+       u16 tx_bd_num;
+       u16 rx_bd_num;
+
+       struct sk_buff **tx_skb;
+       struct sk_buff **rx_skb;
+
+       u32 rx_bd_w_index;
+       u32 rx_bd_r_index;
+
+       u32 tx_bd_w_index;
+       u32 tx_bd_r_index;
+
+       /* diagnostics stats */
+       u32 pcie_irq_count;
+       u32 tx_full_count;
+       u32 tx_done_count;
+       u32 tx_reclaim_done;
+       u32 tx_reclaim_req;
+
+       u8 msi_enabled;
+       u8 tx_stopped;
+};
+
+int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb);
+int qtnf_pcie_alloc_skb_array(struct qtnf_pcie_bus_priv *priv);
+void qtnf_pcie_bringup_fw_async(struct qtnf_bus *bus);
+void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success,
+                           const char *drv_name);
+void qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv,
+                           struct qtnf_shm_ipc_region __iomem *ipc_tx_reg,
+                           struct qtnf_shm_ipc_region __iomem *ipc_rx_reg,
+                           const struct qtnf_shm_ipc_int *ipc_int);
+int qtnf_pcie_probe(struct pci_dev *pdev, size_t priv_size,
+                   const struct qtnf_bus_ops *bus_ops, u64 dma_mask,
+                   bool use_msi);
+void qtnf_pcie_remove(struct qtnf_bus *bus, struct qtnf_pcie_bus_priv *priv);
+
+static inline void qtnf_non_posted_write(u32 val, void __iomem *basereg)
+{
+       writel(val, basereg);
+
+       /* flush posted write */
+       readl(basereg);
+}
+
+#endif /* _QTN_FMAC_PCIE_H_ */
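
The layout of `struct qtnf_pcie_bus_priv` above is deliberate: chip-specific backends embed it as their *first* member, so the pointer returned by `get_bus_priv()` is valid as either type. A hedged sketch of the pattern (hypothetical `foo` names; `struct qtnf_pcie_pearl_state` in pearl_pcie.c below is the real instance):

/* Hypothetical backend state embedding the shared bus private area. */
struct qtnf_pcie_foo_state {
	struct qtnf_pcie_bus_priv base;	/* must remain the first member */
	void __iomem *foo_reg_base;	/* chip-specific registers */
	u32 foo_irq_mask;		/* chip-specific IRQ bookkeeping */
};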
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
new file mode 100644 (file)
index 0000000..5aca12a
--- /dev/null
@@ -0,0 +1,1262 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/crc32.h>
+#include <linux/spinlock.h>
+#include <linux/circ_buf.h>
+#include <linux/log2.h>
+
+#include "pcie_priv.h"
+#include "pearl_pcie_regs.h"
+#include "pearl_pcie_ipc.h"
+#include "qtn_hw_ids.h"
+#include "core.h"
+#include "bus.h"
+#include "shm_ipc.h"
+#include "debug.h"
+
+static bool use_msi = true;
+module_param(use_msi, bool, 0644);
+MODULE_PARM_DESC(use_msi, "set to 0 to use legacy INTx interrupts");
+
+static unsigned int tx_bd_size_param = 32;
+module_param(tx_bd_size_param, uint, 0644);
+MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptor queue size; must be a power of two");
+
+static unsigned int rx_bd_size_param = 256;
+module_param(rx_bd_size_param, uint, 0644);
+MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptor queue size; must be a power of two");
+
+static u8 flashboot = 1;
+module_param(flashboot, byte, 0644);
+MODULE_PARM_DESC(flashboot, "set to 0 to load the firmware binary from the filesystem instead of flash");
+
+#define DRV_NAME       "qtnfmac_pearl_pcie"
+
+struct qtnf_pearl_bda {
+       __le16 bda_len;
+       __le16 bda_version;
+       __le32 bda_pci_endian;
+       __le32 bda_ep_state;
+       __le32 bda_rc_state;
+       __le32 bda_dma_mask;
+       __le32 bda_msi_addr;
+       __le32 bda_flashsz;
+       u8 bda_boardname[PCIE_BDA_NAMELEN];
+       __le32 bda_rc_msi_enabled;
+       u8 bda_hhbm_list[PCIE_HHBM_MAX_SIZE];
+       __le32 bda_dsbw_start_index;
+       __le32 bda_dsbw_end_index;
+       __le32 bda_dsbw_total_bytes;
+       __le32 bda_rc_tx_bd_base;
+       __le32 bda_rc_tx_bd_num;
+       u8 bda_pcie_mac[QTN_ENET_ADDR_LENGTH];
+       struct qtnf_shm_ipc_region bda_shm_reg1 __aligned(4096); /* host TX */
+       struct qtnf_shm_ipc_region bda_shm_reg2 __aligned(4096); /* host RX */
+} __packed;
+
+struct qtnf_pearl_tx_bd {
+       __le32 addr;
+       __le32 addr_h;
+       __le32 info;
+       __le32 info_h;
+} __packed;
+
+struct qtnf_pearl_rx_bd {
+       __le32 addr;
+       __le32 addr_h;
+       __le32 info;
+       __le32 info_h;
+       __le32 next_ptr;
+       __le32 next_ptr_h;
+} __packed;
+
+struct qtnf_pearl_fw_hdr {
+       u8 boardflg[8];
+       __le32 fwsize;
+       __le32 seqnum;
+       __le32 type;
+       __le32 pktlen;
+       __le32 crc;
+} __packed;
+
+struct qtnf_pcie_pearl_state {
+       struct qtnf_pcie_bus_priv base;
+
+       /* lock for irq configuration changes */
+       spinlock_t irq_lock;
+
+       struct qtnf_pearl_bda __iomem *bda;
+       void __iomem *pcie_reg_base;
+
+       struct qtnf_pearl_tx_bd *tx_bd_vbase;
+       dma_addr_t tx_bd_pbase;
+
+       struct qtnf_pearl_rx_bd *rx_bd_vbase;
+       dma_addr_t rx_bd_pbase;
+
+       dma_addr_t bd_table_paddr;
+       void *bd_table_vaddr;
+       u32 bd_table_len;
+       u32 pcie_irq_mask;
+       u32 pcie_irq_rx_count;
+       u32 pcie_irq_tx_count;
+       u32 pcie_irq_uf_count;
+};
+
+static inline void qtnf_init_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ps->irq_lock, flags);
+       ps->pcie_irq_mask = (PCIE_HDP_INT_RX_BITS | PCIE_HDP_INT_TX_BITS);
+       spin_unlock_irqrestore(&ps->irq_lock, flags);
+}
+
+static inline void qtnf_enable_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ps->irq_lock, flags);
+       writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
+       spin_unlock_irqrestore(&ps->irq_lock, flags);
+}
+
+static inline void qtnf_disable_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ps->irq_lock, flags);
+       writel(0x0, PCIE_HDP_INT_EN(ps->pcie_reg_base));
+       spin_unlock_irqrestore(&ps->irq_lock, flags);
+}
+
+static inline void qtnf_en_rxdone_irq(struct qtnf_pcie_pearl_state *ps)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ps->irq_lock, flags);
+       ps->pcie_irq_mask |= PCIE_HDP_INT_RX_BITS;
+       writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
+       spin_unlock_irqrestore(&ps->irq_lock, flags);
+}
+
+static inline void qtnf_dis_rxdone_irq(struct qtnf_pcie_pearl_state *ps)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ps->irq_lock, flags);
+       ps->pcie_irq_mask &= ~PCIE_HDP_INT_RX_BITS;
+       writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
+       spin_unlock_irqrestore(&ps->irq_lock, flags);
+}
+
+static inline void qtnf_en_txdone_irq(struct qtnf_pcie_pearl_state *ps)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ps->irq_lock, flags);
+       ps->pcie_irq_mask |= PCIE_HDP_INT_TX_BITS;
+       writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
+       spin_unlock_irqrestore(&ps->irq_lock, flags);
+}
+
+static inline void qtnf_dis_txdone_irq(struct qtnf_pcie_pearl_state *ps)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ps->irq_lock, flags);
+       ps->pcie_irq_mask &= ~PCIE_HDP_INT_TX_BITS;
+       writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
+       spin_unlock_irqrestore(&ps->irq_lock, flags);
+}
+
+static void qtnf_deassert_intx(struct qtnf_pcie_pearl_state *ps)
+{
+       void __iomem *reg = ps->base.sysctl_bar + PEARL_PCIE_CFG0_OFFSET;
+       u32 cfg;
+
+       cfg = readl(reg);
+       cfg &= ~PEARL_ASSERT_INTX;
+       qtnf_non_posted_write(cfg, reg);
+}
+
+static void qtnf_pearl_reset_ep(struct qtnf_pcie_pearl_state *ps)
+{
+       const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_EP_RESET);
+       void __iomem *reg = ps->base.sysctl_bar +
+                           QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;
+
+       qtnf_non_posted_write(data, reg);
+       msleep(QTN_EP_RESET_WAIT_MS);
+       pci_restore_state(ps->base.pdev);
+}
+
+static void qtnf_pcie_pearl_ipc_gen_ep_int(void *arg)
+{
+       const struct qtnf_pcie_pearl_state *ps = arg;
+       const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_IPC_IRQ);
+       void __iomem *reg = ps->base.sysctl_bar +
+                           QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;
+
+       qtnf_non_posted_write(data, reg);
+}
+
+static int qtnf_is_state(__le32 __iomem *reg, u32 state)
+{
+       u32 s = readl(reg);
+
+       return s & state;
+}
+
+static void qtnf_set_state(__le32 __iomem *reg, u32 state)
+{
+       u32 s = readl(reg);
+
+       qtnf_non_posted_write(state | s, reg);
+}
+
+static void qtnf_clear_state(__le32 __iomem *reg, u32 state)
+{
+       u32 s = readl(reg);
+
+       qtnf_non_posted_write(s & ~state, reg);
+}
+
+static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms)
+{
+       u32 timeout = 0;
+
+       while (qtnf_is_state(reg, state) == 0) {
+               usleep_range(1000, 1200);
+               if (++timeout > delay_in_ms)
+                       return -1;
+       }
+
+       return 0;
+}
+
+static int pearl_alloc_bd_table(struct qtnf_pcie_pearl_state *ps)
+{
+       struct qtnf_pcie_bus_priv *priv = &ps->base;
+       dma_addr_t paddr;
+       void *vaddr;
+       int len;
+
+       len = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd) +
+               priv->rx_bd_num * sizeof(struct qtnf_pearl_rx_bd);
+
+       vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
+       if (!vaddr)
+               return -ENOMEM;
+
+       /* tx bd */
+
+       memset(vaddr, 0, len);
+
+       ps->bd_table_vaddr = vaddr;
+       ps->bd_table_paddr = paddr;
+       ps->bd_table_len = len;
+
+       ps->tx_bd_vbase = vaddr;
+       ps->tx_bd_pbase = paddr;
+
+       pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
+
+       priv->tx_bd_r_index = 0;
+       priv->tx_bd_w_index = 0;
+
+       /* rx bd */
+
+       vaddr = ((struct qtnf_pearl_tx_bd *)vaddr) + priv->tx_bd_num;
+       paddr += priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);
+
+       ps->rx_bd_vbase = vaddr;
+       ps->rx_bd_pbase = paddr;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       writel(QTN_HOST_HI32(paddr),
+              PCIE_HDP_TX_HOST_Q_BASE_H(ps->pcie_reg_base));
+#endif
+       writel(QTN_HOST_LO32(paddr),
+              PCIE_HDP_TX_HOST_Q_BASE_L(ps->pcie_reg_base));
+       writel(priv->rx_bd_num | (sizeof(struct qtnf_pearl_rx_bd)) << 16,
+              PCIE_HDP_TX_HOST_Q_SZ_CTRL(ps->pcie_reg_base));
+
+       pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
+
+       return 0;
+}
+
+static int pearl_skb2rbd_attach(struct qtnf_pcie_pearl_state *ps, u16 index)
+{
+       struct qtnf_pcie_bus_priv *priv = &ps->base;
+       struct qtnf_pearl_rx_bd *rxbd;
+       struct sk_buff *skb;
+       dma_addr_t paddr;
+
+       skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC);
+       if (!skb) {
+               priv->rx_skb[index] = NULL;
+               return -ENOMEM;
+       }
+
+       priv->rx_skb[index] = skb;
+       rxbd = &ps->rx_bd_vbase[index];
+
+       paddr = pci_map_single(priv->pdev, skb->data,
+                              SKB_BUF_SIZE, PCI_DMA_FROMDEVICE);
+       if (pci_dma_mapping_error(priv->pdev, paddr)) {
+               pr_err("skb DMA mapping error: %pad\n", &paddr);
+               return -ENOMEM;
+       }
+
+       /* keep rx skb paddrs in rx buffer descriptors for cleanup purposes */
+       rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
+       rxbd->addr_h = cpu_to_le32(QTN_HOST_HI32(paddr));
+       rxbd->info = 0x0;
+
+       priv->rx_bd_w_index = index;
+
+       /* sync up all descriptor updates */
+       wmb();
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       writel(QTN_HOST_HI32(paddr),
+              PCIE_HDP_HHBM_BUF_PTR_H(ps->pcie_reg_base));
+#endif
+       writel(QTN_HOST_LO32(paddr),
+              PCIE_HDP_HHBM_BUF_PTR(ps->pcie_reg_base));
+
+       writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(ps->pcie_reg_base));
+       return 0;
+}
+
+static int pearl_alloc_rx_buffers(struct qtnf_pcie_pearl_state *ps)
+{
+       u16 i;
+       int ret = 0;
+
+       memset(ps->rx_bd_vbase, 0x0,
+              ps->base.rx_bd_num * sizeof(struct qtnf_pearl_rx_bd));
+
+       for (i = 0; i < ps->base.rx_bd_num; i++) {
+               ret = pearl_skb2rbd_attach(ps, i);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+/* all rx/tx activity should have ceased before calling this function */
+static void qtnf_pearl_free_xfer_buffers(struct qtnf_pcie_pearl_state *ps)
+{
+       struct qtnf_pcie_bus_priv *priv = &ps->base;
+       struct qtnf_pearl_tx_bd *txbd;
+       struct qtnf_pearl_rx_bd *rxbd;
+       struct sk_buff *skb;
+       dma_addr_t paddr;
+       int i;
+
+       /* free rx buffers */
+       for (i = 0; i < priv->rx_bd_num; i++) {
+               if (priv->rx_skb && priv->rx_skb[i]) {
+                       rxbd = &ps->rx_bd_vbase[i];
+                       skb = priv->rx_skb[i];
+                       paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
+                                             le32_to_cpu(rxbd->addr));
+                       pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE,
+                                        PCI_DMA_FROMDEVICE);
+                       dev_kfree_skb_any(skb);
+                       priv->rx_skb[i] = NULL;
+               }
+       }
+
+       /* free tx buffers */
+       for (i = 0; i < priv->tx_bd_num; i++) {
+               if (priv->tx_skb && priv->tx_skb[i]) {
+                       txbd = &ps->tx_bd_vbase[i];
+                       skb = priv->tx_skb[i];
+                       paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
+                                             le32_to_cpu(txbd->addr));
+                       pci_unmap_single(priv->pdev, paddr, skb->len,
+                                        PCI_DMA_TODEVICE);
+                       dev_kfree_skb_any(skb);
+                       priv->tx_skb[i] = NULL;
+               }
+       }
+}
+
+static int pearl_hhbm_init(struct qtnf_pcie_pearl_state *ps)
+{
+       u32 val;
+
+       val = readl(PCIE_HHBM_CONFIG(ps->pcie_reg_base));
+       val |= HHBM_CONFIG_SOFT_RESET;
+       writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
+       usleep_range(50, 100);
+       val &= ~HHBM_CONFIG_SOFT_RESET;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       val |= HHBM_64BIT;
+#endif
+       writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
+       writel(ps->base.rx_bd_num, PCIE_HHBM_Q_LIMIT_REG(ps->pcie_reg_base));
+
+       return 0;
+}
+
+static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps)
+{
+       struct qtnf_pcie_bus_priv *priv = &ps->base;
+       int ret;
+       u32 val;
+
+       priv->tx_bd_num = tx_bd_size_param;
+       priv->rx_bd_num = rx_bd_size_param;
+       priv->rx_bd_w_index = 0;
+       priv->rx_bd_r_index = 0;
+
+       if (!priv->tx_bd_num || !is_power_of_2(priv->tx_bd_num)) {
+               pr_err("tx_bd_size_param %u is not power of two\n",
+                      priv->tx_bd_num);
+               return -EINVAL;
+       }
+
+       val = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);
+       if (val > PCIE_HHBM_MAX_SIZE) {
+               pr_err("tx_bd_size_param %u is too large\n",
+                      priv->tx_bd_num);
+               return -EINVAL;
+       }
+
+       if (!priv->rx_bd_num || !is_power_of_2(priv->rx_bd_num)) {
+               pr_err("rx_bd_size_param %u is not power of two\n",
+                      priv->rx_bd_num);
+               return -EINVAL;
+       }
+
+       val = priv->rx_bd_num * sizeof(dma_addr_t);
+       if (val > PCIE_HHBM_MAX_SIZE) {
+               pr_err("rx_bd_size_param %u is too large\n",
+                      priv->rx_bd_num);
+               return -EINVAL;
+       }
+
+       ret = pearl_hhbm_init(ps);
+       if (ret) {
+               pr_err("failed to init h/w queues\n");
+               return ret;
+       }
+
+       ret = qtnf_pcie_alloc_skb_array(priv);
+       if (ret) {
+               pr_err("failed to allocate skb array\n");
+               return ret;
+       }
+
+       ret = pearl_alloc_bd_table(ps);
+       if (ret) {
+               pr_err("failed to allocate bd table\n");
+               return ret;
+       }
+
+       ret = pearl_alloc_rx_buffers(ps);
+       if (ret) {
+               pr_err("failed to allocate rx buffers\n");
+               return ret;
+       }
+
+       return ret;
+}
+
+static void qtnf_pearl_data_tx_reclaim(struct qtnf_pcie_pearl_state *ps)
+{
+       struct qtnf_pcie_bus_priv *priv = &ps->base;
+       struct qtnf_pearl_tx_bd *txbd;
+       struct sk_buff *skb;
+       unsigned long flags;
+       dma_addr_t paddr;
+       u32 tx_done_index;
+       int count = 0;
+       int i;
+
+       spin_lock_irqsave(&priv->tx_reclaim_lock, flags);
+
+       tx_done_index = readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base))
+                       & (priv->tx_bd_num - 1);
+
+       i = priv->tx_bd_r_index;
+
+       while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) {
+               skb = priv->tx_skb[i];
+               if (likely(skb)) {
+                       txbd = &ps->tx_bd_vbase[i];
+                       paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
+                                             le32_to_cpu(txbd->addr));
+                       pci_unmap_single(priv->pdev, paddr, skb->len,
+                                        PCI_DMA_TODEVICE);
+
+                       if (skb->dev) {
+                               qtnf_update_tx_stats(skb->dev, skb);
+                               if (unlikely(priv->tx_stopped)) {
+                                       qtnf_wake_all_queues(skb->dev);
+                                       priv->tx_stopped = 0;
+                               }
+                       }
+
+                       dev_kfree_skb_any(skb);
+               }
+
+               priv->tx_skb[i] = NULL;
+               count++;
+
+               if (++i >= priv->tx_bd_num)
+                       i = 0;
+       }
+
+       priv->tx_reclaim_done += count;
+       priv->tx_reclaim_req++;
+       priv->tx_bd_r_index = i;
+
+       spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags);
+}
+
+static int qtnf_tx_queue_ready(struct qtnf_pcie_pearl_state *ps)
+{
+       struct qtnf_pcie_bus_priv *priv = &ps->base;
+
+       if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
+                       priv->tx_bd_num)) {
+               qtnf_pearl_data_tx_reclaim(ps);
+
+               if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
+                               priv->tx_bd_num)) {
+                       pr_warn_ratelimited("reclaim full Tx queue\n");
+                       priv->tx_full_count++;
+                       return 0;
+               }
+       }
+
+       return 1;
+}
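
The Tx/Rx rings are managed with the `CIRC_CNT()`/`CIRC_SPACE()` helpers from `<linux/circ_buf.h>`, which only work when the ring length is a power of two; that is exactly what `qtnf_pcie_pearl_init_xfer()` below validates for the module parameters. A worked example with made-up indices:

/* Ring of 32 descriptors, write index 5, read index 30:
 *
 *   CIRC_CNT(5, 30, 32)   == (5 - 30) & 31 == 7   entries pending
 *   CIRC_SPACE(5, 30, 32) == 32 - 7 - 1    == 24  free slots
 *
 * One slot is always left empty so that "full" and "empty" rings
 * can be told apart without extra state.
 */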
+
+static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
+{
+       struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
+       struct qtnf_pcie_bus_priv *priv = &ps->base;
+       dma_addr_t txbd_paddr, skb_paddr;
+       struct qtnf_pearl_tx_bd *txbd;
+       unsigned long flags;
+       int len, i;
+       u32 info;
+       int ret = 0;
+
+       spin_lock_irqsave(&priv->tx_lock, flags);
+
+       if (!qtnf_tx_queue_ready(ps)) {
+               if (skb->dev) {
+                       netif_tx_stop_all_queues(skb->dev);
+                       priv->tx_stopped = 1;
+               }
+
+               spin_unlock_irqrestore(&priv->tx_lock, flags);
+               return NETDEV_TX_BUSY;
+       }
+
+       i = priv->tx_bd_w_index;
+       priv->tx_skb[i] = skb;
+       len = skb->len;
+
+       skb_paddr = pci_map_single(priv->pdev, skb->data,
+                                  skb->len, PCI_DMA_TODEVICE);
+       if (pci_dma_mapping_error(priv->pdev, skb_paddr)) {
+               pr_err("skb DMA mapping error: %pad\n", &skb_paddr);
+               ret = -ENOMEM;
+               goto tx_done;
+       }
+
+       txbd = &ps->tx_bd_vbase[i];
+       txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr));
+       txbd->addr_h = cpu_to_le32(QTN_HOST_HI32(skb_paddr));
+
+       info = (len & QTN_PCIE_TX_DESC_LEN_MASK) << QTN_PCIE_TX_DESC_LEN_SHIFT;
+       txbd->info = cpu_to_le32(info);
+
+       /* sync up all descriptor updates before passing them to EP */
+       dma_wmb();
+
+       /* write new TX descriptor to PCIE_RX_FIFO on EP */
+       txbd_paddr = ps->tx_bd_pbase + i * sizeof(struct qtnf_pearl_tx_bd);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       writel(QTN_HOST_HI32(txbd_paddr),
+              PCIE_HDP_HOST_WR_DESC0_H(ps->pcie_reg_base));
+#endif
+       writel(QTN_HOST_LO32(txbd_paddr),
+              PCIE_HDP_HOST_WR_DESC0(ps->pcie_reg_base));
+
+       if (++i >= priv->tx_bd_num)
+               i = 0;
+
+       priv->tx_bd_w_index = i;
+
+tx_done:
+       if (ret && skb) {
+               pr_err_ratelimited("drop skb\n");
+               if (skb->dev)
+                       skb->dev->stats.tx_dropped++;
+               dev_kfree_skb_any(skb);
+       }
+
+       priv->tx_done_count++;
+       spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+       qtnf_pearl_data_tx_reclaim(ps);
+
+       return NETDEV_TX_OK;
+}
+
+static irqreturn_t qtnf_pcie_pearl_interrupt(int irq, void *data)
+{
+       struct qtnf_bus *bus = (struct qtnf_bus *)data;
+       struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
+       struct qtnf_pcie_bus_priv *priv = &ps->base;
+       u32 status;
+
+       priv->pcie_irq_count++;
+       status = readl(PCIE_HDP_INT_STATUS(ps->pcie_reg_base));
+
+       qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
+       qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);
+
+       if (!(status & ps->pcie_irq_mask))
+               goto irq_done;
+
+       if (status & PCIE_HDP_INT_RX_BITS)
+               ps->pcie_irq_rx_count++;
+
+       if (status & PCIE_HDP_INT_TX_BITS)
+               ps->pcie_irq_tx_count++;
+
+       if (status & PCIE_HDP_INT_HHBM_UF)
+               ps->pcie_irq_uf_count++;
+
+       if (status & PCIE_HDP_INT_RX_BITS) {
+               qtnf_dis_rxdone_irq(ps);
+               napi_schedule(&bus->mux_napi);
+       }
+
+       if (status & PCIE_HDP_INT_TX_BITS) {
+               qtnf_dis_txdone_irq(ps);
+               tasklet_hi_schedule(&priv->reclaim_tq);
+       }
+
+irq_done:
+       /* H/W workaround: clean all bits, not only enabled */
+       qtnf_non_posted_write(~0U, PCIE_HDP_INT_STATUS(ps->pcie_reg_base));
+
+       if (!priv->msi_enabled)
+               qtnf_deassert_intx(ps);
+
+       return IRQ_HANDLED;
+}
+
+static int qtnf_rx_data_ready(struct qtnf_pcie_pearl_state *ps)
+{
+       u16 index = ps->base.rx_bd_r_index;
+       struct qtnf_pearl_rx_bd *rxbd;
+       u32 descw;
+
+       rxbd = &ps->rx_bd_vbase[index];
+       descw = le32_to_cpu(rxbd->info);
+
+       if (descw & QTN_TXDONE_MASK)
+               return 1;
+
+       return 0;
+}
+
+static int qtnf_pcie_pearl_rx_poll(struct napi_struct *napi, int budget)
+{
+       struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
+       struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
+       struct qtnf_pcie_bus_priv *priv = &ps->base;
+       struct net_device *ndev = NULL;
+       struct sk_buff *skb = NULL;
+       int processed = 0;
+       struct qtnf_pearl_rx_bd *rxbd;
+       dma_addr_t skb_paddr;
+       int consume;
+       u32 descw;
+       u32 psize;
+       u16 r_idx;
+       u16 w_idx;
+       int ret;
+
+       while (processed < budget) {
+               if (!qtnf_rx_data_ready(ps))
+                       goto rx_out;
+
+               r_idx = priv->rx_bd_r_index;
+               rxbd = &ps->rx_bd_vbase[r_idx];
+               descw = le32_to_cpu(rxbd->info);
+
+               skb = priv->rx_skb[r_idx];
+               psize = QTN_GET_LEN(descw);
+               consume = 1;
+
+               if (!(descw & QTN_TXDONE_MASK)) {
+                       pr_warn("skip invalid rxbd[%d]\n", r_idx);
+                       consume = 0;
+               }
+
+               if (!skb) {
+                       pr_warn("skip missing rx_skb[%d]\n", r_idx);
+                       consume = 0;
+               }
+
+               if (skb && (skb_tailroom(skb) < psize)) {
+                       pr_err("skip packet with invalid length: %u > %u\n",
+                              psize, skb_tailroom(skb));
+                       consume = 0;
+               }
+
+               if (skb) {
+                       skb_paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
+                                                 le32_to_cpu(rxbd->addr));
+                       pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE,
+                                        PCI_DMA_FROMDEVICE);
+               }
+
+               if (consume) {
+                       skb_put(skb, psize);
+                       ndev = qtnf_classify_skb(bus, skb);
+                       if (likely(ndev)) {
+                               qtnf_update_rx_stats(ndev, skb);
+                               skb->protocol = eth_type_trans(skb, ndev);
+                               napi_gro_receive(napi, skb);
+                       } else {
+                               pr_debug("drop untagged skb\n");
+                               bus->mux_dev.stats.rx_dropped++;
+                               dev_kfree_skb_any(skb);
+                       }
+               } else {
+                       if (skb) {
+                               bus->mux_dev.stats.rx_dropped++;
+                               dev_kfree_skb_any(skb);
+                       }
+               }
+
+               priv->rx_skb[r_idx] = NULL;
+               if (++r_idx >= priv->rx_bd_num)
+                       r_idx = 0;
+
+               priv->rx_bd_r_index = r_idx;
+
+               /* replace the processed buffer with a new one */
+               w_idx = priv->rx_bd_w_index;
+               while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
+                                 priv->rx_bd_num) > 0) {
+                       if (++w_idx >= priv->rx_bd_num)
+                               w_idx = 0;
+
+                       ret = pearl_skb2rbd_attach(ps, w_idx);
+                       if (ret) {
+                               pr_err("failed to allocate new rx_skb[%d]\n",
+                                      w_idx);
+                               break;
+                       }
+               }
+
+               processed++;
+       }
+
+rx_out:
+       if (processed < budget) {
+               napi_complete(napi);
+               qtnf_en_rxdone_irq(ps);
+       }
+
+       return processed;
+}
+
+static void
+qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev)
+{
+       struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
+
+       tasklet_hi_schedule(&ps->base.reclaim_tq);
+}
+
+static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus)
+{
+       struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
+
+       qtnf_enable_hdp_irqs(ps);
+       napi_enable(&bus->mux_napi);
+}
+
+static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
+{
+       struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
+
+       napi_disable(&bus->mux_napi);
+       qtnf_disable_hdp_irqs(ps);
+}
+
+static const struct qtnf_bus_ops qtnf_pcie_pearl_bus_ops = {
+       /* control path methods */
+       .control_tx     = qtnf_pcie_control_tx,
+
+       /* data path methods */
+       .data_tx                = qtnf_pcie_data_tx,
+       .data_tx_timeout        = qtnf_pcie_data_tx_timeout,
+       .data_rx_start          = qtnf_pcie_data_rx_start,
+       .data_rx_stop           = qtnf_pcie_data_rx_stop,
+};
+
+static int qtnf_dbg_irq_stats(struct seq_file *s, void *data)
+{
+       struct qtnf_bus *bus = dev_get_drvdata(s->private);
+       struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
+       u32 reg = readl(PCIE_HDP_INT_EN(ps->pcie_reg_base));
+       u32 status;
+
+       seq_printf(s, "pcie_irq_count(%u)\n", ps->base.pcie_irq_count);
+       seq_printf(s, "pcie_irq_tx_count(%u)\n", ps->pcie_irq_tx_count);
+       status = reg &  PCIE_HDP_INT_TX_BITS;
+       seq_printf(s, "pcie_irq_tx_status(%s)\n",
+                  (status == PCIE_HDP_INT_TX_BITS) ? "EN" : "DIS");
+       seq_printf(s, "pcie_irq_rx_count(%u)\n", ps->pcie_irq_rx_count);
+       status = reg &  PCIE_HDP_INT_RX_BITS;
+       seq_printf(s, "pcie_irq_rx_status(%s)\n",
+                  (status == PCIE_HDP_INT_RX_BITS) ? "EN" : "DIS");
+       seq_printf(s, "pcie_irq_uf_count(%u)\n", ps->pcie_irq_uf_count);
+       status = reg &  PCIE_HDP_INT_HHBM_UF;
+       seq_printf(s, "pcie_irq_hhbm_uf_status(%s)\n",
+                  (status == PCIE_HDP_INT_HHBM_UF) ? "EN" : "DIS");
+
+       return 0;
+}
+
+static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data)
+{
+       struct qtnf_bus *bus = dev_get_drvdata(s->private);
+       struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
+       struct qtnf_pcie_bus_priv *priv = &ps->base;
+
+       seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count);
+       seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
+       seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
+       seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);
+
+       seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
+       seq_printf(s, "tx_bd_p_index(%u)\n",
+                  readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base))
+                       & (priv->tx_bd_num - 1));
+       seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index);
+       seq_printf(s, "tx queue len(%u)\n",
+                  CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index,
+                           priv->tx_bd_num));
+
+       seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index);
+       seq_printf(s, "rx_bd_p_index(%u)\n",
+                  readl(PCIE_HDP_TX0DMA_CNT(ps->pcie_reg_base))
+                       & (priv->rx_bd_num - 1));
+       seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index);
+       seq_printf(s, "rx alloc queue len(%u)\n",
+                  CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
+                             priv->rx_bd_num));
+
+       return 0;
+}
+
+static int qtnf_ep_fw_send(struct pci_dev *pdev, uint32_t size,
+                          int blk, const u8 *pblk, const u8 *fw)
+{
+       struct qtnf_bus *bus = pci_get_drvdata(pdev);
+
+       struct qtnf_pearl_fw_hdr *hdr;
+       u8 *pdata;
+
+       int hds = sizeof(*hdr);
+       struct sk_buff *skb = NULL;
+       int len = 0;
+       int ret;
+
+       skb = __dev_alloc_skb(QTN_PCIE_FW_BUFSZ, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+
+       skb->len = QTN_PCIE_FW_BUFSZ;
+       skb->dev = NULL;
+
+       hdr = (struct qtnf_pearl_fw_hdr *)skb->data;
+       memcpy(hdr->boardflg, QTN_PCIE_BOARDFLG, strlen(QTN_PCIE_BOARDFLG));
+       hdr->fwsize = cpu_to_le32(size);
+       hdr->seqnum = cpu_to_le32(blk);
+
+       if (blk)
+               hdr->type = cpu_to_le32(QTN_FW_DSUB);
+       else
+               hdr->type = cpu_to_le32(QTN_FW_DBEGIN);
+
+       pdata = skb->data + hds;
+
+       len = QTN_PCIE_FW_BUFSZ - hds;
+       if (pblk >= (fw + size - len)) {
+               len = fw + size - pblk;
+               hdr->type = cpu_to_le32(QTN_FW_DEND);
+       }
+
+       hdr->pktlen = cpu_to_le32(len);
+       memcpy(pdata, pblk, len);
+       hdr->crc = cpu_to_le32(~crc32(0, pdata, len));
+
+       ret = qtnf_pcie_data_tx(bus, skb);
+
+       return (ret == NETDEV_TX_OK) ? len : 0;
+}
+
+static int
+qtnf_ep_fw_load(struct qtnf_pcie_pearl_state *ps, const u8 *fw, u32 fw_size)
+{
+       int blk_size = QTN_PCIE_FW_BUFSZ - sizeof(struct qtnf_pearl_fw_hdr);
+       int blk_count = fw_size / blk_size + ((fw_size % blk_size) ? 1 : 0);
+       const u8 *pblk = fw;
+       int threshold = 0;
+       int blk = 0;
+       int len;
+
+       pr_debug("FW upload started: fw_addr=0x%p size=%d\n", fw, fw_size);
+
+       while (blk < blk_count) {
+               if (++threshold > 10000) {
+                       pr_err("FW upload failed: too many retries\n");
+                       return -ETIMEDOUT;
+               }
+
+               len = qtnf_ep_fw_send(ps->base.pdev, fw_size, blk, pblk, fw);
+               if (len <= 0)
+                       continue;
+
+               if (!((blk + 1) & QTN_PCIE_FW_DLMASK) ||
+                   (blk == (blk_count - 1))) {
+                       qtnf_set_state(&ps->bda->bda_rc_state,
+                                      QTN_RC_FW_SYNC);
+                       if (qtnf_poll_state(&ps->bda->bda_ep_state,
+                                           QTN_EP_FW_SYNC,
+                                           QTN_FW_DL_TIMEOUT_MS)) {
+                               pr_err("FW upload failed: SYNC timed out\n");
+                               return -ETIMEDOUT;
+                       }
+
+                       qtnf_clear_state(&ps->bda->bda_ep_state,
+                                        QTN_EP_FW_SYNC);
+
+                       if (qtnf_is_state(&ps->bda->bda_ep_state,
+                                         QTN_EP_FW_RETRY)) {
+                               if (blk == (blk_count - 1)) {
+                                       int last_round =
+                                               blk_count & QTN_PCIE_FW_DLMASK;
+                                       blk -= last_round;
+                                       pblk -= ((last_round - 1) *
+                                               blk_size + len);
+                               } else {
+                                       blk -= QTN_PCIE_FW_DLMASK;
+                                       pblk -= QTN_PCIE_FW_DLMASK * blk_size;
+                               }
+
+                               qtnf_clear_state(&ps->bda->bda_ep_state,
+                                                QTN_EP_FW_RETRY);
+
+                               pr_warn("FW upload retry: block #%d\n", blk);
+                               continue;
+                       }
+
+                       qtnf_pearl_data_tx_reclaim(ps);
+               }
+
+               pblk += len;
+               blk++;
+       }
+
+       pr_debug("FW upload completed: totally sent %d blocks\n", blk);
+       return 0;
+}
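
The block arithmetic above is straightforward but easy to misread, so here is a hedged, purely illustrative example (the real `QTN_PCIE_FW_BUFSZ` is defined elsewhere in the driver and may differ):

/* Illustrative only: assuming QTN_PCIE_FW_BUFSZ were 2048 bytes, each
 * block would carry 2048 - sizeof(struct qtnf_pearl_fw_hdr) payload
 * bytes, i.e. 2048 - 28 = 2020. A 1 MiB (1048576-byte) image then
 * needs 1048576 / 2020 = 519 full blocks plus one 196-byte tail,
 * so blk_count == 520.
 */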
+
+static void qtnf_pearl_fw_work_handler(struct work_struct *work)
+{
+       struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
+       struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
+       struct pci_dev *pdev = ps->base.pdev;
+       const struct firmware *fw;
+       int ret;
+       u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK;
+       const char *fwname = QTN_PCI_PEARL_FW_NAME;
+       bool fw_boot_success = false;
+
+       if (flashboot) {
+               state |= QTN_RC_FW_FLASHBOOT;
+       } else {
+               ret = request_firmware(&fw, fwname, &pdev->dev);
+               if (ret < 0) {
+                       pr_err("failed to get firmware %s\n", fwname);
+                       goto fw_load_exit;
+               }
+       }
+
+       qtnf_set_state(&ps->bda->bda_rc_state, state);
+
+       if (qtnf_poll_state(&ps->bda->bda_ep_state, QTN_EP_FW_LOADRDY,
+                           QTN_FW_DL_TIMEOUT_MS)) {
+               pr_err("card is not ready\n");
+
+               if (!flashboot)
+                       release_firmware(fw);
+
+               goto fw_load_exit;
+       }
+
+       qtnf_clear_state(&ps->bda->bda_ep_state, QTN_EP_FW_LOADRDY);
+
+       if (flashboot) {
+               pr_info("booting firmware from flash\n");
+
+       } else {
+               pr_info("starting firmware upload: %s\n", fwname);
+
+               ret = qtnf_ep_fw_load(ps, fw->data, fw->size);
+               release_firmware(fw);
+               if (ret) {
+                       pr_err("firmware upload error\n");
+                       goto fw_load_exit;
+               }
+       }
+
+       if (qtnf_poll_state(&ps->bda->bda_ep_state, QTN_EP_FW_DONE,
+                           QTN_FW_DL_TIMEOUT_MS)) {
+               pr_err("firmware bringup timed out\n");
+               goto fw_load_exit;
+       }
+
+       pr_info("firmware is up and running\n");
+
+       if (qtnf_poll_state(&ps->bda->bda_ep_state,
+                           QTN_EP_FW_QLINK_DONE, QTN_FW_QLINK_TIMEOUT_MS)) {
+               pr_err("firmware runtime failure\n");
+               goto fw_load_exit;
+       }
+
+       fw_boot_success = true;
+
+fw_load_exit:
+       qtnf_pcie_fw_boot_done(bus, fw_boot_success, DRV_NAME);
+
+       if (fw_boot_success) {
+               qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats);
+               qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
+       }
+}
+
+static void qtnf_pearl_reclaim_tasklet_fn(unsigned long data)
+{
+       struct qtnf_pcie_pearl_state *ps = (void *)data;
+
+       qtnf_pearl_data_tx_reclaim(ps);
+       qtnf_en_txdone_irq(ps);
+}
+
+static int qtnf_pearl_check_chip_id(struct qtnf_pcie_pearl_state *ps)
+{
+       unsigned int chipid;
+
+       chipid = qtnf_chip_id_get(ps->base.sysctl_bar);
+
+       switch (chipid) {
+       case QTN_CHIP_ID_PEARL:
+       case QTN_CHIP_ID_PEARL_B:
+       case QTN_CHIP_ID_PEARL_C:
+               pr_info("chip ID is 0x%x\n", chipid);
+               break;
+       default:
+               pr_err("incorrect chip ID 0x%x\n", chipid);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int qtnf_pcie_pearl_probe(struct pci_dev *pdev,
+                                const struct pci_device_id *id)
+{
+       struct qtnf_shm_ipc_int ipc_int;
+       struct qtnf_pcie_pearl_state *ps;
+       struct qtnf_bus *bus;
+       int ret;
+       u64 dma_mask;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       dma_mask = DMA_BIT_MASK(64);
+#else
+       dma_mask = DMA_BIT_MASK(32);
+#endif
+
+       ret = qtnf_pcie_probe(pdev, sizeof(*ps), &qtnf_pcie_pearl_bus_ops,
+                             dma_mask, use_msi);
+       if (ret)
+               return ret;
+
+       bus = pci_get_drvdata(pdev);
+       ps = get_bus_priv(bus);
+
+       spin_lock_init(&ps->irq_lock);
+
+       tasklet_init(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn,
+                    (unsigned long)ps);
+       netif_napi_add(&bus->mux_dev, &bus->mux_napi,
+                      qtnf_pcie_pearl_rx_poll, 10);
+       INIT_WORK(&bus->fw_work, qtnf_pearl_fw_work_handler);
+
+       ps->pcie_reg_base = ps->base.dmareg_bar;
+       ps->bda = ps->base.epmem_bar;
+       writel(ps->base.msi_enabled, &ps->bda->bda_rc_msi_enabled);
+
+       ipc_int.fn = qtnf_pcie_pearl_ipc_gen_ep_int;
+       ipc_int.arg = ps;
+       qtnf_pcie_init_shm_ipc(&ps->base, &ps->bda->bda_shm_reg1,
+                              &ps->bda->bda_shm_reg2, &ipc_int);
+
+       ret = qtnf_pearl_check_chip_id(ps);
+       if (ret)
+               goto error;
+
+       ret = qtnf_pcie_pearl_init_xfer(ps);
+       if (ret) {
+               pr_err("PCIE xfer init failed\n");
+               goto error;
+       }
+
+       /* init default irq settings */
+       qtnf_init_hdp_irqs(ps);
+
+       /* start with disabled irqs */
+       qtnf_disable_hdp_irqs(ps);
+
+       ret = devm_request_irq(&pdev->dev, pdev->irq,
+                              &qtnf_pcie_pearl_interrupt, 0,
+                              "qtnf_pcie_irq", (void *)bus);
+       if (ret) {
+               pr_err("failed to request pcie irq %d\n", pdev->irq);
+               goto err_xfer;
+       }
+
+       qtnf_pcie_bringup_fw_async(bus);
+
+       return 0;
+
+err_xfer:
+       qtnf_pearl_free_xfer_buffers(ps);
+error:
+       qtnf_pcie_remove(bus, &ps->base);
+
+       return ret;
+}
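
The probe path selects the DMA mask at build time via CONFIG_ARCH_DMA_ADDR_T_64BIT and lets qtnf_pcie_probe() apply it. For comparison, a hedged sketch of the more common runtime negotiation with the generic DMA API (not what this driver does; example_set_dma_mask is a hypothetical helper):

    #include <linux/dma-mapping.h>

    static int example_set_dma_mask(struct pci_dev *pdev)
    {
            int ret;

            /* prefer 64-bit addressing, fall back to 32-bit on failure */
            ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
            if (ret)
                    ret = dma_set_mask_and_coherent(&pdev->dev,
                                                    DMA_BIT_MASK(32));

            return ret;
    }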
+
+static void qtnf_pcie_pearl_remove(struct pci_dev *pdev)
+{
+       struct qtnf_pcie_pearl_state *ps;
+       struct qtnf_bus *bus;
+
+       bus = pci_get_drvdata(pdev);
+       if (!bus)
+               return;
+
+       ps = get_bus_priv(bus);
+
+       qtnf_pcie_remove(bus, &ps->base);
+       qtnf_pearl_reset_ep(ps);
+       qtnf_pearl_free_xfer_buffers(ps);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int qtnf_pcie_pearl_suspend(struct device *dev)
+{
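+       /* not implemented: an error return makes the PM core abort suspend */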
+       return -EOPNOTSUPP;
+}
+
+static int qtnf_pcie_pearl_resume(struct device *dev)
+{
+       return 0;
+}
+
+/* Power Management Hooks */
+static SIMPLE_DEV_PM_OPS(qtnf_pcie_pearl_pm_ops, qtnf_pcie_pearl_suspend,
+                        qtnf_pcie_pearl_resume);
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct pci_device_id qtnf_pcie_devid_table[] = {
+       {
+               PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QTN_PEARL,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+       },
+       { },
+};
+
+MODULE_DEVICE_TABLE(pci, qtnf_pcie_devid_table);
+
+static struct pci_driver qtnf_pcie_pearl_drv_data = {
+       .name = DRV_NAME,
+       .id_table = qtnf_pcie_devid_table,
+       .probe = qtnf_pcie_pearl_probe,
+       .remove = qtnf_pcie_pearl_remove,
+#ifdef CONFIG_PM_SLEEP
+       .driver = {
+               .pm = &qtnf_pcie_pearl_pm_ops,
+       },
+#endif
+};
+
+static int __init qtnf_pcie_pearl_register(void)
+{
+       pr_info("register Quantenna QSR10g FullMAC PCIE driver\n");
+       return pci_register_driver(&qtnf_pcie_pearl_drv_data);
+}
+
+static void __exit qtnf_pcie_pearl_exit(void)
+{
+       pr_info("unregister Quantenna QSR10g FullMAC PCIE driver\n");
+       pci_unregister_driver(&qtnf_pcie_pearl_drv_data);
+}
+
+module_init(qtnf_pcie_pearl_register);
+module_exit(qtnf_pcie_pearl_exit);
+
+MODULE_AUTHOR("Quantenna Communications");
+MODULE_DESCRIPTION("Quantenna QSR10g PCIe bus driver for 802.11 wireless LAN.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h
new file mode 100644 (file)
index 0000000..f21e97e
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _QTN_FMAC_PCIE_IPC_H_
+#define _QTN_FMAC_PCIE_IPC_H_
+
+#include <linux/types.h>
+
+#include "shm_ipc_defs.h"
+
+/* bitmap for EP status and flags: updated by EP, read by RC */
+#define QTN_EP_HAS_UBOOT       BIT(0)
+#define QTN_EP_HAS_FIRMWARE    BIT(1)
+#define QTN_EP_REQ_UBOOT       BIT(2)
+#define QTN_EP_REQ_FIRMWARE    BIT(3)
+#define QTN_EP_ERROR_UBOOT     BIT(4)
+#define QTN_EP_ERROR_FIRMWARE  BIT(5)
+
+#define QTN_EP_FW_LOADRDY      BIT(8)
+#define QTN_EP_FW_SYNC         BIT(9)
+#define QTN_EP_FW_RETRY                BIT(10)
+#define QTN_EP_FW_QLINK_DONE   BIT(15)
+#define QTN_EP_FW_DONE         BIT(16)
+
+/* bitmap for RC status and flags: updated by RC, read by EP */
+#define QTN_RC_PCIE_LINK       BIT(0)
+#define QTN_RC_NET_LINK                BIT(1)
+#define QTN_RC_FW_FLASHBOOT    BIT(5)
+#define QTN_RC_FW_QLINK                BIT(7)
+#define QTN_RC_FW_LOADRDY      BIT(8)
+#define QTN_RC_FW_SYNC         BIT(9)
+
+#define PCIE_HDP_INT_RX_BITS (0                \
+       | PCIE_HDP_INT_EP_TXDMA         \
+       | PCIE_HDP_INT_EP_TXEMPTY       \
+       | PCIE_HDP_INT_HHBM_UF          \
+       )
+
+#define PCIE_HDP_INT_TX_BITS (0                \
+       | PCIE_HDP_INT_EP_RXDMA         \
+       )
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+#define QTN_HOST_HI32(a)       ((u32)(((u64)a) >> 32))
+#define QTN_HOST_LO32(a)       ((u32)(((u64)a) & 0xffffffffUL))
+#define QTN_HOST_ADDR(h, l)    ((((u64)h) << 32) | ((u64)l))
+#else
+#define QTN_HOST_HI32(a)       0
+#define QTN_HOST_LO32(a)       ((u32)(((u32)a) & 0xffffffffUL))
+#define QTN_HOST_ADDR(h, l)    ((u32)l)
+#endif
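
A quick worked example of the address accessors above on a 64-bit build (values are arbitrary):

    u64 paddr = 0x00000001a0b0c0d0ULL;

    u32 hi   = QTN_HOST_HI32(paddr);   /* 0x00000001 */
    u32 lo   = QTN_HOST_LO32(paddr);   /* 0xa0b0c0d0 */
    u64 back = QTN_HOST_ADDR(hi, lo);  /* reassembles the original paddr */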
+
+#define QTN_PCIE_BDA_VERSION           0x1002
+
+#define PCIE_BDA_NAMELEN               32
+#define PCIE_HHBM_MAX_SIZE             2048
+
+#define QTN_PCIE_BOARDFLG      "PCIEQTN"
+#define QTN_PCIE_FW_DLMASK     0xF
+#define QTN_PCIE_FW_BUFSZ      2048
+
+#define QTN_ENET_ADDR_LENGTH   6
+
+#define QTN_TXDONE_MASK                ((u32)0x80000000)
+#define QTN_GET_LEN(x)         ((x) & 0xFFFF)
+
+#define QTN_PCIE_TX_DESC_LEN_MASK      0xFFFF
+#define QTN_PCIE_TX_DESC_LEN_SHIFT     0
+#define QTN_PCIE_TX_DESC_PORT_MASK     0xF
+#define QTN_PCIE_TX_DESC_PORT_SHIFT    16
+#define QTN_PCIE_TX_DESC_TQE_BIT       BIT(24)
+
+#define QTN_EP_LHOST_TQE_PORT  4
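
As an illustration of the descriptor layout, a sketch of packing a Tx info word from these fields (len is an assumed length variable). Note that the in-tree Tx path fills only the length field, so the port and TQE bits here are purely illustrative:

    u32 info = ((len & QTN_PCIE_TX_DESC_LEN_MASK)
                        << QTN_PCIE_TX_DESC_LEN_SHIFT) |
               ((QTN_EP_LHOST_TQE_PORT & QTN_PCIE_TX_DESC_PORT_MASK)
                        << QTN_PCIE_TX_DESC_PORT_SHIFT) |
               QTN_PCIE_TX_DESC_TQE_BIT;   /* route via the TQE port */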
+
+enum qtnf_pcie_bda_ipc_flags {
+       QTN_PCIE_IPC_FLAG_HBM_MAGIC     = BIT(0),
+       QTN_PCIE_IPC_FLAG_SHM_PIO       = BIT(1),
+};
+
+enum qtnf_fw_loadtype {
+       QTN_FW_DBEGIN,
+       QTN_FW_DSUB,
+       QTN_FW_DEND,
+       QTN_FW_CTRL
+};
+
+#endif /* _QTN_FMAC_PCIE_IPC_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h
new file mode 100644 (file)
index 0000000..0bfe285
--- /dev/null
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) 2015 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __PEARL_PCIE_H
+#define __PEARL_PCIE_H
+
+#define        PCIE_GEN2_BASE                          (0xe9000000)
+#define        PCIE_GEN3_BASE                          (0xe7000000)
+
+#define PEARL_CUR_PCIE_BASE                    (PCIE_GEN2_BASE)
+#define PCIE_HDP_OFFSET                                (0x2000)
+
+#define PCIE_HDP_CTRL(base)                    ((base) + 0x2c00)
+#define PCIE_HDP_AXI_CTRL(base)                        ((base) + 0x2c04)
+#define PCIE_HDP_HOST_WR_DESC0(base)           ((base) + 0x2c10)
+#define PCIE_HDP_HOST_WR_DESC0_H(base)         ((base) + 0x2c14)
+#define PCIE_HDP_HOST_WR_DESC1(base)           ((base) + 0x2c18)
+#define PCIE_HDP_HOST_WR_DESC1_H(base)         ((base) + 0x2c1c)
+#define PCIE_HDP_HOST_WR_DESC2(base)           ((base) + 0x2c20)
+#define PCIE_HDP_HOST_WR_DESC2_H(base)         ((base) + 0x2c24)
+#define PCIE_HDP_HOST_WR_DESC3(base)           ((base) + 0x2c28)
+#define PCIE_HDP_HOST_WR_DESC4_H(base)         ((base) + 0x2c2c)
+#define PCIE_HDP_RX_INT_CTRL(base)             ((base) + 0x2c30)
+#define PCIE_HDP_TX_INT_CTRL(base)             ((base) + 0x2c34)
+#define PCIE_HDP_INT_STATUS(base)              ((base) + 0x2c38)
+#define PCIE_HDP_INT_EN(base)                  ((base) + 0x2c3c)
+#define PCIE_HDP_RX_DESC0_PTR(base)            ((base) + 0x2c40)
+#define PCIE_HDP_RX_DESC0_NOE(base)            ((base) + 0x2c44)
+#define PCIE_HDP_RX_DESC1_PTR(base)            ((base) + 0x2c48)
+#define PCIE_HDP_RX_DESC1_NOE(base)            ((base) + 0x2c4c)
+#define PCIE_HDP_RX_DESC2_PTR(base)            ((base) + 0x2c50)
+#define PCIE_HDP_RX_DESC2_NOE(base)            ((base) + 0x2c54)
+#define PCIE_HDP_RX_DESC3_PTR(base)            ((base) + 0x2c58)
+#define PCIE_HDP_RX_DESC3_NOE(base)            ((base) + 0x2c5c)
+
+#define PCIE_HDP_TX0_BASE_ADDR(base)           ((base) + 0x2c60)
+#define PCIE_HDP_TX1_BASE_ADDR(base)           ((base) + 0x2c64)
+#define PCIE_HDP_TX0_Q_CTRL(base)              ((base) + 0x2c70)
+#define PCIE_HDP_TX1_Q_CTRL(base)              ((base) + 0x2c74)
+#define PCIE_HDP_CFG0(base)                    ((base) + 0x2c80)
+#define PCIE_HDP_CFG1(base)                    ((base) + 0x2c84)
+#define PCIE_HDP_CFG2(base)                    ((base) + 0x2c88)
+#define PCIE_HDP_CFG3(base)                    ((base) + 0x2c8c)
+#define PCIE_HDP_CFG4(base)                    ((base) + 0x2c90)
+#define PCIE_HDP_CFG5(base)                    ((base) + 0x2c94)
+#define PCIE_HDP_CFG6(base)                    ((base) + 0x2c98)
+#define PCIE_HDP_CFG7(base)                    ((base) + 0x2c9c)
+#define PCIE_HDP_CFG8(base)                    ((base) + 0x2ca0)
+#define PCIE_HDP_CFG9(base)                    ((base) + 0x2ca4)
+#define PCIE_HDP_CFG10(base)                   ((base) + 0x2ca8)
+#define PCIE_HDP_CFG11(base)                   ((base) + 0x2cac)
+#define PCIE_INT(base)                         ((base) + 0x2cb0)
+#define PCIE_INT_MASK(base)                    ((base) + 0x2cb4)
+#define PCIE_MSI_MASK(base)                    ((base) + 0x2cb8)
+#define PCIE_MSI_PNDG(base)                    ((base) + 0x2cbc)
+#define PCIE_PRI_CFG(base)                     ((base) + 0x2cc0)
+#define PCIE_PHY_CR(base)                      ((base) + 0x2cc4)
+#define PCIE_HDP_CTAG_CTRL(base)               ((base) + 0x2cf4)
+#define PCIE_HDP_HHBM_BUF_PTR(base)            ((base) + 0x2d00)
+#define PCIE_HDP_HHBM_BUF_PTR_H(base)          ((base) + 0x2d04)
+#define PCIE_HDP_HHBM_BUF_FIFO_NOE(base)       ((base) + 0x2d04)
+#define PCIE_HDP_RX0DMA_CNT(base)              ((base) + 0x2d10)
+#define PCIE_HDP_RX1DMA_CNT(base)              ((base) + 0x2d14)
+#define PCIE_HDP_RX2DMA_CNT(base)              ((base) + 0x2d18)
+#define PCIE_HDP_RX3DMA_CNT(base)              ((base) + 0x2d1c)
+#define PCIE_HDP_TX0DMA_CNT(base)              ((base) + 0x2d20)
+#define PCIE_HDP_TX1DMA_CNT(base)              ((base) + 0x2d24)
+#define PCIE_HDP_RXDMA_CTRL(base)              ((base) + 0x2d28)
+#define PCIE_HDP_TX_HOST_Q_SZ_CTRL(base)       ((base) + 0x2d2c)
+#define PCIE_HDP_TX_HOST_Q_BASE_L(base)                ((base) + 0x2d30)
+#define PCIE_HDP_TX_HOST_Q_BASE_H(base)                ((base) + 0x2d34)
+#define PCIE_HDP_TX_HOST_Q_WR_PTR(base)                ((base) + 0x2d38)
+#define PCIE_HDP_TX_HOST_Q_RD_PTR(base)                ((base) + 0x2d3c)
+#define PCIE_HDP_TX_HOST_Q_STS(base)           ((base) + 0x2d40)
+
+/* Host HBM pool registers */
+#define PCIE_HHBM_CSR_REG(base)                        ((base) + 0x2e00)
+#define PCIE_HHBM_Q_BASE_REG(base)             ((base) + 0x2e04)
+#define PCIE_HHBM_Q_LIMIT_REG(base)            ((base) + 0x2e08)
+#define PCIE_HHBM_Q_WR_REG(base)               ((base) + 0x2e0c)
+#define PCIE_HHBM_Q_RD_REG(base)               ((base) + 0x2e10)
+#define PCIE_HHBM_POOL_DATA_0_H(base)          ((base) + 0x2e90)
+#define PCIE_HHBM_CONFIG(base)                 ((base) + 0x2f9c)
+#define PCIE_HHBM_POOL_REQ_0(base)             ((base) + 0x2f10)
+#define PCIE_HHBM_POOL_DATA_0(base)            ((base) + 0x2f40)
+#define PCIE_HHBM_WATERMARK_MASKED_INT(base)   ((base) + 0x2f68)
+#define PCIE_HHBM_WATERMARK_INT(base)          ((base) + 0x2f6c)
+#define PCIE_HHBM_POOL_WATERMARK(base)         ((base) + 0x2f70)
+#define PCIE_HHBM_POOL_OVERFLOW_CNT(base)      ((base) + 0x2f90)
+#define PCIE_HHBM_POOL_UNDERFLOW_CNT(base)     ((base) + 0x2f94)
+#define HBM_INT_STATUS(base)                   ((base) + 0x2f9c)
+#define PCIE_HHBM_POOL_CNFIG(base)             ((base) + 0x2f9c)
+
+/* host HBM bit field definition */
+#define HHBM_CONFIG_SOFT_RESET                 (BIT(8))
+#define HHBM_WR_REQ                            (BIT(0))
+#define HHBM_RD_REQ                            (BIT(1))
+#define HHBM_DONE                              (BIT(31))
+#define HHBM_64BIT                             (BIT(10))
+
+/* offsets for dual PCIE */
+#define PCIE_PORT_LINK_CTL(base)               ((base) + 0x0710)
+#define PCIE_GEN2_CTL(base)                    ((base) + 0x080C)
+#define PCIE_GEN3_OFF(base)                    ((base) + 0x0890)
+#define PCIE_ATU_CTRL1(base)                   ((base) + 0x0904)
+#define PCIE_ATU_CTRL2(base)                   ((base) + 0x0908)
+#define PCIE_ATU_BASE_LOW(base)                        ((base) + 0x090C)
+#define PCIE_ATU_BASE_HIGH(base)               ((base) + 0x0910)
+#define PCIE_ATU_BASE_LIMIT(base)              ((base) + 0x0914)
+#define PCIE_ATU_TGT_LOW(base)                 ((base) + 0x0918)
+#define PCIE_ATU_TGT_HIGH(base)                        ((base) + 0x091C)
+#define PCIE_DMA_WR_ENABLE(base)               ((base) + 0x097C)
+#define PCIE_DMA_WR_CHWTLOW(base)              ((base) + 0x0988)
+#define PCIE_DMA_WR_CHWTHIG(base)              ((base) + 0x098C)
+#define PCIE_DMA_WR_INTSTS(base)               ((base) + 0x09BC)
+#define PCIE_DMA_WR_INTMASK(base)              ((base) + 0x09C4)
+#define PCIE_DMA_WR_INTCLER(base)              ((base) + 0x09C8)
+#define PCIE_DMA_WR_DONE_IMWR_ADDR_L(base)     ((base) + 0x09D0)
+#define PCIE_DMA_WR_DONE_IMWR_ADDR_H(base)     ((base) + 0x09D4)
+#define PCIE_DMA_WR_ABORT_IMWR_ADDR_L(base)    ((base) + 0x09D8)
+#define PCIE_DMA_WR_ABORT_IMWR_ADDR_H(base)    ((base) + 0x09DC)
+#define PCIE_DMA_WR_IMWR_DATA(base)            ((base) + 0x09E0)
+#define PCIE_DMA_WR_LL_ERR_EN(base)            ((base) + 0x0A00)
+#define PCIE_DMA_WR_DOORBELL(base)             ((base) + 0x0980)
+#define PCIE_DMA_RD_ENABLE(base)               ((base) + 0x099C)
+#define PCIE_DMA_RD_DOORBELL(base)             ((base) + 0x09A0)
+#define PCIE_DMA_RD_CHWTLOW(base)              ((base) + 0x09A8)
+#define PCIE_DMA_RD_CHWTHIG(base)              ((base) + 0x09AC)
+#define PCIE_DMA_RD_INTSTS(base)               ((base) + 0x0A10)
+#define PCIE_DMA_RD_INTMASK(base)              ((base) + 0x0A18)
+#define PCIE_DMA_RD_INTCLER(base)              ((base) + 0x0A1C)
+#define PCIE_DMA_RD_ERR_STS_L(base)            ((base) + 0x0A24)
+#define PCIE_DMA_RD_ERR_STS_H(base)            ((base) + 0x0A28)
+#define PCIE_DMA_RD_LL_ERR_EN(base)            ((base) + 0x0A34)
+#define PCIE_DMA_RD_DONE_IMWR_ADDR_L(base)     ((base) + 0x0A3C)
+#define PCIE_DMA_RD_DONE_IMWR_ADDR_H(base)     ((base) + 0x0A40)
+#define PCIE_DMA_RD_ABORT_IMWR_ADDR_L(base)    ((base) + 0x0A44)
+#define PCIE_DMA_RD_ABORT_IMWR_ADDR_H(base)    ((base) + 0x0A48)
+#define PCIE_DMA_RD_IMWR_DATA(base)            ((base) + 0x0A4C)
+#define PCIE_DMA_CHNL_CONTEXT(base)            ((base) + 0x0A6C)
+#define PCIE_DMA_CHNL_CNTRL(base)              ((base) + 0x0A70)
+#define PCIE_DMA_XFR_SIZE(base)                        ((base) + 0x0A78)
+#define PCIE_DMA_SAR_LOW(base)                 ((base) + 0x0A7C)
+#define PCIE_DMA_SAR_HIGH(base)                        ((base) + 0x0A80)
+#define PCIE_DMA_DAR_LOW(base)                 ((base) + 0x0A84)
+#define PCIE_DMA_DAR_HIGH(base)                        ((base) + 0x0A88)
+#define PCIE_DMA_LLPTR_LOW(base)               ((base) + 0x0A8C)
+#define PCIE_DMA_LLPTR_HIGH(base)              ((base) + 0x0A90)
+#define PCIE_DMA_WRLL_ERR_ENB(base)            ((base) + 0x0A00)
+#define PCIE_DMA_RDLL_ERR_ENB(base)            ((base) + 0x0A34)
+#define PCIE_DMABD_CHNL_CNTRL(base)            ((base) + 0x8000)
+#define PCIE_DMABD_XFR_SIZE(base)              ((base) + 0x8004)
+#define PCIE_DMABD_SAR_LOW(base)               ((base) + 0x8008)
+#define PCIE_DMABD_SAR_HIGH(base)              ((base) + 0x800c)
+#define PCIE_DMABD_DAR_LOW(base)               ((base) + 0x8010)
+#define PCIE_DMABD_DAR_HIGH(base)              ((base) + 0x8014)
+#define PCIE_DMABD_LLPTR_LOW(base)             ((base) + 0x8018)
+#define PCIE_DMABD_LLPTR_HIGH(base)            ((base) + 0x801c)
+#define PCIE_WRDMA0_CHNL_CNTRL(base)           ((base) + 0x8000)
+#define PCIE_WRDMA0_XFR_SIZE(base)             ((base) + 0x8004)
+#define PCIE_WRDMA0_SAR_LOW(base)              ((base) + 0x8008)
+#define PCIE_WRDMA0_SAR_HIGH(base)             ((base) + 0x800c)
+#define PCIE_WRDMA0_DAR_LOW(base)              ((base) + 0x8010)
+#define PCIE_WRDMA0_DAR_HIGH(base)             ((base) + 0x8014)
+#define PCIE_WRDMA0_LLPTR_LOW(base)            ((base) + 0x8018)
+#define PCIE_WRDMA0_LLPTR_HIGH(base)           ((base) + 0x801c)
+#define PCIE_WRDMA1_CHNL_CNTRL(base)           ((base) + 0x8020)
+#define PCIE_WRDMA1_XFR_SIZE(base)             ((base) + 0x8024)
+#define PCIE_WRDMA1_SAR_LOW(base)              ((base) + 0x8028)
+#define PCIE_WRDMA1_SAR_HIGH(base)             ((base) + 0x802c)
+#define PCIE_WRDMA1_DAR_LOW(base)              ((base) + 0x8030)
+#define PCIE_WRDMA1_DAR_HIGH(base)             ((base) + 0x8034)
+#define PCIE_WRDMA1_LLPTR_LOW(base)            ((base) + 0x8038)
+#define PCIE_WRDMA1_LLPTR_HIGH(base)           ((base) + 0x803c)
+#define PCIE_RDDMA0_CHNL_CNTRL(base)           ((base) + 0x8040)
+#define PCIE_RDDMA0_XFR_SIZE(base)             ((base) + 0x8044)
+#define PCIE_RDDMA0_SAR_LOW(base)              ((base) + 0x8048)
+#define PCIE_RDDMA0_SAR_HIGH(base)             ((base) + 0x804c)
+#define PCIE_RDDMA0_DAR_LOW(base)              ((base) + 0x8050)
+#define PCIE_RDDMA0_DAR_HIGH(base)             ((base) + 0x8054)
+#define PCIE_RDDMA0_LLPTR_LOW(base)            ((base) + 0x8058)
+#define PCIE_RDDMA0_LLPTR_HIGH(base)           ((base) + 0x805c)
+#define PCIE_RDDMA1_CHNL_CNTRL(base)           ((base) + 0x8060)
+#define PCIE_RDDMA1_XFR_SIZE(base)             ((base) + 0x8064)
+#define PCIE_RDDMA1_SAR_LOW(base)              ((base) + 0x8068)
+#define PCIE_RDDMA1_SAR_HIGH(base)             ((base) + 0x806c)
+#define PCIE_RDDMA1_DAR_LOW(base)              ((base) + 0x8070)
+#define PCIE_RDDMA1_DAR_HIGH(base)             ((base) + 0x8074)
+#define PCIE_RDDMA1_LLPTR_LOW(base)            ((base) + 0x8078)
+#define PCIE_RDDMA1_LLPTR_HIGH(base)           ((base) + 0x807c)
+
+#define PCIE_ID(base)                          ((base) + 0x0000)
+#define PCIE_CMD(base)                         ((base) + 0x0004)
+#define PCIE_BAR(base, n)                      ((base) + 0x0010 + ((n) << 2))
+#define PCIE_CAP_PTR(base)                     ((base) + 0x0034)
+#define PCIE_MSI_LBAR(base)                    ((base) + 0x0054)
+#define PCIE_MSI_CTRL(base)                    ((base) + 0x0050)
+#define PCIE_MSI_ADDR_L(base)                  ((base) + 0x0054)
+#define PCIE_MSI_ADDR_H(base)                  ((base) + 0x0058)
+#define PCIE_MSI_DATA(base)                    ((base) + 0x005C)
+#define PCIE_MSI_MASK_BIT(base)                        ((base) + 0x0060)
+#define PCIE_MSI_PEND_BIT(base)                        ((base) + 0x0064)
+#define PCIE_DEVCAP(base)                      ((base) + 0x0074)
+#define PCIE_DEVCTLSTS(base)                   ((base) + 0x0078)
+
+#define PCIE_CMDSTS(base)                      ((base) + 0x0004)
+#define PCIE_LINK_STAT(base)                   ((base) + 0x80)
+#define PCIE_LINK_CTL2(base)                   ((base) + 0xa0)
+#define PCIE_ASPM_L1_CTRL(base)                        ((base) + 0x70c)
+#define PCIE_ASPM_LINK_CTRL(base)              (PCIE_LINK_STAT(base))
+#define PCIE_ASPM_L1_SUBSTATE_TIMING(base)     ((base) + 0xB44)
+#define PCIE_L1SUB_CTRL1(base)                 ((base) + 0x150)
+#define PCIE_PMCSR(base)                       ((base) + 0x44)
+#define PCIE_CFG_SPACE_LIMIT(base)             ((base) + 0x100)
+
+/* PCIe link defines */
+#define PEARL_PCIE_LINKUP                      (0x7)
+#define PEARL_PCIE_DATA_LINK                   (BIT(0))
+#define PEARL_PCIE_PHY_LINK                    (BIT(1))
+#define PEARL_PCIE_LINK_RST                    (BIT(3))
+#define PEARL_PCIE_FATAL_ERR                   (BIT(5))
+#define PEARL_PCIE_NONFATAL_ERR                        (BIT(6))
+
+/* PCIe Lane defines */
+#define PCIE_G2_LANE_X1                                ((BIT(0)) << 16)
+#define PCIE_G2_LANE_X2                                ((BIT(0) | BIT(1)) << 16)
+
+/* PCIe DLL link enable */
+#define PCIE_DLL_LINK_EN                       ((BIT(0)) << 5)
+
+#define PCIE_LINK_GEN1                         (BIT(0))
+#define PCIE_LINK_GEN2                         (BIT(1))
+#define PCIE_LINK_GEN3                         (BIT(2))
+#define PCIE_LINK_MODE(x)                      (((x) >> 16) & 0x7)
+
+#define MSI_EN                                 (BIT(0))
+#define MSI_64_EN                              (BIT(7))
+#define PCIE_MSI_ADDR_OFFSET(a)                        ((a) & 0xFFFF)
+#define PCIE_MSI_ADDR_ALIGN(a)                 ((a) & (~0xFFFF))
+
+#define PCIE_BAR_MASK(base, n)                 ((base) + 0x1010 + ((n) << 2))
+#define PCIE_MAX_BAR                           (6)
+
+#define PCIE_ATU_VIEW(base)                    ((base) + 0x0900)
+#define PCIE_ATU_CTL1(base)                    ((base) + 0x0904)
+#define PCIE_ATU_CTL2(base)                    ((base) + 0x0908)
+#define PCIE_ATU_LBAR(base)                    ((base) + 0x090c)
+#define PCIE_ATU_UBAR(base)                    ((base) + 0x0910)
+#define PCIE_ATU_LAR(base)                     ((base) + 0x0914)
+#define PCIE_ATU_LTAR(base)                    ((base) + 0x0918)
+#define PCIE_ATU_UTAR(base)                    ((base) + 0x091c)
+
+#define PCIE_MSI_ADDR_LOWER(base)              ((base) + 0x0820)
+#define PCIE_MSI_ADDR_UPPER(base)              ((base) + 0x0824)
+#define PCIE_MSI_ENABLE(base)                  ((base) + 0x0828)
+#define PCIE_MSI_MASK_RC(base)                 ((base) + 0x082c)
+#define PCIE_MSI_STATUS(base)                  ((base) + 0x0830)
+#define PEARL_PCIE_MSI_REGION                  (0xce000000)
+#define PEARL_PCIE_MSI_DATA                    (0)
+#define PCIE_MSI_GPIO(base)                    ((base) + 0x0888)
+
+#define PCIE_HDP_HOST_QUEUE_FULL       (BIT(17))
+#define USE_BAR_MATCH_MODE
+#define PCIE_ATU_OB_REGION             (BIT(0))
+#define PCIE_ATU_EN_REGION             (BIT(31))
+#define PCIE_ATU_EN_MATCH              (BIT(30))
+#define PCIE_BASE_REGION               (0xb0000000)
+#define PCIE_MEM_MAP_SIZE              (512 * 1024)
+
+#define PCIE_OB_REG_REGION             (0xcf000000)
+#define PCIE_CONFIG_REGION             (0xcf000000)
+#define PCIE_CONFIG_SIZE               (4096)
+#define PCIE_CONFIG_CH                 (1)
+
+/* inbound mapping */
+#define PCIE_IB_BAR0                   (0x00000000)    /* ddr */
+#define PCIE_IB_BAR0_CH                        (0)
+#define PCIE_IB_BAR3                   (0xe0000000)    /* sys_reg */
+#define PCIE_IB_BAR3_CH                        (1)
+
+/* outbound mapping */
+#define PCIE_MEM_CH                    (0)
+#define PCIE_REG_CH                    (1)
+#define PCIE_MEM_REGION                        (0xc0000000)
+#define        PCIE_MEM_SIZE                   (0x000fffff)
+#define PCIE_MEM_TAR                   (0x80000000)
+
+#define PCIE_MSI_REGION                        (0xce000000)
+#define PCIE_MSI_SIZE                  (KBYTE(4) - 1)
+#define PCIE_MSI_CH                    (1)
+
+/* size of config region */
+#define PCIE_CFG_SIZE                  (0x0000ffff)
+
+#define PCIE_ATU_DIR_IB                        (BIT(31))
+#define PCIE_ATU_DIR_OB                        (0)
+#define PCIE_ATU_DIR_CFG               (2)
+#define PCIE_ATU_DIR_MATCH_IB          (BIT(31) | BIT(30))
+
+#define PCIE_DMA_WR_0                  (0)
+#define PCIE_DMA_WR_1                  (1)
+#define PCIE_DMA_RD_0                  (2)
+#define PCIE_DMA_RD_1                  (3)
+
+#define PCIE_DMA_CHNL_CNTRL_CB         (BIT(0))
+#define PCIE_DMA_CHNL_CNTRL_TCB                (BIT(1))
+#define PCIE_DMA_CHNL_CNTRL_LLP                (BIT(2))
+#define PCIE_DMA_CHNL_CNTRL_LIE                (BIT(3))
+#define PCIE_DMA_CHNL_CNTRL_RIE                (BIT(4))
+#define PCIE_DMA_CHNL_CNTRL_CSS                (BIT(8))
+#define PCIE_DMA_CHNL_CNTRL_LLE                (BIT(9))
+#define PCIE_DMA_CHNL_CNTRL_TLP                (BIT(26))
+
+#define PCIE_DMA_CHNL_CONTEXT_RD       (BIT(31))
+#define PCIE_DMA_CHNL_CONTEXT_WR       (0)
+
+/* PCIe HDP interrupt status definition */
+#define PCIE_HDP_INT_EP_RXDMA          (BIT(0))
+#define PCIE_HDP_INT_HBM_UF            (BIT(1))
+#define PCIE_HDP_INT_RX_LEN_ERR                (BIT(2))
+#define PCIE_HDP_INT_RX_HDR_LEN_ERR    (BIT(3))
+#define PCIE_HDP_INT_EP_TXDMA          (BIT(12))
+#define PCIE_HDP_INT_HHBM_UF           (BIT(13))
+#define PCIE_HDP_INT_EP_TXEMPTY                (BIT(15))
+#define PCIE_HDP_INT_IPC               (BIT(29))
+
+/* PCIe interrupt status definition */
+#define PCIE_INT_MSI                   (BIT(24))
+#define PCIE_INT_INTX                  (BIT(23))
+
+/* PCIe legacy INTx */
+#define PEARL_PCIE_CFG0_OFFSET         (0x6C)
+#define PEARL_ASSERT_INTX              (BIT(9))
+
+/* SYS CTL regs */
+#define QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET      (0x001C)
+
+#define QTN_PEARL_IPC_IRQ_WORD(irq)    (BIT(irq) | BIT(irq + 16))
+#define QTN_PEARL_LHOST_IPC_IRQ                (6)
+#define QTN_PEARL_LHOST_EP_RESET       (7)
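
For example, QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_IPC_IRQ) sets the same bit in both halves of the 32-bit word: BIT(6) | BIT(22) == 0x00400040.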
+
+#endif /* __PEARL_PCIE_H */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
deleted file mode 100644 (file)
index 3120d49..0000000
+++ /dev/null
@@ -1,1494 +0,0 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/firmware.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <linux/completion.h>
-#include <linux/crc32.h>
-#include <linux/spinlock.h>
-#include <linux/circ_buf.h>
-#include <linux/log2.h>
-
-#include "qtn_hw_ids.h"
-#include "pcie_bus_priv.h"
-#include "core.h"
-#include "bus.h"
-#include "debug.h"
-
-static bool use_msi = true;
-module_param(use_msi, bool, 0644);
-MODULE_PARM_DESC(use_msi, "set to 0 to use legacy interrupts");
-
-static unsigned int tx_bd_size_param = 32;
-module_param(tx_bd_size_param, uint, 0644);
-MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size, power of two");
-
-static unsigned int rx_bd_size_param = 256;
-module_param(rx_bd_size_param, uint, 0644);
-MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size, power of two");
-
-static u8 flashboot = 1;
-module_param(flashboot, byte, 0644);
-MODULE_PARM_DESC(flashboot, "set to 0 to use FW binary file on FS");
-
-#define DRV_NAME       "qtnfmac_pearl_pcie"
-
-static inline void qtnf_non_posted_write(u32 val, void __iomem *basereg)
-{
-       writel(val, basereg);
-
-       /* flush posted write */
-       readl(basereg);
-}
-
-static inline void qtnf_init_hdp_irqs(struct qtnf_pcie_bus_priv *priv)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&priv->irq_lock, flags);
-       priv->pcie_irq_mask = (PCIE_HDP_INT_RX_BITS | PCIE_HDP_INT_TX_BITS);
-       spin_unlock_irqrestore(&priv->irq_lock, flags);
-}
-
-static inline void qtnf_enable_hdp_irqs(struct qtnf_pcie_bus_priv *priv)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&priv->irq_lock, flags);
-       writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
-       spin_unlock_irqrestore(&priv->irq_lock, flags);
-}
-
-static inline void qtnf_disable_hdp_irqs(struct qtnf_pcie_bus_priv *priv)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&priv->irq_lock, flags);
-       writel(0x0, PCIE_HDP_INT_EN(priv->pcie_reg_base));
-       spin_unlock_irqrestore(&priv->irq_lock, flags);
-}
-
-static inline void qtnf_en_rxdone_irq(struct qtnf_pcie_bus_priv *priv)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&priv->irq_lock, flags);
-       priv->pcie_irq_mask |= PCIE_HDP_INT_RX_BITS;
-       writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
-       spin_unlock_irqrestore(&priv->irq_lock, flags);
-}
-
-static inline void qtnf_dis_rxdone_irq(struct qtnf_pcie_bus_priv *priv)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&priv->irq_lock, flags);
-       priv->pcie_irq_mask &= ~PCIE_HDP_INT_RX_BITS;
-       writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
-       spin_unlock_irqrestore(&priv->irq_lock, flags);
-}
-
-static inline void qtnf_en_txdone_irq(struct qtnf_pcie_bus_priv *priv)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&priv->irq_lock, flags);
-       priv->pcie_irq_mask |= PCIE_HDP_INT_TX_BITS;
-       writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
-       spin_unlock_irqrestore(&priv->irq_lock, flags);
-}
-
-static inline void qtnf_dis_txdone_irq(struct qtnf_pcie_bus_priv *priv)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&priv->irq_lock, flags);
-       priv->pcie_irq_mask &= ~PCIE_HDP_INT_TX_BITS;
-       writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
-       spin_unlock_irqrestore(&priv->irq_lock, flags);
-}
-
-static void qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv *priv)
-{
-       struct pci_dev *pdev = priv->pdev;
-
-       /* fall back to legacy INTx interrupts by default */
-       priv->msi_enabled = 0;
-
-       /* check if MSI capability is available */
-       if (use_msi) {
-               if (!pci_enable_msi(pdev)) {
-                       pr_debug("MSI interrupt enabled\n");
-                       priv->msi_enabled = 1;
-               } else {
-                       pr_warn("failed to enable MSI interrupts\n");
-               }
-       }
-
-       if (!priv->msi_enabled) {
-               pr_warn("legacy PCIE interrupts enabled\n");
-               pci_intx(pdev, 1);
-       }
-}
-
-static void qtnf_deassert_intx(struct qtnf_pcie_bus_priv *priv)
-{
-       void __iomem *reg = priv->sysctl_bar + PEARL_PCIE_CFG0_OFFSET;
-       u32 cfg;
-
-       cfg = readl(reg);
-       cfg &= ~PEARL_ASSERT_INTX;
-       qtnf_non_posted_write(cfg, reg);
-}
-
-static void qtnf_reset_card(struct qtnf_pcie_bus_priv *priv)
-{
-       const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_EP_RESET);
-       void __iomem *reg = priv->sysctl_bar +
-                           QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;
-
-       qtnf_non_posted_write(data, reg);
-       msleep(QTN_EP_RESET_WAIT_MS);
-       pci_restore_state(priv->pdev);
-}
-
-static void qtnf_ipc_gen_ep_int(void *arg)
-{
-       const struct qtnf_pcie_bus_priv *priv = arg;
-       const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_IPC_IRQ);
-       void __iomem *reg = priv->sysctl_bar +
-                           QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;
-
-       qtnf_non_posted_write(data, reg);
-}
-
-static void __iomem *qtnf_map_bar(struct qtnf_pcie_bus_priv *priv, u8 index)
-{
-       void __iomem *vaddr;
-       dma_addr_t busaddr;
-       size_t len;
-       int ret;
-
-       ret = pcim_iomap_regions(priv->pdev, 1 << index, DRV_NAME);
-       if (ret)
-               return IOMEM_ERR_PTR(ret);
-
-       busaddr = pci_resource_start(priv->pdev, index);
-       len = pci_resource_len(priv->pdev, index);
-       vaddr = pcim_iomap_table(priv->pdev)[index];
-       if (!vaddr)
-               return IOMEM_ERR_PTR(-ENOMEM);
-
-       pr_debug("BAR%u vaddr=0x%p busaddr=%pad len=%u\n",
-                index, vaddr, &busaddr, (int)len);
-
-       return vaddr;
-}
-
-static void qtnf_pcie_control_rx_callback(void *arg, const u8 *buf, size_t len)
-{
-       struct qtnf_pcie_bus_priv *priv = arg;
-       struct qtnf_bus *bus = pci_get_drvdata(priv->pdev);
-       struct sk_buff *skb;
-
-       if (unlikely(len == 0)) {
-               pr_warn("zero length packet received\n");
-               return;
-       }
-
-       skb = __dev_alloc_skb(len, GFP_KERNEL);
-
-       if (unlikely(!skb)) {
-               pr_err("failed to allocate skb\n");
-               return;
-       }
-
-       skb_put_data(skb, buf, len);
-
-       qtnf_trans_handle_rx_ctl_packet(bus, skb);
-}
-
-static int qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv)
-{
-       struct qtnf_shm_ipc_region __iomem *ipc_tx_reg;
-       struct qtnf_shm_ipc_region __iomem *ipc_rx_reg;
-       const struct qtnf_shm_ipc_int ipc_int = { qtnf_ipc_gen_ep_int, priv };
-       const struct qtnf_shm_ipc_rx_callback rx_callback = {
-                                       qtnf_pcie_control_rx_callback, priv };
-
-       ipc_tx_reg = &priv->bda->bda_shm_reg1;
-       ipc_rx_reg = &priv->bda->bda_shm_reg2;
-
-       qtnf_shm_ipc_init(&priv->shm_ipc_ep_in, QTNF_SHM_IPC_OUTBOUND,
-                         ipc_tx_reg, priv->workqueue,
-                         &ipc_int, &rx_callback);
-       qtnf_shm_ipc_init(&priv->shm_ipc_ep_out, QTNF_SHM_IPC_INBOUND,
-                         ipc_rx_reg, priv->workqueue,
-                         &ipc_int, &rx_callback);
-
-       return 0;
-}
-
-static void qtnf_pcie_free_shm_ipc(struct qtnf_pcie_bus_priv *priv)
-{
-       qtnf_shm_ipc_free(&priv->shm_ipc_ep_in);
-       qtnf_shm_ipc_free(&priv->shm_ipc_ep_out);
-}
-
-static int qtnf_pcie_init_memory(struct qtnf_pcie_bus_priv *priv)
-{
-       int ret = -ENOMEM;
-
-       priv->sysctl_bar = qtnf_map_bar(priv, QTN_SYSCTL_BAR);
-       if (IS_ERR(priv->sysctl_bar)) {
-               pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR);
-               return ret;
-       }
-
-       priv->dmareg_bar = qtnf_map_bar(priv, QTN_DMA_BAR);
-       if (IS_ERR(priv->dmareg_bar)) {
-               pr_err("failed to map BAR%u\n", QTN_DMA_BAR);
-               return ret;
-       }
-
-       priv->epmem_bar = qtnf_map_bar(priv, QTN_SHMEM_BAR);
-       if (IS_ERR(priv->epmem_bar)) {
-               pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR);
-               return ret;
-       }
-
-       priv->pcie_reg_base = priv->dmareg_bar;
-       priv->bda = priv->epmem_bar;
-       writel(priv->msi_enabled, &priv->bda->bda_rc_msi_enabled);
-
-       return 0;
-}
-
-static void qtnf_tune_pcie_mps(struct qtnf_pcie_bus_priv *priv)
-{
-       struct pci_dev *pdev = priv->pdev;
-       struct pci_dev *parent;
-       int mps_p, mps_o, mps_m, mps;
-       int ret;
-
-       /* current mps */
-       mps_o = pcie_get_mps(pdev);
-
-       /* maximum supported mps */
-       mps_m = 128 << pdev->pcie_mpss;
-
-       /* suggested new mps value */
-       mps = mps_m;
-
-       if (pdev->bus && pdev->bus->self) {
-               /* parent (bus) mps */
-               parent = pdev->bus->self;
-
-               if (pci_is_pcie(parent)) {
-                       mps_p = pcie_get_mps(parent);
-                       mps = min(mps_m, mps_p);
-               }
-       }
-
-       ret = pcie_set_mps(pdev, mps);
-       if (ret) {
-               pr_err("failed to set mps to %d, keep using current %d\n",
-                      mps, mps_o);
-               priv->mps = mps_o;
-               return;
-       }
-
-       pr_debug("set mps to %d (was %d, max %d)\n", mps, mps_o, mps_m);
-       priv->mps = mps;
-}
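
A worked example of the sizing logic above, with assumed values: pdev->pcie_mpss == 2 gives mps_m = 128 << 2 = 512 bytes; if the upstream bridge reports an MPS of 256, the device is programmed with min(512, 256) = 256.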
-
-static int qtnf_is_state(__le32 __iomem *reg, u32 state)
-{
-       u32 s = readl(reg);
-
-       return s & state;
-}
-
-static void qtnf_set_state(__le32 __iomem *reg, u32 state)
-{
-       u32 s = readl(reg);
-
-       qtnf_non_posted_write(state | s, reg);
-}
-
-static void qtnf_clear_state(__le32 __iomem *reg, u32 state)
-{
-       u32 s = readl(reg);
-
-       qtnf_non_posted_write(s & ~state, reg);
-}
-
-static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms)
-{
-       u32 timeout = 0;
-
-       while (!qtnf_is_state(reg, state)) {
-               usleep_range(1000, 1200);
-               if (++timeout > delay_in_ms)
-                       return -1;
-       }
-
-       return 0;
-}
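
The return value is effectively a timeout flag. A usage sketch with names from this driver (the bda_ep_state field and QTN_FW_DL_TIMEOUT_MS constant are assumed from its headers):

    /* wait up to QTN_FW_DL_TIMEOUT_MS for the EP to flag load readiness */
    if (qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_LOADRDY,
                        QTN_FW_DL_TIMEOUT_MS)) {
            pr_err("card is not ready for firmware download\n");
            return -ETIMEDOUT;
    }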
-
-static int alloc_skb_array(struct qtnf_pcie_bus_priv *priv)
-{
-       struct sk_buff **vaddr;
-       int len;
-
-       len = priv->tx_bd_num * sizeof(*priv->tx_skb) +
-               priv->rx_bd_num * sizeof(*priv->rx_skb);
-       vaddr = devm_kzalloc(&priv->pdev->dev, len, GFP_KERNEL);
-
-       if (!vaddr)
-               return -ENOMEM;
-
-       priv->tx_skb = vaddr;
-
-       vaddr += priv->tx_bd_num;
-       priv->rx_skb = vaddr;
-
-       return 0;
-}
-
-static int alloc_bd_table(struct qtnf_pcie_bus_priv *priv)
-{
-       dma_addr_t paddr;
-       void *vaddr;
-       int len;
-
-       len = priv->tx_bd_num * sizeof(struct qtnf_tx_bd) +
-               priv->rx_bd_num * sizeof(struct qtnf_rx_bd);
-
-       vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
-       if (!vaddr)
-               return -ENOMEM;
-
-       /* tx bd */
-
-       memset(vaddr, 0, len);
-
-       priv->bd_table_vaddr = vaddr;
-       priv->bd_table_paddr = paddr;
-       priv->bd_table_len = len;
-
-       priv->tx_bd_vbase = vaddr;
-       priv->tx_bd_pbase = paddr;
-
-       pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
-
-       priv->tx_bd_r_index = 0;
-       priv->tx_bd_w_index = 0;
-
-       /* rx bd */
-
-       vaddr = ((struct qtnf_tx_bd *)vaddr) + priv->tx_bd_num;
-       paddr += priv->tx_bd_num * sizeof(struct qtnf_tx_bd);
-
-       priv->rx_bd_vbase = vaddr;
-       priv->rx_bd_pbase = paddr;
-
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-       writel(QTN_HOST_HI32(paddr),
-              PCIE_HDP_TX_HOST_Q_BASE_H(priv->pcie_reg_base));
-#endif
-       writel(QTN_HOST_LO32(paddr),
-              PCIE_HDP_TX_HOST_Q_BASE_L(priv->pcie_reg_base));
-       writel(priv->rx_bd_num | (sizeof(struct qtnf_rx_bd)) << 16,
-              PCIE_HDP_TX_HOST_Q_SZ_CTRL(priv->pcie_reg_base));
-
-       pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
-
-       return 0;
-}
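
To put numbers on the single coherent allocation above: assuming 16-byte descriptors and the default module parameters (tx_bd_size_param = 32, rx_bd_size_param = 256), the table occupies 32 * 16 + 256 * 16 = 4608 bytes, with the Rx ring starting 512 bytes into the region.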
-
-static int skb2rbd_attach(struct qtnf_pcie_bus_priv *priv, u16 index)
-{
-       struct qtnf_rx_bd *rxbd;
-       struct sk_buff *skb;
-       dma_addr_t paddr;
-
-       skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC);
-       if (!skb) {
-               priv->rx_skb[index] = NULL;
-               return -ENOMEM;
-       }
-
-       priv->rx_skb[index] = skb;
-       rxbd = &priv->rx_bd_vbase[index];
-
-       paddr = pci_map_single(priv->pdev, skb->data,
-                              SKB_BUF_SIZE, PCI_DMA_FROMDEVICE);
-       if (pci_dma_mapping_error(priv->pdev, paddr)) {
-               pr_err("skb DMA mapping error: %pad\n", &paddr);
-               return -ENOMEM;
-       }
-
-       /* keep rx skb paddrs in rx buffer descriptors for cleanup purposes */
-       rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
-       rxbd->addr_h = cpu_to_le32(QTN_HOST_HI32(paddr));
-       rxbd->info = 0x0;
-
-       priv->rx_bd_w_index = index;
-
-       /* sync up all descriptor updates */
-       wmb();
-
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-       writel(QTN_HOST_HI32(paddr),
-              PCIE_HDP_HHBM_BUF_PTR_H(priv->pcie_reg_base));
-#endif
-       writel(QTN_HOST_LO32(paddr),
-              PCIE_HDP_HHBM_BUF_PTR(priv->pcie_reg_base));
-
-       writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(priv->pcie_reg_base));
-       return 0;
-}
-
-static int alloc_rx_buffers(struct qtnf_pcie_bus_priv *priv)
-{
-       u16 i;
-       int ret = 0;
-
-       memset(priv->rx_bd_vbase, 0x0,
-              priv->rx_bd_num * sizeof(struct qtnf_rx_bd));
-
-       for (i = 0; i < priv->rx_bd_num; i++) {
-               ret = skb2rbd_attach(priv, i);
-               if (ret)
-                       break;
-       }
-
-       return ret;
-}
-
-/* all rx/tx activity should have ceased before calling this function */
-static void qtnf_free_xfer_buffers(struct qtnf_pcie_bus_priv *priv)
-{
-       struct qtnf_tx_bd *txbd;
-       struct qtnf_rx_bd *rxbd;
-       struct sk_buff *skb;
-       dma_addr_t paddr;
-       int i;
-
-       /* free rx buffers */
-       for (i = 0; i < priv->rx_bd_num; i++) {
-               if (priv->rx_skb && priv->rx_skb[i]) {
-                       rxbd = &priv->rx_bd_vbase[i];
-                       skb = priv->rx_skb[i];
-                       paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
-                                             le32_to_cpu(rxbd->addr));
-                       pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE,
-                                        PCI_DMA_FROMDEVICE);
-                       dev_kfree_skb_any(skb);
-                       priv->rx_skb[i] = NULL;
-               }
-       }
-
-       /* free tx buffers */
-       for (i = 0; i < priv->tx_bd_num; i++) {
-               if (priv->tx_skb && priv->tx_skb[i]) {
-                       txbd = &priv->tx_bd_vbase[i];
-                       skb = priv->tx_skb[i];
-                       paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
-                                             le32_to_cpu(txbd->addr));
-                       pci_unmap_single(priv->pdev, paddr, skb->len,
-                                        PCI_DMA_TODEVICE);
-                       dev_kfree_skb_any(skb);
-                       priv->tx_skb[i] = NULL;
-               }
-       }
-}
-
-static int qtnf_hhbm_init(struct qtnf_pcie_bus_priv *priv)
-{
-       u32 val;
-
-       val = readl(PCIE_HHBM_CONFIG(priv->pcie_reg_base));
-       val |= HHBM_CONFIG_SOFT_RESET;
-       writel(val, PCIE_HHBM_CONFIG(priv->pcie_reg_base));
-       usleep_range(50, 100);
-       val &= ~HHBM_CONFIG_SOFT_RESET;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-       val |= HHBM_64BIT;
-#endif
-       writel(val, PCIE_HHBM_CONFIG(priv->pcie_reg_base));
-       writel(priv->rx_bd_num, PCIE_HHBM_Q_LIMIT_REG(priv->pcie_reg_base));
-
-       return 0;
-}
-
-static int qtnf_pcie_init_xfer(struct qtnf_pcie_bus_priv *priv)
-{
-       int ret;
-       u32 val;
-
-       priv->tx_bd_num = tx_bd_size_param;
-       priv->rx_bd_num = rx_bd_size_param;
-       priv->rx_bd_w_index = 0;
-       priv->rx_bd_r_index = 0;
-
-       if (!priv->tx_bd_num || !is_power_of_2(priv->tx_bd_num)) {
-               pr_err("tx_bd_size_param %u is not power of two\n",
-                      priv->tx_bd_num);
-               return -EINVAL;
-       }
-
-       val = priv->tx_bd_num * sizeof(struct qtnf_tx_bd);
-       if (val > PCIE_HHBM_MAX_SIZE) {
-               pr_err("tx_bd_size_param %u is too large\n",
-                      priv->tx_bd_num);
-               return -EINVAL;
-       }
-
-       if (!priv->rx_bd_num || !is_power_of_2(priv->rx_bd_num)) {
-               pr_err("rx_bd_size_param %u is not power of two\n",
-                      priv->rx_bd_num);
-               return -EINVAL;
-       }
-
-       val = priv->rx_bd_num * sizeof(dma_addr_t);
-       if (val > PCIE_HHBM_MAX_SIZE) {
-               pr_err("rx_bd_size_param %u is too large\n",
-                      priv->rx_bd_num);
-               return -EINVAL;
-       }
-
-       ret = qtnf_hhbm_init(priv);
-       if (ret) {
-               pr_err("failed to init h/w queues\n");
-               return ret;
-       }
-
-       ret = alloc_skb_array(priv);
-       if (ret) {
-               pr_err("failed to allocate skb array\n");
-               return ret;
-       }
-
-       ret = alloc_bd_table(priv);
-       if (ret) {
-               pr_err("failed to allocate bd table\n");
-               return ret;
-       }
-
-       ret = alloc_rx_buffers(priv);
-       if (ret) {
-               pr_err("failed to allocate rx buffers\n");
-               return ret;
-       }
-
-       return ret;
-}
-
-static void qtnf_pcie_data_tx_reclaim(struct qtnf_pcie_bus_priv *priv)
-{
-       struct qtnf_tx_bd *txbd;
-       struct sk_buff *skb;
-       unsigned long flags;
-       dma_addr_t paddr;
-       u32 tx_done_index;
-       int count = 0;
-       int i;
-
-       spin_lock_irqsave(&priv->tx_reclaim_lock, flags);
-
-       tx_done_index = readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base))
-                       & (priv->tx_bd_num - 1);
-
-       i = priv->tx_bd_r_index;
-
-       while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) {
-               skb = priv->tx_skb[i];
-               if (likely(skb)) {
-                       txbd = &priv->tx_bd_vbase[i];
-                       paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
-                                             le32_to_cpu(txbd->addr));
-                       pci_unmap_single(priv->pdev, paddr, skb->len,
-                                        PCI_DMA_TODEVICE);
-
-                       if (skb->dev) {
-                               qtnf_update_tx_stats(skb->dev, skb);
-                               if (unlikely(priv->tx_stopped)) {
-                                       qtnf_wake_all_queues(skb->dev);
-                                       priv->tx_stopped = 0;
-                               }
-                       }
-
-                       dev_kfree_skb_any(skb);
-               }
-
-               priv->tx_skb[i] = NULL;
-               count++;
-
-               if (++i >= priv->tx_bd_num)
-                       i = 0;
-       }
-
-       priv->tx_reclaim_done += count;
-       priv->tx_reclaim_req++;
-       priv->tx_bd_r_index = i;
-
-       spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags);
-}
-
-static int qtnf_tx_queue_ready(struct qtnf_pcie_bus_priv *priv)
-{
-       if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
-                       priv->tx_bd_num)) {
-               qtnf_pcie_data_tx_reclaim(priv);
-
-               if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
-                               priv->tx_bd_num)) {
-                       pr_warn_ratelimited("Tx queue still full after reclaim\n");
-                       priv->tx_full_count++;
-                       return 0;
-               }
-       }
-
-       return 1;
-}
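
Both this readiness check and the reclaim path above lean on the <linux/circ_buf.h> helpers over power-of-two rings. A short worked example with assumed indices:

    /* ring of size 8 with w_index = 5 and r_index = 2:
     *   CIRC_CNT(5, 2, 8)   == 3   entries awaiting reclaim
     *   CIRC_SPACE(5, 2, 8) == 4   free slots (one slot always stays
     *                              unused so that full != empty)
     */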
-
-static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
-{
-       struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
-       dma_addr_t txbd_paddr, skb_paddr;
-       struct qtnf_tx_bd *txbd;
-       unsigned long flags;
-       int len, i;
-       u32 info;
-       int ret = 0;
-
-       spin_lock_irqsave(&priv->tx0_lock, flags);
-
-       if (!qtnf_tx_queue_ready(priv)) {
-               if (skb->dev) {
-                       netif_tx_stop_all_queues(skb->dev);
-                       priv->tx_stopped = 1;
-               }
-
-               spin_unlock_irqrestore(&priv->tx0_lock, flags);
-               return NETDEV_TX_BUSY;
-       }
-
-       i = priv->tx_bd_w_index;
-       priv->tx_skb[i] = skb;
-       len = skb->len;
-
-       skb_paddr = pci_map_single(priv->pdev, skb->data,
-                                  skb->len, PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(priv->pdev, skb_paddr)) {
-               pr_err("skb DMA mapping error: %pad\n", &skb_paddr);
-               ret = -ENOMEM;
-               goto tx_done;
-       }
-
-       txbd = &priv->tx_bd_vbase[i];
-       txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr));
-       txbd->addr_h = cpu_to_le32(QTN_HOST_HI32(skb_paddr));
-
-       info = (len & QTN_PCIE_TX_DESC_LEN_MASK) << QTN_PCIE_TX_DESC_LEN_SHIFT;
-       txbd->info = cpu_to_le32(info);
-
-       /* sync up all descriptor updates before passing them to EP */
-       dma_wmb();
-
-       /* write new TX descriptor to PCIE_RX_FIFO on EP */
-       txbd_paddr = priv->tx_bd_pbase + i * sizeof(struct qtnf_tx_bd);
-
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-       writel(QTN_HOST_HI32(txbd_paddr),
-              PCIE_HDP_HOST_WR_DESC0_H(priv->pcie_reg_base));
-#endif
-       writel(QTN_HOST_LO32(txbd_paddr),
-              PCIE_HDP_HOST_WR_DESC0(priv->pcie_reg_base));
-
-       if (++i >= priv->tx_bd_num)
-               i = 0;
-
-       priv->tx_bd_w_index = i;
-
-tx_done:
-       if (ret && skb) {
-               pr_err_ratelimited("drop skb\n");
-               if (skb->dev)
-                       skb->dev->stats.tx_dropped++;
-               dev_kfree_skb_any(skb);
-       }
-
-       priv->tx_done_count++;
-       spin_unlock_irqrestore(&priv->tx0_lock, flags);
-
-       qtnf_pcie_data_tx_reclaim(priv);
-
-       return NETDEV_TX_OK;
-}
-
-static int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb)
-{
-       struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
-       int ret;
-
-       ret = qtnf_shm_ipc_send(&priv->shm_ipc_ep_in, skb->data, skb->len);
-
-       if (ret == -ETIMEDOUT) {
-               pr_err("EP firmware is dead\n");
-               bus->fw_state = QTNF_FW_STATE_EP_DEAD;
-       }
-
-       return ret;
-}
-
-static irqreturn_t qtnf_interrupt(int irq, void *data)
-{
-       struct qtnf_bus *bus = (struct qtnf_bus *)data;
-       struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
-       u32 status;
-
-       priv->pcie_irq_count++;
-       status = readl(PCIE_HDP_INT_STATUS(priv->pcie_reg_base));
-
-       qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
-       qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);
-
-       if (!(status & priv->pcie_irq_mask))
-               goto irq_done;
-
-       if (status & PCIE_HDP_INT_RX_BITS)
-               priv->pcie_irq_rx_count++;
-
-       if (status & PCIE_HDP_INT_TX_BITS)
-               priv->pcie_irq_tx_count++;
-
-       if (status & PCIE_HDP_INT_HHBM_UF)
-               priv->pcie_irq_uf_count++;
-
-       if (status & PCIE_HDP_INT_RX_BITS) {
-               qtnf_dis_rxdone_irq(priv);
-               napi_schedule(&bus->mux_napi);
-       }
-
-       if (status & PCIE_HDP_INT_TX_BITS) {
-               qtnf_dis_txdone_irq(priv);
-               tasklet_hi_schedule(&priv->reclaim_tq);
-       }
-
-irq_done:
-       /* H/W workaround: clean all bits, not only enabled */
-       qtnf_non_posted_write(~0U, PCIE_HDP_INT_STATUS(priv->pcie_reg_base));
-
-       if (!priv->msi_enabled)
-               qtnf_deassert_intx(priv);
-
-       return IRQ_HANDLED;
-}
-
-static int qtnf_rx_data_ready(struct qtnf_pcie_bus_priv *priv)
-{
-       u16 index = priv->rx_bd_r_index;
-       struct qtnf_rx_bd *rxbd;
-       u32 descw;
-
-       rxbd = &priv->rx_bd_vbase[index];
-       descw = le32_to_cpu(rxbd->info);
-
-       if (descw & QTN_TXDONE_MASK)
-               return 1;
-
-       return 0;
-}
-
-static int qtnf_rx_poll(struct napi_struct *napi, int budget)
-{
-       struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
-       struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
-       struct net_device *ndev = NULL;
-       struct sk_buff *skb = NULL;
-       int processed = 0;
-       struct qtnf_rx_bd *rxbd;
-       dma_addr_t skb_paddr;
-       int consume;
-       u32 descw;
-       u32 psize;
-       u16 r_idx;
-       u16 w_idx;
-       int ret;
-
-       while (processed < budget) {
-               if (!qtnf_rx_data_ready(priv))
-                       goto rx_out;
-
-               r_idx = priv->rx_bd_r_index;
-               rxbd = &priv->rx_bd_vbase[r_idx];
-               descw = le32_to_cpu(rxbd->info);
-
-               skb = priv->rx_skb[r_idx];
-               psize = QTN_GET_LEN(descw);
-               consume = 1;
-
-               if (!(descw & QTN_TXDONE_MASK)) {
-                       pr_warn("skip invalid rxbd[%d]\n", r_idx);
-                       consume = 0;
-               }
-
-               if (!skb) {
-                       pr_warn("skip missing rx_skb[%d]\n", r_idx);
-                       consume = 0;
-               }
-
-               if (skb && (skb_tailroom(skb) < psize)) {
-                       pr_err("skip packet with invalid length: %u > %u\n",
-                              psize, skb_tailroom(skb));
-                       consume = 0;
-               }
-
-               if (skb) {
-                       skb_paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
-                                                 le32_to_cpu(rxbd->addr));
-                       pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE,
-                                        PCI_DMA_FROMDEVICE);
-               }
-
-               if (consume) {
-                       skb_put(skb, psize);
-                       ndev = qtnf_classify_skb(bus, skb);
-                       if (likely(ndev)) {
-                               qtnf_update_rx_stats(ndev, skb);
-                               skb->protocol = eth_type_trans(skb, ndev);
-                               napi_gro_receive(napi, skb);
-                       } else {
-                               pr_debug("drop untagged skb\n");
-                               bus->mux_dev.stats.rx_dropped++;
-                               dev_kfree_skb_any(skb);
-                       }
-               } else {
-                       if (skb) {
-                               bus->mux_dev.stats.rx_dropped++;
-                               dev_kfree_skb_any(skb);
-                       }
-               }
-
-               priv->rx_skb[r_idx] = NULL;
-               if (++r_idx >= priv->rx_bd_num)
-                       r_idx = 0;
-
-               priv->rx_bd_r_index = r_idx;
-
-               /* replace the processed buffer with a new one */
-               w_idx = priv->rx_bd_w_index;
-               while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
-                                 priv->rx_bd_num) > 0) {
-                       if (++w_idx >= priv->rx_bd_num)
-                               w_idx = 0;
-
-                       ret = skb2rbd_attach(priv, w_idx);
-                       if (ret) {
-                               pr_err("failed to allocate new rx_skb[%d]\n",
-                                      w_idx);
-                               break;
-                       }
-               }
-
-               processed++;
-       }
-
-rx_out:
-       if (processed < budget) {
-               napi_complete(napi);
-               qtnf_en_rxdone_irq(priv);
-       }
-
-       return processed;
-}
-
-static void
-qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev)
-{
-       struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
-
-       tasklet_hi_schedule(&priv->reclaim_tq);
-}
-
-static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus)
-{
-       struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
-
-       qtnf_enable_hdp_irqs(priv);
-       napi_enable(&bus->mux_napi);
-}
-
-static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
-{
-       struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
-
-       napi_disable(&bus->mux_napi);
-       qtnf_disable_hdp_irqs(priv);
-}
-
-static const struct qtnf_bus_ops qtnf_pcie_bus_ops = {
-       /* control path methods */
-       .control_tx     = qtnf_pcie_control_tx,
-
-       /* data path methods */
-       .data_tx                = qtnf_pcie_data_tx,
-       .data_tx_timeout        = qtnf_pcie_data_tx_timeout,
-       .data_rx_start          = qtnf_pcie_data_rx_start,
-       .data_rx_stop           = qtnf_pcie_data_rx_stop,
-};
-
-static int qtnf_dbg_mps_show(struct seq_file *s, void *data)
-{
-       struct qtnf_bus *bus = dev_get_drvdata(s->private);
-       struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
-
-       seq_printf(s, "%d\n", priv->mps);
-
-       return 0;
-}
-
-static int qtnf_dbg_msi_show(struct seq_file *s, void *data)
-{
-       struct qtnf_bus *bus = dev_get_drvdata(s->private);
-       struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
-
-       seq_printf(s, "%u\n", priv->msi_enabled);
-
-       return 0;
-}
-
-static int qtnf_dbg_irq_stats(struct seq_file *s, void *data)
-{
-       struct qtnf_bus *bus = dev_get_drvdata(s->private);
-       struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
-       u32 reg = readl(PCIE_HDP_INT_EN(priv->pcie_reg_base));
-       u32 status;
-
-       seq_printf(s, "pcie_irq_count(%u)\n", priv->pcie_irq_count);
-       seq_printf(s, "pcie_irq_tx_count(%u)\n", priv->pcie_irq_tx_count);
-       status = reg & PCIE_HDP_INT_TX_BITS;
-       seq_printf(s, "pcie_irq_tx_status(%s)\n",
-                  (status == PCIE_HDP_INT_TX_BITS) ? "EN" : "DIS");
-       seq_printf(s, "pcie_irq_rx_count(%u)\n", priv->pcie_irq_rx_count);
-       status = reg & PCIE_HDP_INT_RX_BITS;
-       seq_printf(s, "pcie_irq_rx_status(%s)\n",
-                  (status == PCIE_HDP_INT_RX_BITS) ? "EN" : "DIS");
-       seq_printf(s, "pcie_irq_uf_count(%u)\n", priv->pcie_irq_uf_count);
-       status = reg & PCIE_HDP_INT_HHBM_UF;
-       seq_printf(s, "pcie_irq_hhbm_uf_status(%s)\n",
-                  (status == PCIE_HDP_INT_HHBM_UF) ? "EN" : "DIS");
-
-       return 0;
-}
-
-static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data)
-{
-       struct qtnf_bus *bus = dev_get_drvdata(s->private);
-       struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
-
-       seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count);
-       seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
-       seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
-       seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);
-
-       seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
-       seq_printf(s, "tx_bd_p_index(%u)\n",
-                  readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base))
-                       & (priv->tx_bd_num - 1));
-       seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index);
-       seq_printf(s, "tx queue len(%u)\n",
-                  CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index,
-                           priv->tx_bd_num));
-
-       seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index);
-       seq_printf(s, "rx_bd_p_index(%u)\n",
-                  readl(PCIE_HDP_TX0DMA_CNT(priv->pcie_reg_base))
-                       & (priv->rx_bd_num - 1));
-       seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index);
-       seq_printf(s, "rx alloc queue len(%u)\n",
-                  CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
-                             priv->rx_bd_num));
-
-       return 0;
-}
-
-static int qtnf_dbg_shm_stats(struct seq_file *s, void *data)
-{
-       struct qtnf_bus *bus = dev_get_drvdata(s->private);
-       struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
-
-       seq_printf(s, "shm_ipc_ep_in.tx_packet_count(%zu)\n",
-                  priv->shm_ipc_ep_in.tx_packet_count);
-       seq_printf(s, "shm_ipc_ep_in.rx_packet_count(%zu)\n",
-                  priv->shm_ipc_ep_in.rx_packet_count);
-       seq_printf(s, "shm_ipc_ep_out.tx_packet_count(%zu)\n",
-                  priv->shm_ipc_ep_out.tx_packet_count);
-       seq_printf(s, "shm_ipc_ep_out.rx_packet_count(%zu)\n",
-                  priv->shm_ipc_ep_out.rx_packet_count);
-
-       return 0;
-}
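
All five show handlers above expect s->private to be the struct device, recovering the bus via dev_get_drvdata(). A sketch of one way such a handler gets wired up, using debugfs_create_devm_seqfile(); whether the driver's own qtnf_debugfs_add_entry() wrapper (used in the fw work handler below) does exactly this is an assumption:

	#include <linux/debugfs.h>
	#include <linux/seq_file.h>

	static void example_debugfs_wire(struct qtnf_bus *bus)
	{
		/* "example" is an illustrative directory name */
		struct dentry *dir = debugfs_create_dir("example", NULL);

		/* the read_fn receives a seq_file whose ->private is
		 * bus->dev, matching dev_get_drvdata(s->private) above */
		debugfs_create_devm_seqfile(bus->dev, "mps", dir,
					    qtnf_dbg_mps_show);
	}
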
-
-static int qtnf_ep_fw_send(struct qtnf_pcie_bus_priv *priv, uint32_t size,
-                          int blk, const u8 *pblk, const u8 *fw)
-{
-       struct pci_dev *pdev = priv->pdev;
-       struct qtnf_bus *bus = pci_get_drvdata(pdev);
-
-       struct qtnf_pcie_fw_hdr *hdr;
-       u8 *pdata;
-
-       int hds = sizeof(*hdr);
-       struct sk_buff *skb = NULL;
-       int len = 0;
-       int ret;
-
-       skb = __dev_alloc_skb(QTN_PCIE_FW_BUFSZ, GFP_KERNEL);
-       if (!skb)
-               return -ENOMEM;
-
-       skb->len = QTN_PCIE_FW_BUFSZ;
-       skb->dev = NULL;
-
-       hdr = (struct qtnf_pcie_fw_hdr *)skb->data;
-       memcpy(hdr->boardflg, QTN_PCIE_BOARDFLG, strlen(QTN_PCIE_BOARDFLG));
-       hdr->fwsize = cpu_to_le32(size);
-       hdr->seqnum = cpu_to_le32(blk);
-
-       if (blk)
-               hdr->type = cpu_to_le32(QTN_FW_DSUB);
-       else
-               hdr->type = cpu_to_le32(QTN_FW_DBEGIN);
-
-       pdata = skb->data + hds;
-
-       len = QTN_PCIE_FW_BUFSZ - hds;
-       if (pblk >= (fw + size - len)) {
-               len = fw + size - pblk;
-               hdr->type = cpu_to_le32(QTN_FW_DEND);
-       }
-
-       hdr->pktlen = cpu_to_le32(len);
-       memcpy(pdata, pblk, len);
-       hdr->crc = cpu_to_le32(~crc32(0, pdata, len));
-
-       ret = qtnf_pcie_data_tx(bus, skb);
-
-       return (ret == NETDEV_TX_OK) ? len : 0;
-}
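
The sender stores the bitwise complement of crc32(0, ...) over the payload, with little-endian lengths in the header. A sketch of the matching check a consumer of these blocks could perform, assuming the same linux/crc32.h routine and a contiguous header-plus-payload buffer (the real check runs in device firmware, outside this driver):

	#include <linux/crc32.h>

	static bool example_fw_block_crc_ok(const struct qtnf_pcie_fw_hdr *hdr)
	{
		const u8 *payload = (const u8 *)hdr + sizeof(*hdr);
		u32 len = le32_to_cpu(hdr->pktlen);

		/* the sender wrote cpu_to_le32(~crc32(0, payload, len)) */
		return le32_to_cpu(hdr->crc) == ~crc32(0, payload, len);
	}
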
-
-static int
-qtnf_ep_fw_load(struct qtnf_pcie_bus_priv *priv, const u8 *fw, u32 fw_size)
-{
-       int blk_size = QTN_PCIE_FW_BUFSZ - sizeof(struct qtnf_pcie_fw_hdr);
-       int blk_count = fw_size / blk_size + ((fw_size % blk_size) ? 1 : 0);
-       const u8 *pblk = fw;
-       int threshold = 0;
-       int blk = 0;
-       int len;
-
-       pr_debug("FW upload started: fw_addr=0x%p size=%d\n", fw, fw_size);
-
-       while (blk < blk_count) {
-               if (++threshold > 10000) {
-                       pr_err("FW upload failed: too many retries\n");
-                       return -ETIMEDOUT;
-               }
-
-               len = qtnf_ep_fw_send(priv, fw_size, blk, pblk, fw);
-               if (len <= 0)
-                       continue;
-
-               if (!((blk + 1) & QTN_PCIE_FW_DLMASK) ||
-                   (blk == (blk_count - 1))) {
-                       qtnf_set_state(&priv->bda->bda_rc_state,
-                                      QTN_RC_FW_SYNC);
-                       if (qtnf_poll_state(&priv->bda->bda_ep_state,
-                                           QTN_EP_FW_SYNC,
-                                           QTN_FW_DL_TIMEOUT_MS)) {
-                               pr_err("FW upload failed: SYNC timed out\n");
-                               return -ETIMEDOUT;
-                       }
-
-                       qtnf_clear_state(&priv->bda->bda_ep_state,
-                                        QTN_EP_FW_SYNC);
-
-                       if (qtnf_is_state(&priv->bda->bda_ep_state,
-                                         QTN_EP_FW_RETRY)) {
-                               if (blk == (blk_count - 1)) {
-                                       int last_round =
-                                               blk_count & QTN_PCIE_FW_DLMASK;
-                                       blk -= last_round;
-                                       pblk -= ((last_round - 1) *
-                                               blk_size + len);
-                               } else {
-                                       blk -= QTN_PCIE_FW_DLMASK;
-                                       pblk -= QTN_PCIE_FW_DLMASK * blk_size;
-                               }
-
-                               qtnf_clear_state(&priv->bda->bda_ep_state,
-                                                QTN_EP_FW_RETRY);
-
-                               pr_warn("FW upload retry: block #%d\n", blk);
-                               continue;
-                       }
-
-                       qtnf_pcie_data_tx_reclaim(priv);
-               }
-
-               pblk += len;
-               blk++;
-       }
-
-       pr_debug("FW upload completed: sent %d blocks in total\n", blk);
-       return 0;
-}
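
The block count above is a hand-rolled round-up division, and QTN_PCIE_FW_DLMASK = 0xF makes the SYNC window 16 blocks. A sketch of the same count via the kernel helper:

	#include <linux/kernel.h>

	static u32 example_fw_blk_count(u32 fw_size, u32 blk_size)
	{
		/* equals fw_size / blk_size + ((fw_size % blk_size) ? 1 : 0) */
		return DIV_ROUND_UP(fw_size, blk_size);
	}
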
-
-static void qtnf_fw_work_handler(struct work_struct *work)
-{
-       struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
-       struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
-       struct pci_dev *pdev = priv->pdev;
-       const struct firmware *fw;
-       int ret;
-       u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK;
-
-       if (flashboot) {
-               state |= QTN_RC_FW_FLASHBOOT;
-       } else {
-               ret = request_firmware(&fw, bus->fwname, &pdev->dev);
-               if (ret < 0) {
-                       pr_err("failed to get firmware %s\n", bus->fwname);
-                       goto fw_load_fail;
-               }
-       }
-
-       qtnf_set_state(&priv->bda->bda_rc_state, state);
-
-       if (qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_LOADRDY,
-                           QTN_FW_DL_TIMEOUT_MS)) {
-               pr_err("card is not ready\n");
-
-               if (!flashboot)
-                       release_firmware(fw);
-
-               goto fw_load_fail;
-       }
-
-       qtnf_clear_state(&priv->bda->bda_ep_state, QTN_EP_FW_LOADRDY);
-
-       if (flashboot) {
-               pr_info("booting firmware from flash\n");
-       } else {
-               pr_info("starting firmware upload: %s\n", bus->fwname);
-
-               ret = qtnf_ep_fw_load(priv, fw->data, fw->size);
-               release_firmware(fw);
-               if (ret) {
-                       pr_err("firmware upload error\n");
-                       goto fw_load_fail;
-               }
-       }
-
-       if (qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_DONE,
-                           QTN_FW_DL_TIMEOUT_MS)) {
-               pr_err("firmware bringup timed out\n");
-               goto fw_load_fail;
-       }
-
-       bus->fw_state = QTNF_FW_STATE_FW_DNLD_DONE;
-       pr_info("firmware is up and running\n");
-
-       if (qtnf_poll_state(&priv->bda->bda_ep_state,
-                           QTN_EP_FW_QLINK_DONE, QTN_FW_QLINK_TIMEOUT_MS)) {
-               pr_err("firmware runtime failure\n");
-               goto fw_load_fail;
-       }
-
-       ret = qtnf_core_attach(bus);
-       if (ret) {
-               pr_err("failed to attach core\n");
-               goto fw_load_fail;
-       }
-
-       qtnf_debugfs_init(bus, DRV_NAME);
-       qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show);
-       qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show);
-       qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats);
-       qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
-       qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats);
-
-       goto fw_load_exit;
-
-fw_load_fail:
-       bus->fw_state = QTNF_FW_STATE_DETACHED;
-
-fw_load_exit:
-       complete(&bus->firmware_init_complete);
-       put_device(&pdev->dev);
-}
-
-static void qtnf_bringup_fw_async(struct qtnf_bus *bus)
-{
-       struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
-       struct pci_dev *pdev = priv->pdev;
-
-       get_device(&pdev->dev);
-       INIT_WORK(&bus->fw_work, qtnf_fw_work_handler);
-       schedule_work(&bus->fw_work);
-}
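
The get_device() here is balanced by the put_device() at the end of qtnf_fw_work_handler() above, on both success and failure paths, so the device cannot go away while the work is queued. The pattern in isolation, with my_* as illustrative names:

	#include <linux/device.h>
	#include <linux/workqueue.h>

	struct my_ctx {
		struct device *dev;
		struct work_struct work;
	};

	static void my_work_fn(struct work_struct *work)
	{
		struct my_ctx *ctx = container_of(work, struct my_ctx, work);

		/* ... long-running bring-up ... */

		put_device(ctx->dev);	/* balances get_device() below */
	}

	static void my_async_start(struct my_ctx *ctx)
	{
		get_device(ctx->dev);	/* hold dev until the work has run */
		INIT_WORK(&ctx->work, my_work_fn);
		schedule_work(&ctx->work);
	}
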
-
-static void qtnf_reclaim_tasklet_fn(unsigned long data)
-{
-       struct qtnf_pcie_bus_priv *priv = (void *)data;
-
-       qtnf_pcie_data_tx_reclaim(priv);
-       qtnf_en_txdone_irq(priv);
-}
-
-static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
-       struct qtnf_pcie_bus_priv *pcie_priv;
-       struct qtnf_bus *bus;
-       int ret;
-
-       bus = devm_kzalloc(&pdev->dev,
-                          sizeof(*bus) + sizeof(*pcie_priv), GFP_KERNEL);
-       if (!bus)
-               return -ENOMEM;
-
-       pcie_priv = get_bus_priv(bus);
-
-       pci_set_drvdata(pdev, bus);
-       bus->bus_ops = &qtnf_pcie_bus_ops;
-       bus->dev = &pdev->dev;
-       bus->fw_state = QTNF_FW_STATE_RESET;
-       pcie_priv->pdev = pdev;
-
-       strcpy(bus->fwname, QTN_PCI_PEARL_FW_NAME);
-       init_completion(&bus->firmware_init_complete);
-       mutex_init(&bus->bus_lock);
-       spin_lock_init(&pcie_priv->tx0_lock);
-       spin_lock_init(&pcie_priv->irq_lock);
-       spin_lock_init(&pcie_priv->tx_reclaim_lock);
-
-       /* init stats */
-       pcie_priv->tx_full_count = 0;
-       pcie_priv->tx_done_count = 0;
-       pcie_priv->pcie_irq_count = 0;
-       pcie_priv->pcie_irq_rx_count = 0;
-       pcie_priv->pcie_irq_tx_count = 0;
-       pcie_priv->pcie_irq_uf_count = 0;
-       pcie_priv->tx_reclaim_done = 0;
-       pcie_priv->tx_reclaim_req = 0;
-
-       tasklet_init(&pcie_priv->reclaim_tq, qtnf_reclaim_tasklet_fn,
-                    (unsigned long)pcie_priv);
-
-       init_dummy_netdev(&bus->mux_dev);
-       netif_napi_add(&bus->mux_dev, &bus->mux_napi,
-                      qtnf_rx_poll, 10);
-
-       pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PEARL_PCIE");
-       if (!pcie_priv->workqueue) {
-               pr_err("failed to alloc bus workqueue\n");
-               ret = -ENODEV;
-               goto err_init;
-       }
-
-       if (!pci_is_pcie(pdev)) {
-               pr_err("device %s is not PCI Express\n", pci_name(pdev));
-               ret = -EIO;
-               goto err_base;
-       }
-
-       qtnf_tune_pcie_mps(pcie_priv);
-
-       ret = pcim_enable_device(pdev);
-       if (ret) {
-               pr_err("failed to init PCI device %x\n", pdev->device);
-               goto err_base;
-       } else {
-               pr_debug("successful init of PCI device %x\n", pdev->device);
-       }
-
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-#else
-       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-#endif
-       if (ret) {
-               pr_err("PCIE DMA coherent mask init failed\n");
-               goto err_base;
-       }
-
-       pci_set_master(pdev);
-       qtnf_pcie_init_irq(pcie_priv);
-
-       ret = qtnf_pcie_init_memory(pcie_priv);
-       if (ret < 0) {
-               pr_err("PCIE memory init failed\n");
-               goto err_base;
-       }
-
-       pci_save_state(pdev);
-
-       ret = qtnf_pcie_init_shm_ipc(pcie_priv);
-       if (ret < 0) {
-               pr_err("PCIE SHM IPC init failed\n");
-               goto err_base;
-       }
-
-       ret = qtnf_pcie_init_xfer(pcie_priv);
-       if (ret) {
-               pr_err("PCIE xfer init failed\n");
-               goto err_ipc;
-       }
-
-       /* init default irq settings */
-       qtnf_init_hdp_irqs(pcie_priv);
-
-       /* start with disabled irqs */
-       qtnf_disable_hdp_irqs(pcie_priv);
-
-       ret = devm_request_irq(&pdev->dev, pdev->irq, &qtnf_interrupt, 0,
-                              "qtnf_pcie_irq", (void *)bus);
-       if (ret) {
-               pr_err("failed to request pcie irq %d\n", pdev->irq);
-               goto err_xfer;
-       }
-
-       qtnf_bringup_fw_async(bus);
-
-       return 0;
-
-err_xfer:
-       qtnf_free_xfer_buffers(pcie_priv);
-
-err_ipc:
-       qtnf_pcie_free_shm_ipc(pcie_priv);
-
-err_base:
-       flush_workqueue(pcie_priv->workqueue);
-       destroy_workqueue(pcie_priv->workqueue);
-       netif_napi_del(&bus->mux_napi);
-
-err_init:
-       tasklet_kill(&pcie_priv->reclaim_tq);
-       pci_set_drvdata(pdev, NULL);
-
-       return ret;
-}
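
The probe selects the DMA mask at build time via CONFIG_ARCH_DMA_ADDR_T_64BIT. The more common runtime idiom, sketched here as an alternative rather than what this driver does, is to try 64-bit and fall back:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	static int example_set_dma_mask(struct pci_dev *pdev)
	{
		int ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

		if (ret)	/* 64-bit unusable, fall back to 32-bit */
			ret = dma_set_mask_and_coherent(&pdev->dev,
							DMA_BIT_MASK(32));
		return ret;
	}
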
-
-static void qtnf_pcie_remove(struct pci_dev *pdev)
-{
-       struct qtnf_pcie_bus_priv *priv;
-       struct qtnf_bus *bus;
-
-       bus = pci_get_drvdata(pdev);
-       if (!bus)
-               return;
-
-       wait_for_completion(&bus->firmware_init_complete);
-
-       if (bus->fw_state == QTNF_FW_STATE_ACTIVE ||
-           bus->fw_state == QTNF_FW_STATE_EP_DEAD)
-               qtnf_core_detach(bus);
-
-       priv = get_bus_priv(bus);
-
-       netif_napi_del(&bus->mux_napi);
-       flush_workqueue(priv->workqueue);
-       destroy_workqueue(priv->workqueue);
-       tasklet_kill(&priv->reclaim_tq);
-
-       qtnf_free_xfer_buffers(priv);
-       qtnf_debugfs_remove(bus);
-
-       qtnf_pcie_free_shm_ipc(priv);
-       qtnf_reset_card(priv);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int qtnf_pcie_suspend(struct device *dev)
-{
-       return -EOPNOTSUPP;
-}
-
-static int qtnf_pcie_resume(struct device *dev)
-{
-       return 0;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-#ifdef CONFIG_PM_SLEEP
-/* Power Management Hooks */
-static SIMPLE_DEV_PM_OPS(qtnf_pcie_pm_ops, qtnf_pcie_suspend,
-                        qtnf_pcie_resume);
-#endif
-
-static const struct pci_device_id qtnf_pcie_devid_table[] = {
-       {
-               PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QTN_PEARL,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-       },
-       { },
-};
-
-MODULE_DEVICE_TABLE(pci, qtnf_pcie_devid_table);
-
-static struct pci_driver qtnf_pcie_drv_data = {
-       .name = DRV_NAME,
-       .id_table = qtnf_pcie_devid_table,
-       .probe = qtnf_pcie_probe,
-       .remove = qtnf_pcie_remove,
-#ifdef CONFIG_PM_SLEEP
-       .driver = {
-               .pm = &qtnf_pcie_pm_ops,
-       },
-#endif
-};
-
-static int __init qtnf_pcie_register(void)
-{
-       pr_info("register Quantenna QSR10g FullMAC PCIE driver\n");
-       return pci_register_driver(&qtnf_pcie_drv_data);
-}
-
-static void __exit qtnf_pcie_exit(void)
-{
-       pr_info("unregister Quantenna QSR10g FullMAC PCIE driver\n");
-       pci_unregister_driver(&qtnf_pcie_drv_data);
-}
-
-module_init(qtnf_pcie_register);
-module_exit(qtnf_pcie_exit);
-
-MODULE_AUTHOR("Quantenna Communications");
-MODULE_DESCRIPTION("Quantenna QSR10g PCIe bus driver for 802.11 wireless LAN.");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h
deleted file mode 100644 (file)
index 397875a..0000000
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _QTN_FMAC_PCIE_H_
-#define _QTN_FMAC_PCIE_H_
-
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-
-#include "pcie_regs_pearl.h"
-#include "pcie_ipc.h"
-#include "shm_ipc.h"
-
-struct bus;
-
-struct qtnf_pcie_bus_priv {
-       struct pci_dev  *pdev;
-
-       /* lock for irq configuration changes */
-       spinlock_t irq_lock;
-
-       /* lock for tx reclaim operations */
-       spinlock_t tx_reclaim_lock;
-       /* lock for tx0 operations */
-       spinlock_t tx0_lock;
-       u8 msi_enabled;
-       u8 tx_stopped;
-       int mps;
-
-       struct workqueue_struct *workqueue;
-       struct tasklet_struct reclaim_tq;
-
-       void __iomem *sysctl_bar;
-       void __iomem *epmem_bar;
-       void __iomem *dmareg_bar;
-
-       struct qtnf_shm_ipc shm_ipc_ep_in;
-       struct qtnf_shm_ipc shm_ipc_ep_out;
-
-       struct qtnf_pcie_bda __iomem *bda;
-       void __iomem *pcie_reg_base;
-
-       u16 tx_bd_num;
-       u16 rx_bd_num;
-
-       struct sk_buff **tx_skb;
-       struct sk_buff **rx_skb;
-
-       struct qtnf_tx_bd *tx_bd_vbase;
-       dma_addr_t tx_bd_pbase;
-
-       struct qtnf_rx_bd *rx_bd_vbase;
-       dma_addr_t rx_bd_pbase;
-
-       dma_addr_t bd_table_paddr;
-       void *bd_table_vaddr;
-       u32 bd_table_len;
-
-       u32 rx_bd_w_index;
-       u32 rx_bd_r_index;
-
-       u32 tx_bd_w_index;
-       u32 tx_bd_r_index;
-
-       u32 pcie_irq_mask;
-
-       /* diagnostics stats */
-       u32 pcie_irq_count;
-       u32 pcie_irq_rx_count;
-       u32 pcie_irq_tx_count;
-       u32 pcie_irq_uf_count;
-       u32 tx_full_count;
-       u32 tx_done_count;
-       u32 tx_reclaim_done;
-       u32 tx_reclaim_req;
-};
-
-#endif /* _QTN_FMAC_PCIE_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h
deleted file mode 100644 (file)
index 00bb21a..0000000
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _QTN_FMAC_PCIE_IPC_H_
-#define _QTN_FMAC_PCIE_IPC_H_
-
-#include <linux/types.h>
-
-#include "shm_ipc_defs.h"
-
-/* bitmap for EP status and flags: updated by EP, read by RC */
-#define QTN_EP_HAS_UBOOT       BIT(0)
-#define QTN_EP_HAS_FIRMWARE    BIT(1)
-#define QTN_EP_REQ_UBOOT       BIT(2)
-#define QTN_EP_REQ_FIRMWARE    BIT(3)
-#define QTN_EP_ERROR_UBOOT     BIT(4)
-#define QTN_EP_ERROR_FIRMWARE  BIT(5)
-
-#define QTN_EP_FW_LOADRDY      BIT(8)
-#define QTN_EP_FW_SYNC         BIT(9)
-#define QTN_EP_FW_RETRY                BIT(10)
-#define QTN_EP_FW_QLINK_DONE   BIT(15)
-#define QTN_EP_FW_DONE         BIT(16)
-
-/* bitmap for RC status and flags: updated by RC, read by EP */
-#define QTN_RC_PCIE_LINK       BIT(0)
-#define QTN_RC_NET_LINK                BIT(1)
-#define QTN_RC_FW_FLASHBOOT    BIT(5)
-#define QTN_RC_FW_QLINK                BIT(7)
-#define QTN_RC_FW_LOADRDY      BIT(8)
-#define QTN_RC_FW_SYNC         BIT(9)
-
-/* state transition timeouts */
-#define QTN_FW_DL_TIMEOUT_MS   3000
-#define QTN_FW_QLINK_TIMEOUT_MS        30000
-#define QTN_EP_RESET_WAIT_MS   1000
-
-#define PCIE_HDP_INT_RX_BITS (0                \
-       | PCIE_HDP_INT_EP_TXDMA         \
-       | PCIE_HDP_INT_EP_TXEMPTY       \
-       | PCIE_HDP_INT_HHBM_UF          \
-       )
-
-#define PCIE_HDP_INT_TX_BITS (0                \
-       | PCIE_HDP_INT_EP_RXDMA         \
-       )
-
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-#define QTN_HOST_HI32(a)       ((u32)(((u64)a) >> 32))
-#define QTN_HOST_LO32(a)       ((u32)(((u64)a) & 0xffffffffUL))
-#define QTN_HOST_ADDR(h, l)    ((((u64)h) << 32) | ((u64)l))
-#else
-#define QTN_HOST_HI32(a)       0
-#define QTN_HOST_LO32(a)       ((u32)(((u32)a) & 0xffffffffUL))
-#define QTN_HOST_ADDR(h, l)    ((u32)l)
-#endif
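
These helpers split a host address for paired 32-bit register writes; on builds without a 64-bit dma_addr_t the high word collapses to zero. A sketch of the typical consumer, with reg_lo/reg_hi as illustrative names:

	#include <linux/io.h>

	static void example_write_dma_base(void __iomem *reg_lo,
					   void __iomem *reg_hi,
					   dma_addr_t addr)
	{
		writel(QTN_HOST_LO32(addr), reg_lo);
		writel(QTN_HOST_HI32(addr), reg_hi);
	}
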
-
-#define QTN_SYSCTL_BAR 0
-#define QTN_SHMEM_BAR  2
-#define QTN_DMA_BAR    3
-
-#define QTN_PCIE_BDA_VERSION           0x1002
-
-#define PCIE_BDA_NAMELEN               32
-#define PCIE_HHBM_MAX_SIZE             2048
-
-#define SKB_BUF_SIZE           2048
-
-#define QTN_PCIE_BOARDFLG      "PCIEQTN"
-#define QTN_PCIE_FW_DLMASK     0xF
-#define QTN_PCIE_FW_BUFSZ      2048
-
-#define QTN_ENET_ADDR_LENGTH   6
-
-#define QTN_TXDONE_MASK                ((u32)0x80000000)
-#define QTN_GET_LEN(x)         ((x) & 0xFFFF)
-
-#define QTN_PCIE_TX_DESC_LEN_MASK      0xFFFF
-#define QTN_PCIE_TX_DESC_LEN_SHIFT     0
-#define QTN_PCIE_TX_DESC_PORT_MASK     0xF
-#define QTN_PCIE_TX_DESC_PORT_SHIFT    16
-#define QTN_PCIE_TX_DESC_TQE_BIT       BIT(24)
-
-#define QTN_EP_LHOST_TQE_PORT  4
-
-enum qtnf_pcie_bda_ipc_flags {
-       QTN_PCIE_IPC_FLAG_HBM_MAGIC     = BIT(0),
-       QTN_PCIE_IPC_FLAG_SHM_PIO       = BIT(1),
-};
-
-struct qtnf_pcie_bda {
-       __le16 bda_len;
-       __le16 bda_version;
-       __le32 bda_pci_endian;
-       __le32 bda_ep_state;
-       __le32 bda_rc_state;
-       __le32 bda_dma_mask;
-       __le32 bda_msi_addr;
-       __le32 bda_flashsz;
-       u8 bda_boardname[PCIE_BDA_NAMELEN];
-       __le32 bda_rc_msi_enabled;
-       u8 bda_hhbm_list[PCIE_HHBM_MAX_SIZE];
-       __le32 bda_dsbw_start_index;
-       __le32 bda_dsbw_end_index;
-       __le32 bda_dsbw_total_bytes;
-       __le32 bda_rc_tx_bd_base;
-       __le32 bda_rc_tx_bd_num;
-       u8 bda_pcie_mac[QTN_ENET_ADDR_LENGTH];
-       struct qtnf_shm_ipc_region bda_shm_reg1 __aligned(4096); /* host TX */
-       struct qtnf_shm_ipc_region bda_shm_reg2 __aligned(4096); /* host RX */
-} __packed;
-
-struct qtnf_tx_bd {
-       __le32 addr;
-       __le32 addr_h;
-       __le32 info;
-       __le32 info_h;
-} __packed;
-
-struct qtnf_rx_bd {
-       __le32 addr;
-       __le32 addr_h;
-       __le32 info;
-       __le32 info_h;
-       __le32 next_ptr;
-       __le32 next_ptr_h;
-} __packed;
-
-enum qtnf_fw_loadtype {
-       QTN_FW_DBEGIN,
-       QTN_FW_DSUB,
-       QTN_FW_DEND,
-       QTN_FW_CTRL
-};
-
-struct qtnf_pcie_fw_hdr {
-       u8 boardflg[8];
-       __le32 fwsize;
-       __le32 seqnum;
-       __le32 type;
-       __le32 pktlen;
-       __le32 crc;
-} __packed;
-
-#endif /* _QTN_FMAC_PCIE_IPC_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h
deleted file mode 100644 (file)
index 0bfe285..0000000
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * Copyright (c) 2015 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __PEARL_PCIE_H
-#define __PEARL_PCIE_H
-
-#define        PCIE_GEN2_BASE                          (0xe9000000)
-#define        PCIE_GEN3_BASE                          (0xe7000000)
-
-#define PEARL_CUR_PCIE_BASE                    (PCIE_GEN2_BASE)
-#define PCIE_HDP_OFFSET                                (0x2000)
-
-#define PCIE_HDP_CTRL(base)                    ((base) + 0x2c00)
-#define PCIE_HDP_AXI_CTRL(base)                        ((base) + 0x2c04)
-#define PCIE_HDP_HOST_WR_DESC0(base)           ((base) + 0x2c10)
-#define PCIE_HDP_HOST_WR_DESC0_H(base)         ((base) + 0x2c14)
-#define PCIE_HDP_HOST_WR_DESC1(base)           ((base) + 0x2c18)
-#define PCIE_HDP_HOST_WR_DESC1_H(base)         ((base) + 0x2c1c)
-#define PCIE_HDP_HOST_WR_DESC2(base)           ((base) + 0x2c20)
-#define PCIE_HDP_HOST_WR_DESC2_H(base)         ((base) + 0x2c24)
-#define PCIE_HDP_HOST_WR_DESC3(base)           ((base) + 0x2c28)
-#define PCIE_HDP_HOST_WR_DESC4_H(base)         ((base) + 0x2c2c)
-#define PCIE_HDP_RX_INT_CTRL(base)             ((base) + 0x2c30)
-#define PCIE_HDP_TX_INT_CTRL(base)             ((base) + 0x2c34)
-#define PCIE_HDP_INT_STATUS(base)              ((base) + 0x2c38)
-#define PCIE_HDP_INT_EN(base)                  ((base) + 0x2c3c)
-#define PCIE_HDP_RX_DESC0_PTR(base)            ((base) + 0x2c40)
-#define PCIE_HDP_RX_DESC0_NOE(base)            ((base) + 0x2c44)
-#define PCIE_HDP_RX_DESC1_PTR(base)            ((base) + 0x2c48)
-#define PCIE_HDP_RX_DESC1_NOE(base)            ((base) + 0x2c4c)
-#define PCIE_HDP_RX_DESC2_PTR(base)            ((base) + 0x2c50)
-#define PCIE_HDP_RX_DESC2_NOE(base)            ((base) + 0x2c54)
-#define PCIE_HDP_RX_DESC3_PTR(base)            ((base) + 0x2c58)
-#define PCIE_HDP_RX_DESC3_NOE(base)            ((base) + 0x2c5c)
-
-#define PCIE_HDP_TX0_BASE_ADDR(base)           ((base) + 0x2c60)
-#define PCIE_HDP_TX1_BASE_ADDR(base)           ((base) + 0x2c64)
-#define PCIE_HDP_TX0_Q_CTRL(base)              ((base) + 0x2c70)
-#define PCIE_HDP_TX1_Q_CTRL(base)              ((base) + 0x2c74)
-#define PCIE_HDP_CFG0(base)                    ((base) + 0x2c80)
-#define PCIE_HDP_CFG1(base)                    ((base) + 0x2c84)
-#define PCIE_HDP_CFG2(base)                    ((base) + 0x2c88)
-#define PCIE_HDP_CFG3(base)                    ((base) + 0x2c8c)
-#define PCIE_HDP_CFG4(base)                    ((base) + 0x2c90)
-#define PCIE_HDP_CFG5(base)                    ((base) + 0x2c94)
-#define PCIE_HDP_CFG6(base)                    ((base) + 0x2c98)
-#define PCIE_HDP_CFG7(base)                    ((base) + 0x2c9c)
-#define PCIE_HDP_CFG8(base)                    ((base) + 0x2ca0)
-#define PCIE_HDP_CFG9(base)                    ((base) + 0x2ca4)
-#define PCIE_HDP_CFG10(base)                   ((base) + 0x2ca8)
-#define PCIE_HDP_CFG11(base)                   ((base) + 0x2cac)
-#define PCIE_INT(base)                         ((base) + 0x2cb0)
-#define PCIE_INT_MASK(base)                    ((base) + 0x2cb4)
-#define PCIE_MSI_MASK(base)                    ((base) + 0x2cb8)
-#define PCIE_MSI_PNDG(base)                    ((base) + 0x2cbc)
-#define PCIE_PRI_CFG(base)                     ((base) + 0x2cc0)
-#define PCIE_PHY_CR(base)                      ((base) + 0x2cc4)
-#define PCIE_HDP_CTAG_CTRL(base)               ((base) + 0x2cf4)
-#define PCIE_HDP_HHBM_BUF_PTR(base)            ((base) + 0x2d00)
-#define PCIE_HDP_HHBM_BUF_PTR_H(base)          ((base) + 0x2d04)
-#define PCIE_HDP_HHBM_BUF_FIFO_NOE(base)       ((base) + 0x2d04)
-#define PCIE_HDP_RX0DMA_CNT(base)              ((base) + 0x2d10)
-#define PCIE_HDP_RX1DMA_CNT(base)              ((base) + 0x2d14)
-#define PCIE_HDP_RX2DMA_CNT(base)              ((base) + 0x2d18)
-#define PCIE_HDP_RX3DMA_CNT(base)              ((base) + 0x2d1c)
-#define PCIE_HDP_TX0DMA_CNT(base)              ((base) + 0x2d20)
-#define PCIE_HDP_TX1DMA_CNT(base)              ((base) + 0x2d24)
-#define PCIE_HDP_RXDMA_CTRL(base)              ((base) + 0x2d28)
-#define PCIE_HDP_TX_HOST_Q_SZ_CTRL(base)       ((base) + 0x2d2c)
-#define PCIE_HDP_TX_HOST_Q_BASE_L(base)                ((base) + 0x2d30)
-#define PCIE_HDP_TX_HOST_Q_BASE_H(base)                ((base) + 0x2d34)
-#define PCIE_HDP_TX_HOST_Q_WR_PTR(base)                ((base) + 0x2d38)
-#define PCIE_HDP_TX_HOST_Q_RD_PTR(base)                ((base) + 0x2d3c)
-#define PCIE_HDP_TX_HOST_Q_STS(base)           ((base) + 0x2d40)
-
-/* Host HBM pool registers */
-#define PCIE_HHBM_CSR_REG(base)                        ((base) + 0x2e00)
-#define PCIE_HHBM_Q_BASE_REG(base)             ((base) + 0x2e04)
-#define PCIE_HHBM_Q_LIMIT_REG(base)            ((base) + 0x2e08)
-#define PCIE_HHBM_Q_WR_REG(base)               ((base) + 0x2e0c)
-#define PCIE_HHBM_Q_RD_REG(base)               ((base) + 0x2e10)
-#define PCIE_HHBM_POOL_DATA_0_H(base)          ((base) + 0x2e90)
-#define PCIE_HHBM_CONFIG(base)                 ((base) + 0x2f9c)
-#define PCIE_HHBM_POOL_REQ_0(base)             ((base) + 0x2f10)
-#define PCIE_HHBM_POOL_DATA_0(base)            ((base) + 0x2f40)
-#define PCIE_HHBM_WATERMARK_MASKED_INT(base)   ((base) + 0x2f68)
-#define PCIE_HHBM_WATERMARK_INT(base)          ((base) + 0x2f6c)
-#define PCIE_HHBM_POOL_WATERMARK(base)         ((base) + 0x2f70)
-#define PCIE_HHBM_POOL_OVERFLOW_CNT(base)      ((base) + 0x2f90)
-#define PCIE_HHBM_POOL_UNDERFLOW_CNT(base)     ((base) + 0x2f94)
-#define HBM_INT_STATUS(base)                   ((base) + 0x2f9c)
-#define PCIE_HHBM_POOL_CNFIG(base)             ((base) + 0x2f9c)
-
-/* host HBM bit field definition */
-#define HHBM_CONFIG_SOFT_RESET                 (BIT(8))
-#define HHBM_WR_REQ                            (BIT(0))
-#define HHBM_RD_REQ                            (BIT(1))
-#define HHBM_DONE                              (BIT(31))
-#define HHBM_64BIT                             (BIT(10))
-
-/* offsets for dual PCIE */
-#define PCIE_PORT_LINK_CTL(base)               ((base) + 0x0710)
-#define PCIE_GEN2_CTL(base)                    ((base) + 0x080C)
-#define PCIE_GEN3_OFF(base)                    ((base) + 0x0890)
-#define PCIE_ATU_CTRL1(base)                   ((base) + 0x0904)
-#define PCIE_ATU_CTRL2(base)                   ((base) + 0x0908)
-#define PCIE_ATU_BASE_LOW(base)                        ((base) + 0x090C)
-#define PCIE_ATU_BASE_HIGH(base)               ((base) + 0x0910)
-#define PCIE_ATU_BASE_LIMIT(base)              ((base) + 0x0914)
-#define PCIE_ATU_TGT_LOW(base)                 ((base) + 0x0918)
-#define PCIE_ATU_TGT_HIGH(base)                        ((base) + 0x091C)
-#define PCIE_DMA_WR_ENABLE(base)               ((base) + 0x097C)
-#define PCIE_DMA_WR_CHWTLOW(base)              ((base) + 0x0988)
-#define PCIE_DMA_WR_CHWTHIG(base)              ((base) + 0x098C)
-#define PCIE_DMA_WR_INTSTS(base)               ((base) + 0x09BC)
-#define PCIE_DMA_WR_INTMASK(base)              ((base) + 0x09C4)
-#define PCIE_DMA_WR_INTCLER(base)              ((base) + 0x09C8)
-#define PCIE_DMA_WR_DONE_IMWR_ADDR_L(base)     ((base) + 0x09D0)
-#define PCIE_DMA_WR_DONE_IMWR_ADDR_H(base)     ((base) + 0x09D4)
-#define PCIE_DMA_WR_ABORT_IMWR_ADDR_L(base)    ((base) + 0x09D8)
-#define PCIE_DMA_WR_ABORT_IMWR_ADDR_H(base)    ((base) + 0x09DC)
-#define PCIE_DMA_WR_IMWR_DATA(base)            ((base) + 0x09E0)
-#define PCIE_DMA_WR_LL_ERR_EN(base)            ((base) + 0x0A00)
-#define PCIE_DMA_WR_DOORBELL(base)             ((base) + 0x0980)
-#define PCIE_DMA_RD_ENABLE(base)               ((base) + 0x099C)
-#define PCIE_DMA_RD_DOORBELL(base)             ((base) + 0x09A0)
-#define PCIE_DMA_RD_CHWTLOW(base)              ((base) + 0x09A8)
-#define PCIE_DMA_RD_CHWTHIG(base)              ((base) + 0x09AC)
-#define PCIE_DMA_RD_INTSTS(base)               ((base) + 0x0A10)
-#define PCIE_DMA_RD_INTMASK(base)              ((base) + 0x0A18)
-#define PCIE_DMA_RD_INTCLER(base)              ((base) + 0x0A1C)
-#define PCIE_DMA_RD_ERR_STS_L(base)            ((base) + 0x0A24)
-#define PCIE_DMA_RD_ERR_STS_H(base)            ((base) + 0x0A28)
-#define PCIE_DMA_RD_LL_ERR_EN(base)            ((base) + 0x0A34)
-#define PCIE_DMA_RD_DONE_IMWR_ADDR_L(base)     ((base) + 0x0A3C)
-#define PCIE_DMA_RD_DONE_IMWR_ADDR_H(base)     ((base) + 0x0A40)
-#define PCIE_DMA_RD_ABORT_IMWR_ADDR_L(base)    ((base) + 0x0A44)
-#define PCIE_DMA_RD_ABORT_IMWR_ADDR_H(base)    ((base) + 0x0A48)
-#define PCIE_DMA_RD_IMWR_DATA(base)            ((base) + 0x0A4C)
-#define PCIE_DMA_CHNL_CONTEXT(base)            ((base) + 0x0A6C)
-#define PCIE_DMA_CHNL_CNTRL(base)              ((base) + 0x0A70)
-#define PCIE_DMA_XFR_SIZE(base)                        ((base) + 0x0A78)
-#define PCIE_DMA_SAR_LOW(base)                 ((base) + 0x0A7C)
-#define PCIE_DMA_SAR_HIGH(base)                        ((base) + 0x0A80)
-#define PCIE_DMA_DAR_LOW(base)                 ((base) + 0x0A84)
-#define PCIE_DMA_DAR_HIGH(base)                        ((base) + 0x0A88)
-#define PCIE_DMA_LLPTR_LOW(base)               ((base) + 0x0A8C)
-#define PCIE_DMA_LLPTR_HIGH(base)              ((base) + 0x0A90)
-#define PCIE_DMA_WRLL_ERR_ENB(base)            ((base) + 0x0A00)
-#define PCIE_DMA_RDLL_ERR_ENB(base)            ((base) + 0x0A34)
-#define PCIE_DMABD_CHNL_CNTRL(base)            ((base) + 0x8000)
-#define PCIE_DMABD_XFR_SIZE(base)              ((base) + 0x8004)
-#define PCIE_DMABD_SAR_LOW(base)               ((base) + 0x8008)
-#define PCIE_DMABD_SAR_HIGH(base)              ((base) + 0x800c)
-#define PCIE_DMABD_DAR_LOW(base)               ((base) + 0x8010)
-#define PCIE_DMABD_DAR_HIGH(base)              ((base) + 0x8014)
-#define PCIE_DMABD_LLPTR_LOW(base)             ((base) + 0x8018)
-#define PCIE_DMABD_LLPTR_HIGH(base)            ((base) + 0x801c)
-#define PCIE_WRDMA0_CHNL_CNTRL(base)           ((base) + 0x8000)
-#define PCIE_WRDMA0_XFR_SIZE(base)             ((base) + 0x8004)
-#define PCIE_WRDMA0_SAR_LOW(base)              ((base) + 0x8008)
-#define PCIE_WRDMA0_SAR_HIGH(base)             ((base) + 0x800c)
-#define PCIE_WRDMA0_DAR_LOW(base)              ((base) + 0x8010)
-#define PCIE_WRDMA0_DAR_HIGH(base)             ((base) + 0x8014)
-#define PCIE_WRDMA0_LLPTR_LOW(base)            ((base) + 0x8018)
-#define PCIE_WRDMA0_LLPTR_HIGH(base)           ((base) + 0x801c)
-#define PCIE_WRDMA1_CHNL_CNTRL(base)           ((base) + 0x8020)
-#define PCIE_WRDMA1_XFR_SIZE(base)             ((base) + 0x8024)
-#define PCIE_WRDMA1_SAR_LOW(base)              ((base) + 0x8028)
-#define PCIE_WRDMA1_SAR_HIGH(base)             ((base) + 0x802c)
-#define PCIE_WRDMA1_DAR_LOW(base)              ((base) + 0x8030)
-#define PCIE_WRDMA1_DAR_HIGH(base)             ((base) + 0x8034)
-#define PCIE_WRDMA1_LLPTR_LOW(base)            ((base) + 0x8038)
-#define PCIE_WRDMA1_LLPTR_HIGH(base)           ((base) + 0x803c)
-#define PCIE_RDDMA0_CHNL_CNTRL(base)           ((base) + 0x8040)
-#define PCIE_RDDMA0_XFR_SIZE(base)             ((base) + 0x8044)
-#define PCIE_RDDMA0_SAR_LOW(base)              ((base) + 0x8048)
-#define PCIE_RDDMA0_SAR_HIGH(base)             ((base) + 0x804c)
-#define PCIE_RDDMA0_DAR_LOW(base)              ((base) + 0x8050)
-#define PCIE_RDDMA0_DAR_HIGH(base)             ((base) + 0x8054)
-#define PCIE_RDDMA0_LLPTR_LOW(base)            ((base) + 0x8058)
-#define PCIE_RDDMA0_LLPTR_HIGH(base)           ((base) + 0x805c)
-#define PCIE_RDDMA1_CHNL_CNTRL(base)           ((base) + 0x8060)
-#define PCIE_RDDMA1_XFR_SIZE(base)             ((base) + 0x8064)
-#define PCIE_RDDMA1_SAR_LOW(base)              ((base) + 0x8068)
-#define PCIE_RDDMA1_SAR_HIGH(base)             ((base) + 0x806c)
-#define PCIE_RDDMA1_DAR_LOW(base)              ((base) + 0x8070)
-#define PCIE_RDDMA1_DAR_HIGH(base)             ((base) + 0x8074)
-#define PCIE_RDDMA1_LLPTR_LOW(base)            ((base) + 0x8078)
-#define PCIE_RDDMA1_LLPTR_HIGH(base)           ((base) + 0x807c)
-
-#define PCIE_ID(base)                          ((base) + 0x0000)
-#define PCIE_CMD(base)                         ((base) + 0x0004)
-#define PCIE_BAR(base, n)                      ((base) + 0x0010 + ((n) << 2))
-#define PCIE_CAP_PTR(base)                     ((base) + 0x0034)
-#define PCIE_MSI_LBAR(base)                    ((base) + 0x0054)
-#define PCIE_MSI_CTRL(base)                    ((base) + 0x0050)
-#define PCIE_MSI_ADDR_L(base)                  ((base) + 0x0054)
-#define PCIE_MSI_ADDR_H(base)                  ((base) + 0x0058)
-#define PCIE_MSI_DATA(base)                    ((base) + 0x005C)
-#define PCIE_MSI_MASK_BIT(base)                        ((base) + 0x0060)
-#define PCIE_MSI_PEND_BIT(base)                        ((base) + 0x0064)
-#define PCIE_DEVCAP(base)                      ((base) + 0x0074)
-#define PCIE_DEVCTLSTS(base)                   ((base) + 0x0078)
-
-#define PCIE_CMDSTS(base)                      ((base) + 0x0004)
-#define PCIE_LINK_STAT(base)                   ((base) + 0x80)
-#define PCIE_LINK_CTL2(base)                   ((base) + 0xa0)
-#define PCIE_ASPM_L1_CTRL(base)                        ((base) + 0x70c)
-#define PCIE_ASPM_LINK_CTRL(base)              (PCIE_LINK_STAT)
-#define PCIE_ASPM_L1_SUBSTATE_TIMING(base)     ((base) + 0xB44)
-#define PCIE_L1SUB_CTRL1(base)                 ((base) + 0x150)
-#define PCIE_PMCSR(base)                       ((base) + 0x44)
-#define PCIE_CFG_SPACE_LIMIT(base)             ((base) + 0x100)
-
-/* PCIe link defines */
-#define PEARL_PCIE_LINKUP                      (0x7)
-#define PEARL_PCIE_DATA_LINK                   (BIT(0))
-#define PEARL_PCIE_PHY_LINK                    (BIT(1))
-#define PEARL_PCIE_LINK_RST                    (BIT(3))
-#define PEARL_PCIE_FATAL_ERR                   (BIT(5))
-#define PEARL_PCIE_NONFATAL_ERR                        (BIT(6))
-
-/* PCIe Lane defines */
-#define PCIE_G2_LANE_X1                                ((BIT(0)) << 16)
-#define PCIE_G2_LANE_X2                                ((BIT(0) | BIT(1)) << 16)
-
-/* PCIe DLL link enable */
-#define PCIE_DLL_LINK_EN                       ((BIT(0)) << 5)
-
-#define PCIE_LINK_GEN1                         (BIT(0))
-#define PCIE_LINK_GEN2                         (BIT(1))
-#define PCIE_LINK_GEN3                         (BIT(2))
-#define PCIE_LINK_MODE(x)                      (((x) >> 16) & 0x7)
-
-#define MSI_EN                                 (BIT(0))
-#define MSI_64_EN                              (BIT(7))
-#define PCIE_MSI_ADDR_OFFSET(a)                        ((a) & 0xFFFF)
-#define PCIE_MSI_ADDR_ALIGN(a)                 ((a) & (~0xFFFF))
-
-#define PCIE_BAR_MASK(base, n)                 ((base) + 0x1010 + ((n) << 2))
-#define PCIE_MAX_BAR                           (6)
-
-#define PCIE_ATU_VIEW(base)                    ((base) + 0x0900)
-#define PCIE_ATU_CTL1(base)                    ((base) + 0x0904)
-#define PCIE_ATU_CTL2(base)                    ((base) + 0x0908)
-#define PCIE_ATU_LBAR(base)                    ((base) + 0x090c)
-#define PCIE_ATU_UBAR(base)                    ((base) + 0x0910)
-#define PCIE_ATU_LAR(base)                     ((base) + 0x0914)
-#define PCIE_ATU_LTAR(base)                    ((base) + 0x0918)
-#define PCIE_ATU_UTAR(base)                    ((base) + 0x091c)
-
-#define PCIE_MSI_ADDR_LOWER(base)              ((base) + 0x0820)
-#define PCIE_MSI_ADDR_UPPER(base)              ((base) + 0x0824)
-#define PCIE_MSI_ENABLE(base)                  ((base) + 0x0828)
-#define PCIE_MSI_MASK_RC(base)                 ((base) + 0x082c)
-#define PCIE_MSI_STATUS(base)                  ((base) + 0x0830)
-#define PEARL_PCIE_MSI_REGION                  (0xce000000)
-#define PEARL_PCIE_MSI_DATA                    (0)
-#define PCIE_MSI_GPIO(base)                    ((base) + 0x0888)
-
-#define PCIE_HDP_HOST_QUEUE_FULL       (BIT(17))
-#define USE_BAR_MATCH_MODE
-#define PCIE_ATU_OB_REGION             (BIT(0))
-#define PCIE_ATU_EN_REGION             (BIT(31))
-#define PCIE_ATU_EN_MATCH              (BIT(30))
-#define PCIE_BASE_REGION               (0xb0000000)
-#define PCIE_MEM_MAP_SIZE              (512 * 1024)
-
-#define PCIE_OB_REG_REGION             (0xcf000000)
-#define PCIE_CONFIG_REGION             (0xcf000000)
-#define PCIE_CONFIG_SIZE               (4096)
-#define PCIE_CONFIG_CH                 (1)
-
-/* inbound mapping */
-#define PCIE_IB_BAR0                   (0x00000000)    /* ddr */
-#define PCIE_IB_BAR0_CH                        (0)
-#define PCIE_IB_BAR3                   (0xe0000000)    /* sys_reg */
-#define PCIE_IB_BAR3_CH                        (1)
-
-/* outbound mapping */
-#define PCIE_MEM_CH                    (0)
-#define PCIE_REG_CH                    (1)
-#define PCIE_MEM_REGION                        (0xc0000000)
-#define        PCIE_MEM_SIZE                   (0x000fffff)
-#define PCIE_MEM_TAR                   (0x80000000)
-
-#define PCIE_MSI_REGION                        (0xce000000)
-#define PCIE_MSI_SIZE                  (KBYTE(4) - 1)
-#define PCIE_MSI_CH                    (1)
-
-/* size of config region */
-#define PCIE_CFG_SIZE                  (0x0000ffff)
-
-#define PCIE_ATU_DIR_IB                        (BIT(31))
-#define PCIE_ATU_DIR_OB                        (0)
-#define PCIE_ATU_DIR_CFG               (2)
-#define PCIE_ATU_DIR_MATCH_IB          (BIT(31) | BIT(30))
-
-#define PCIE_DMA_WR_0                  (0)
-#define PCIE_DMA_WR_1                  (1)
-#define PCIE_DMA_RD_0                  (2)
-#define PCIE_DMA_RD_1                  (3)
-
-#define PCIE_DMA_CHNL_CNTRL_CB         (BIT(0))
-#define PCIE_DMA_CHNL_CNTRL_TCB                (BIT(1))
-#define PCIE_DMA_CHNL_CNTRL_LLP                (BIT(2))
-#define PCIE_DMA_CHNL_CNTRL_LIE                (BIT(3))
-#define PCIE_DMA_CHNL_CNTRL_RIE                (BIT(4))
-#define PCIE_DMA_CHNL_CNTRL_CSS                (BIT(8))
-#define PCIE_DMA_CHNL_CNTRL_LLE                (BIT(9))
-#define PCIE_DMA_CHNL_CNTRL_TLP                (BIT(26))
-
-#define PCIE_DMA_CHNL_CONTEXT_RD       (BIT(31))
-#define PCIE_DMA_CHNL_CONTEXT_WR       (0)
-#define PCIE_MAX_BAR                   (6)
-
-/* PCIe HDP interrupt status definition */
-#define PCIE_HDP_INT_EP_RXDMA          (BIT(0))
-#define PCIE_HDP_INT_HBM_UF            (BIT(1))
-#define PCIE_HDP_INT_RX_LEN_ERR                (BIT(2))
-#define PCIE_HDP_INT_RX_HDR_LEN_ERR    (BIT(3))
-#define PCIE_HDP_INT_EP_TXDMA          (BIT(12))
-#define PCIE_HDP_INT_HHBM_UF           (BIT(13))
-#define PCIE_HDP_INT_EP_TXEMPTY                (BIT(15))
-#define PCIE_HDP_INT_IPC               (BIT(29))
-
-/* PCIe interrupt status definition */
-#define PCIE_INT_MSI                   (BIT(24))
-#define PCIE_INT_INTX                  (BIT(23))
-
-/* PCIe legacy INTx */
-#define PEARL_PCIE_CFG0_OFFSET         (0x6C)
-#define PEARL_ASSERT_INTX              (BIT(9))
-
-/* SYS CTL regs */
-#define QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET      (0x001C)
-
-#define QTN_PEARL_IPC_IRQ_WORD(irq)    (BIT(irq) | BIT(irq + 16))
-#define QTN_PEARL_LHOST_IPC_IRQ                (6)
-#define QTN_PEARL_LHOST_EP_RESET       (7)
-
-#endif /* __PEARL_PCIE_H */
index 99d37e3efba634e0e649349259eeaf5b5a4a72b9..8d62addea895fbf35871f89ec6c0c33917ff8b21 100644 (file)
@@ -71,6 +71,7 @@ struct qlink_msg_header {
  * @QLINK_HW_CAPAB_DFS_OFFLOAD: device implements DFS offload functionality
  * @QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR: device supports MAC Address
  *     Randomization in probe requests.
+ * @QLINK_HW_CAPAB_OBSS_SCAN: device can perform OBSS scanning.
  */
 enum qlink_hw_capab {
        QLINK_HW_CAPAB_REG_UPDATE               = BIT(0),
@@ -78,6 +79,8 @@ enum qlink_hw_capab {
        QLINK_HW_CAPAB_DFS_OFFLOAD              = BIT(2),
        QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR     = BIT(3),
        QLINK_HW_CAPAB_PWR_MGMT                 = BIT(4),
+       QLINK_HW_CAPAB_OBSS_SCAN                = BIT(5),
+       QLINK_HW_CAPAB_SCAN_DWELL               = BIT(6),
 };
 
 enum qlink_iface_type {
@@ -1149,6 +1152,8 @@ enum qlink_tlv_id {
        QTN_TLV_ID_MAX_SCAN_SSIDS       = 0x0409,
        QTN_TLV_ID_WOWLAN_CAPAB         = 0x0410,
        QTN_TLV_ID_WOWLAN_PATTERN       = 0x0411,
+       QTN_TLV_ID_SCAN_FLUSH           = 0x0412,
+       QTN_TLV_ID_SCAN_DWELL           = 0x0413,
 };
 
 struct qlink_tlv_hdr {
index 54caeb38917c2c0e5a6c968a0ac4d7eb88812089..960d5d97492f9e67b2849bb296d2d7c4af3b825b 100644 (file)
@@ -40,6 +40,14 @@ static inline void qtnf_cmd_skb_put_tlv_arr(struct sk_buff *skb,
        memcpy(hdr->val, arr, arr_len);
 }
 
+static inline void qtnf_cmd_skb_put_tlv_tag(struct sk_buff *skb, u16 tlv_id)
+{
+       struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr));
+
+       hdr->type = cpu_to_le16(tlv_id);
+       hdr->len = cpu_to_le16(0);
+}
+
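
The new helper appends a zero-length, flag-style TLV: the tag alone carries the meaning. An assumed caller, mirroring the QTN_TLV_ID_SCAN_FLUSH id added in this same series (the actual scan-path call site is not part of this hunk):

	static void example_mark_scan_flush(struct sk_buff *cmd_skb)
	{
		/* no payload follows; presence of the tag is the signal */
		qtnf_cmd_skb_put_tlv_tag(cmd_skb, QTN_TLV_ID_SCAN_FLUSH);
	}
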
 static inline void qtnf_cmd_skb_put_tlv_u8(struct sk_buff *skb, u16 tlv_id,
                                           u8 value)
 {
index c4ad40d59085653dadb0e2dc704c2c3375cffde4..1fe798a9a667f2965b52eef7ba7c8e899d56990b 100644 (file)
 
 #define        PCIE_DEVICE_ID_QTN_PEARL        (0x0008)
 
+#define QTN_REG_SYS_CTRL_CSR           0x14
+#define QTN_CHIP_ID_MASK               0xF0
+#define QTN_CHIP_ID_TOPAZ              0x40
+#define QTN_CHIP_ID_PEARL              0x50
+#define QTN_CHIP_ID_PEARL_B            0x60
+#define QTN_CHIP_ID_PEARL_C            0x70
+
 /* FW names */
 
 #define QTN_PCI_PEARL_FW_NAME          "qtn/fmac_qsr10g.img"
 
+static inline unsigned int qtnf_chip_id_get(const void __iomem *regs_base)
+{
+       u32 board_rev = readl(regs_base + QTN_REG_SYS_CTRL_CSR);
+
+       return board_rev & QTN_CHIP_ID_MASK;
+}
+
 #endif /* _QTN_HW_IDS_H_ */
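
An illustrative consumer of the new inline, branching on the chip revision after mapping the system-control BAR (the sysctl_bar name follows the bus priv struct above; the switch arms only use ids defined in this header):

	static bool example_is_pearl(const void __iomem *sysctl_bar)
	{
		switch (qtnf_chip_id_get(sysctl_bar)) {
		case QTN_CHIP_ID_PEARL:
		case QTN_CHIP_ID_PEARL_B:
		case QTN_CHIP_ID_PEARL_C:
			return true;
		default:
			return false;
		}
	}
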
index aa106dd0a14ba0ff56a0ece547ff4b1ca2dce5ca..2ec334199c2bba9561c9366d1ea2bd0f9b08551e 100644 (file)
@@ -42,19 +42,18 @@ static void qtnf_shm_handle_new_data(struct qtnf_shm_ipc *ipc)
        if (unlikely(size == 0 || size > QTN_IPC_MAX_DATA_SZ)) {
                pr_err("wrong rx packet size: %zu\n", size);
                rx_buff_ok = false;
-       } else {
-               memcpy_fromio(ipc->rx_data, ipc->shm_region->data, size);
+       }
+
+       if (likely(rx_buff_ok)) {
+               ipc->rx_packet_count++;
+               ipc->rx_callback.fn(ipc->rx_callback.arg,
+                                   ipc->shm_region->data, size);
        }
 
        writel(QTNF_SHM_IPC_ACK, &shm_reg_hdr->flags);
        readl(&shm_reg_hdr->flags); /* flush PCIe write */
 
        ipc->interrupt.fn(ipc->interrupt.arg);
-
-       if (likely(rx_buff_ok)) {
-               ipc->rx_packet_count++;
-               ipc->rx_callback.fn(ipc->rx_callback.arg, ipc->rx_data, size);
-       }
 }
 
 static void qtnf_shm_ipc_irq_work(struct work_struct *work)
index 453dd6477b120c95b93c4be684b56d599639e50c..c2a3702a9ee7f99cce1f9ced6595ad7960f42708 100644 (file)
@@ -32,7 +32,7 @@ struct qtnf_shm_ipc_int {
 };
 
 struct qtnf_shm_ipc_rx_callback {
-       void (*fn)(void *arg, const u8 *buf, size_t len);
+       void (*fn)(void *arg, const u8 __iomem *buf, size_t len);
        void *arg;
 };
 
@@ -51,8 +51,6 @@ struct qtnf_shm_ipc {
 
        u8 waiting_for_ack;
 
-       u8 rx_data[QTN_IPC_MAX_DATA_SZ] __aligned(sizeof(u32));
-
        struct qtnf_shm_ipc_int interrupt;
        struct qtnf_shm_ipc_rx_callback rx_callback;
 
index a567bc273ffc6c725473710ecea9c82e5076284e..9e7b8933d30c5afedf1143eb7bb646365f4f2181 100644 (file)
@@ -957,6 +957,47 @@ static void rt2800_rate_from_status(struct skb_frame_desc *skbdesc,
        skbdesc->tx_rate_flags = flags;
 }
 
+static bool rt2800_txdone_entry_check(struct queue_entry *entry, u32 reg)
+{
+       __le32 *txwi;
+       u32 word;
+       int wcid, ack, pid;
+       int tx_wcid, tx_ack, tx_pid, is_agg;
+
+       /*
+        * This frame has returned with an IO error,
+        * so the status report is not intended for this
+        * frame.
+        */
+       if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
+               return false;
+
+       wcid    = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
+       ack     = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
+       pid     = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
+       is_agg  = rt2x00_get_field32(reg, TX_STA_FIFO_TX_AGGRE);
+
+       /*
+        * Validate if this TX status report is intended for
+        * this entry by comparing the WCID/ACK/PID fields.
+        */
+       txwi = rt2800_drv_get_txwi(entry);
+
+       word = rt2x00_desc_read(txwi, 1);
+       tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
+       tx_ack  = rt2x00_get_field32(word, TXWI_W1_ACK);
+       tx_pid  = rt2x00_get_field32(word, TXWI_W1_PACKETID);
+
+       if (wcid != tx_wcid || ack != tx_ack || (!is_agg && pid != tx_pid)) {
+               rt2x00_dbg(entry->queue->rt2x00dev,
+                          "TX status report missed for queue %d entry %d\n",
+                          entry->queue->qid, entry->entry_idx);
+               return false;
+       }
+
+       return true;
+}
+
 void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi,
                         bool match)
 {
@@ -1059,6 +1100,119 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi,
 }
 EXPORT_SYMBOL_GPL(rt2800_txdone_entry);
 
+void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
+{
+       struct data_queue *queue;
+       struct queue_entry *entry;
+       u32 reg;
+       u8 qid;
+       bool match;
+
+       while (kfifo_get(&rt2x00dev->txstatus_fifo, &reg)) {
+               /*
+                * TX_STA_FIFO_PID_QUEUE is a 2-bit field, thus qid is
+                * guaranteed to be one of the TX QIDs.
+                */
+               qid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_QUEUE);
+               queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
+
+               if (unlikely(rt2x00queue_empty(queue))) {
+                       rt2x00_dbg(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
+                                  qid);
+                       break;
+               }
+
+               entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
+
+               if (unlikely(test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
+                            !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))) {
+                       rt2x00_warn(rt2x00dev, "Data pending for entry %u in queue %u\n",
+                                   entry->entry_idx, qid);
+                       break;
+               }
+
+               match = rt2800_txdone_entry_check(entry, reg);
+               rt2800_txdone_entry(entry, reg, rt2800_drv_get_txwi(entry), match);
+       }
+}
+EXPORT_SYMBOL_GPL(rt2800_txdone);
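
The drain loop relies on kfifo_get() returning 0 once the status FIFO is empty, so it naturally stops after the last report. The consume idiom in isolation, over a statically initialized fifo of u32 words (names illustrative):

	#include <linux/kfifo.h>

	static DEFINE_KFIFO(example_fifo, u32, 16);	/* power-of-two slots */

	static void example_drain(void)
	{
		u32 word;

		while (kfifo_get(&example_fifo, &word)) {
			/* process one TX status word here */
		}
	}
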
+
+static inline bool rt2800_entry_txstatus_timeout(struct rt2x00_dev *rt2x00dev,
+                                                struct queue_entry *entry)
+{
+       bool ret;
+       unsigned long tout;
+
+       if (!test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
+               return false;
+
+       if (test_bit(DEVICE_STATE_FLUSHING, &rt2x00dev->flags))
+               tout = msecs_to_jiffies(50);
+       else
+               tout = msecs_to_jiffies(2000);
+
+       ret = time_after(jiffies, entry->last_action + tout);
+       if (unlikely(ret))
+               rt2x00_dbg(entry->queue->rt2x00dev,
+                          "TX status timeout for entry %d in queue %d\n",
+                          entry->entry_idx, entry->queue->qid);
+       return ret;
+}
+
+bool rt2800_txstatus_timeout(struct rt2x00_dev *rt2x00dev)
+{
+       struct data_queue *queue;
+       struct queue_entry *entry;
+
+       if (!test_bit(DEVICE_STATE_FLUSHING, &rt2x00dev->flags)) {
+               unsigned long tout = msecs_to_jiffies(1000);
+
+               if (time_before(jiffies, rt2x00dev->last_nostatus_check + tout))
+                       return false;
+       }
+
+       rt2x00dev->last_nostatus_check = jiffies;
+
+       tx_queue_for_each(rt2x00dev, queue) {
+               entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
+               if (rt2800_entry_txstatus_timeout(rt2x00dev, entry))
+                       return true;
+       }
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(rt2800_txstatus_timeout);
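
Both timeout helpers above use the wrap-safe jiffies comparison; a plain '<' on jiffies values would break at counter wrap. The idiom in isolation, with the same 50 ms/2000 ms split as rt2800_entry_txstatus_timeout():

	#include <linux/jiffies.h>

	static bool example_deadline_passed(unsigned long stamped, bool flushing)
	{
		unsigned long tout = msecs_to_jiffies(flushing ? 50 : 2000);

		return time_after(jiffies, stamped + tout);
	}
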
+
+void rt2800_txdone_nostatus(struct rt2x00_dev *rt2x00dev)
+{
+       struct data_queue *queue;
+       struct queue_entry *entry;
+
+       /*
+        * Process any trailing TX status reports for IO failures;
+        * loop until we find the first non-IO-error entry. This
+        * can either be a frame which is free, is being uploaded,
+        * or has completed the upload but didn't have an entry
+        * in the TX_STAT_FIFO register yet.
+        */
+       tx_queue_for_each(rt2x00dev, queue) {
+               while (!rt2x00queue_empty(queue)) {
+                       entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
+
+                       if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
+                           !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
+                               break;
+
+                       if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags) ||
+                           rt2800_entry_txstatus_timeout(rt2x00dev, entry))
+                               rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
+                       else
+                               break;
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(rt2800_txdone_nostatus);
+
 static unsigned int rt2800_hw_beacon_base(struct rt2x00_dev *rt2x00dev,
                                          unsigned int index)
 {
index 51d9c2a932cc4181efc4ac1248da5133a3c68836..0dff2c7b30105133122f60cac3417153f5eecc68 100644 (file)
@@ -195,6 +195,9 @@ void rt2800_process_rxwi(struct queue_entry *entry, struct rxdone_entry_desc *tx
 
 void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi,
                         bool match);
+void rt2800_txdone(struct rt2x00_dev *rt2x00dev);
+void rt2800_txdone_nostatus(struct rt2x00_dev *rt2x00dev);
+bool rt2800_txstatus_timeout(struct rt2x00_dev *rt2x00dev);
 
 void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
 void rt2800_clear_beacon(struct queue_entry *entry);
index e1a7ed7e4892efb62138268149228ad20cddc876..ddb88cfeace21816c5490fba5a1f07b8a1787d6d 100644 (file)
@@ -175,161 +175,6 @@ static void rt2800mmio_wakeup(struct rt2x00_dev *rt2x00dev)
        rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
 }
 
-static bool rt2800mmio_txdone_entry_check(struct queue_entry *entry, u32 status)
-{
-       __le32 *txwi;
-       u32 word;
-       int wcid, tx_wcid;
-
-       wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
-
-       txwi = rt2800_drv_get_txwi(entry);
-       word = rt2x00_desc_read(txwi, 1);
-       tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
-
-       return (tx_wcid == wcid);
-}
-
-static bool rt2800mmio_txdone_find_entry(struct queue_entry *entry, void *data)
-{
-       u32 status = *(u32 *)data;
-
-       /*
-        * rt2800pci hardware might reorder frames when exchanging traffic
-        * with multiple BA enabled STAs.
-        *
-        * For example, a tx queue
-        *    [ STA1 | STA2 | STA1 | STA2 ]
-        * can result in tx status reports
-        *    [ STA1 | STA1 | STA2 | STA2 ]
-        * when the hw decides to aggregate the frames for STA1 into one AMPDU.
-        *
-        * To mitigate this effect, associate the tx status to the first frame
-        * in the tx queue with a matching wcid.
-        */
-       if (rt2800mmio_txdone_entry_check(entry, status) &&
-           !test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
-               /*
-                * Got a matching frame, associate the tx status with
-                * the frame
-                */
-               entry->status = status;
-               set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
-               return true;
-       }
-
-       /* Check the next frame */
-       return false;
-}
-
-static bool rt2800mmio_txdone_match_first(struct queue_entry *entry, void *data)
-{
-       u32 status = *(u32 *)data;
-
-       /*
-        * Find the first frame without tx status and assign this status to it
-        * regardless if it matches or not.
-        */
-       if (!test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
-               /*
-                * Got a matching frame, associate the tx status with
-                * the frame
-                */
-               entry->status = status;
-               set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
-               return true;
-       }
-
-       /* Check the next frame */
-       return false;
-}
-static bool rt2800mmio_txdone_release_entries(struct queue_entry *entry,
-                                             void *data)
-{
-       if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
-               rt2800_txdone_entry(entry, entry->status,
-                                   rt2800mmio_get_txwi(entry), true);
-               return false;
-       }
-
-       /* No more frames to release */
-       return true;
-}
-
-static bool rt2800mmio_txdone(struct rt2x00_dev *rt2x00dev)
-{
-       struct data_queue *queue;
-       u32 status;
-       u8 qid;
-       int max_tx_done = 16;
-
-       while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
-               qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
-               if (unlikely(qid >= QID_RX)) {
-                       /*
-                        * Unknown queue, this shouldn't happen. Just drop
-                        * this tx status.
-                        */
-                       rt2x00_warn(rt2x00dev, "Got TX status report with unexpected pid %u, dropping\n",
-                                   qid);
-                       break;
-               }
-
-               queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
-               if (unlikely(queue == NULL)) {
-                       /*
-                        * The queue is NULL, this shouldn't happen. Stop
-                        * processing here and drop the tx status
-                        */
-                       rt2x00_warn(rt2x00dev, "Got TX status for an unavailable queue %u, dropping\n",
-                                   qid);
-                       break;
-               }
-
-               if (unlikely(rt2x00queue_empty(queue))) {
-                       /*
-                        * The queue is empty. Stop processing here
-                        * and drop the tx status.
-                        */
-                       rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
-                                   qid);
-                       break;
-               }
-
-               /*
-                * Let's associate this tx status with the first
-                * matching frame.
-                */
-               if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
-                                               Q_INDEX, &status,
-                                               rt2800mmio_txdone_find_entry)) {
-                       /*
-                        * We cannot match the tx status to any frame, so just
-                        * use the first one.
-                        */
-                       if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
-                                                       Q_INDEX, &status,
-                                                       rt2800mmio_txdone_match_first)) {
-                               rt2x00_warn(rt2x00dev, "No frame found for TX status on queue %u, dropping\n",
-                                           qid);
-                               break;
-                       }
-               }
-
-               /*
-                * Release all frames with a valid tx status.
-                */
-               rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
-                                          Q_INDEX, NULL,
-                                          rt2800mmio_txdone_release_entries);
-
-               if (--max_tx_done == 0)
-                       break;
-       }
-
-       return !max_tx_done;
-}
-
 static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
                                               struct rt2x00_field32 irq_field)
 {
@@ -346,20 +191,6 @@ static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
        spin_unlock_irq(&rt2x00dev->irqmask_lock);
 }
 
-void rt2800mmio_txstatus_tasklet(unsigned long data)
-{
-       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
-       if (rt2800mmio_txdone(rt2x00dev))
-               tasklet_schedule(&rt2x00dev->txstatus_tasklet);
-
-       /*
-        * No need to enable the tx status interrupt here as we always
-        * leave it enabled to minimize the possibility of a tx status
-        * register overflow. See comment in interrupt handler.
-        */
-}
-EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);
-
 void rt2800mmio_pretbtt_tasklet(unsigned long data)
 {
        struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
@@ -424,12 +255,26 @@ void rt2800mmio_autowake_tasklet(unsigned long data)
 }
 EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet);
 
-static void rt2800mmio_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
+static void rt2800mmio_txdone(struct rt2x00_dev *rt2x00dev)
+{
+       bool timeout = false;
+
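+       /*
+        * Drain queued tx status reports; when the fifo is empty but an
+        * entry has waited too long for its report, release the
+        * timed-out entries through the nostatus path as well.
+        */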
+       while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) ||
+              (timeout = rt2800_txstatus_timeout(rt2x00dev))) {
+
+               rt2800_txdone(rt2x00dev);
+
+               if (timeout)
+                       rt2800_txdone_nostatus(rt2x00dev);
+       }
+}
+
+static bool rt2800mmio_fetch_txstatus(struct rt2x00_dev *rt2x00dev)
 {
        u32 status;
-       int i;
+       bool more = false;
 
-       /*
+       /* FIXME: rewrite this comment
         * The TX_FIFO_STATUS interrupt needs special care. We should
         * read TX_STA_FIFO but we should do it immediately as otherwise
         * the register can overflow and we would lose status reports.
@@ -440,28 +285,36 @@ static void rt2800mmio_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
         * because we can schedule the tasklet multiple times (when the
         * interrupt fires again during tx status processing).
         *
-        * Furthermore we don't disable the TX_FIFO_STATUS
-        * interrupt here but leave it enabled so that the TX_STA_FIFO
-        * can also be read while the tx status tasklet gets executed.
-        *
-        * Since we have only one producer and one consumer we don't
+        * The txstatus tasklet is called with INT_SOURCE_CSR_TX_FIFO_STATUS
+        * disabled, so we have only one producer and one consumer - we don't
         * need to lock the kfifo.
         */
-       for (i = 0; i < rt2x00dev->tx->limit; i++) {
+       while (!kfifo_is_full(&rt2x00dev->txstatus_fifo)) {
                status = rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO);
-
                if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
                        break;
 
-               if (!kfifo_put(&rt2x00dev->txstatus_fifo, status)) {
-                       rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n");
-                       break;
-               }
+               kfifo_put(&rt2x00dev->txstatus_fifo, status);
+               more = true;
        }
 
-       /* Schedule the tasklet for processing the tx status. */
-       tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+       return more;
+}
+
+void rt2800mmio_txstatus_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+
+       do {
+               rt2800mmio_txdone(rt2x00dev);
+
+       } while (rt2800mmio_fetch_txstatus(rt2x00dev));
+
+       if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+               rt2800mmio_enable_interrupt(rt2x00dev,
+                                           INT_SOURCE_CSR_TX_FIFO_STATUS);
 }
+EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);
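/*
 * Editorial sketch, not part of this patch: a minimal example of the
 * single-producer/single-consumer kfifo pattern the code above relies
 * on. The IRQ handler is the only producer and the tasklet the only
 * consumer, so the kfifo needs no locking as long as the interrupt
 * source stays masked while the tasklet drains. All demo_* names are
 * hypothetical.
 */
#include <linux/interrupt.h>
#include <linux/kfifo.h>

static u32 demo_read_status(void *dev_id);      /* hypothetical hw read */
static void demo_process_status(u32 status);    /* hypothetical consumer */

static DECLARE_KFIFO(demo_fifo, u32, 128);      /* size: power of two */

static void demo_tasklet_fn(unsigned long data)
{
        u32 status;

        /* Sole consumer: drain everything queued by the IRQ handler. */
        while (kfifo_get(&demo_fifo, &status))
                demo_process_status(status);
}
static DECLARE_TASKLET(demo_tasklet, demo_tasklet_fn, 0);

static irqreturn_t demo_irq(int irq, void *dev_id)
{
        /* Sole producer: kfifo_put() returns 0 when the fifo is full. */
        if (kfifo_put(&demo_fifo, demo_read_status(dev_id)))
                tasklet_schedule(&demo_tasklet);
        return IRQ_HANDLED;
}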
 
 irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
 {
@@ -486,11 +339,8 @@ irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
        mask = ~reg;
 
        if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
-               rt2800mmio_txstatus_interrupt(rt2x00dev);
-               /*
-                * Never disable the TX_FIFO_STATUS interrupt.
-                */
-               rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+               rt2800mmio_fetch_txstatus(rt2x00dev);
+               tasklet_schedule(&rt2x00dev->txstatus_tasklet);
        }
 
        if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
@@ -616,6 +466,53 @@ void rt2800mmio_kick_queue(struct data_queue *queue)
 }
 EXPORT_SYMBOL_GPL(rt2800mmio_kick_queue);
 
+void rt2800mmio_flush_queue(struct data_queue *queue, bool drop)
+{
+       struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+       bool tx_queue = false;
+       unsigned int i;
+
+       switch (queue->qid) {
+       case QID_AC_VO:
+       case QID_AC_VI:
+       case QID_AC_BE:
+       case QID_AC_BK:
+               tx_queue = true;
+               break;
+       case QID_RX:
+               break;
+       default:
+               return;
+       }
+
+       for (i = 0; i < 5; i++) {
+               /*
+                * Check if the driver is already done, otherwise we
+                * have to sleep a little while to give the driver/hw
+                * the opportunity to complete the interrupt processing itself.
+                */
+               if (rt2x00queue_empty(queue))
+                       break;
+
+               /*
+                * For TX queues, process tx status reports directly here
+                * (the tasklet is disabled while we do) to catch tx
+                * status timeouts; otherwise just wait.
+                */
+               if (tx_queue) {
+                       tasklet_disable(&rt2x00dev->txstatus_tasklet);
+                       rt2800mmio_txdone(rt2x00dev);
+                       tasklet_enable(&rt2x00dev->txstatus_tasklet);
+               }
+
+               /*
+                * Wait a little while to give the driver
+                * the opportunity to recover itself.
+                */
+               msleep(50);
+       }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_flush_queue);
+
 void rt2800mmio_stop_queue(struct data_queue *queue)
 {
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
index b63312ce3f27e6599935f695524d3c4a5f807119..3a513273f4149f9d97312c62ebc866cc33b95ef0 100644 (file)
@@ -148,6 +148,7 @@ void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
 /* Queue handlers */
 void rt2800mmio_start_queue(struct data_queue *queue);
 void rt2800mmio_kick_queue(struct data_queue *queue);
+void rt2800mmio_flush_queue(struct data_queue *queue, bool drop);
 void rt2800mmio_stop_queue(struct data_queue *queue);
 void rt2800mmio_queue_init(struct data_queue *queue);
 
index 71b1affc38856be9cb6ef8a584b6244463c1a733..0291441ac54827793c45c4d700df3fcb1b3cd835 100644 (file)
@@ -364,7 +364,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
        .start_queue            = rt2800mmio_start_queue,
        .kick_queue             = rt2800mmio_kick_queue,
        .stop_queue             = rt2800mmio_stop_queue,
-       .flush_queue            = rt2x00mmio_flush_queue,
+       .flush_queue            = rt2800mmio_flush_queue,
        .write_tx_desc          = rt2800mmio_write_tx_desc,
        .write_tx_data          = rt2800_write_tx_data,
        .write_beacon           = rt2800_write_beacon,
index 98a7313fea4aeee3bb49d01d871ab309b60f61f3..19eabf16147bfe0642938894a065d61c4d9809e3 100644 (file)
@@ -116,35 +116,6 @@ static bool rt2800usb_txstatus_pending(struct rt2x00_dev *rt2x00dev)
        return false;
 }
 
-static inline bool rt2800usb_entry_txstatus_timeout(struct queue_entry *entry)
-{
-       bool tout;
-
-       if (!test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
-               return false;
-
-       tout = time_after(jiffies, entry->last_action + msecs_to_jiffies(500));
-       if (unlikely(tout))
-               rt2x00_dbg(entry->queue->rt2x00dev,
-                          "TX status timeout for entry %d in queue %d\n",
-                          entry->entry_idx, entry->queue->qid);
-       return tout;
-
-}
-
-static bool rt2800usb_txstatus_timeout(struct rt2x00_dev *rt2x00dev)
-{
-       struct data_queue *queue;
-       struct queue_entry *entry;
-
-       tx_queue_for_each(rt2x00dev, queue) {
-               entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
-               if (rt2800usb_entry_txstatus_timeout(entry))
-                       return true;
-       }
-       return false;
-}
-
 #define TXSTATUS_READ_INTERVAL 1000000
 
 static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
@@ -171,7 +142,7 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
        }
 
        /* Check if there is any entry that timed out waiting on TX status */
-       if (rt2800usb_txstatus_timeout(rt2x00dev))
+       if (rt2800_txstatus_timeout(rt2x00dev))
                queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
 
        if (rt2800usb_txstatus_pending(rt2x00dev)) {
@@ -501,123 +472,17 @@ static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
 /*
  * TX control handlers
  */
-static bool rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
-{
-       __le32 *txwi;
-       u32 word;
-       int wcid, ack, pid;
-       int tx_wcid, tx_ack, tx_pid, is_agg;
-
-       /*
-        * This frames has returned with an IO error,
-        * so the status report is not intended for this
-        * frame.
-        */
-       if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
-               return false;
-
-       wcid    = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
-       ack     = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
-       pid     = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
-       is_agg  = rt2x00_get_field32(reg, TX_STA_FIFO_TX_AGGRE);
-
-       /*
-        * Validate if this TX status report is intended for
-        * this entry by comparing the WCID/ACK/PID fields.
-        */
-       txwi = rt2800usb_get_txwi(entry);
-
-       word = rt2x00_desc_read(txwi, 1);
-       tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
-       tx_ack  = rt2x00_get_field32(word, TXWI_W1_ACK);
-       tx_pid  = rt2x00_get_field32(word, TXWI_W1_PACKETID);
-
-       if (wcid != tx_wcid || ack != tx_ack || (!is_agg && pid != tx_pid)) {
-               rt2x00_dbg(entry->queue->rt2x00dev,
-                          "TX status report missed for queue %d entry %d\n",
-                          entry->queue->qid, entry->entry_idx);
-               return false;
-       }
-
-       return true;
-}
-
-static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
-{
-       struct data_queue *queue;
-       struct queue_entry *entry;
-       u32 reg;
-       u8 qid;
-       bool match;
-
-       while (kfifo_get(&rt2x00dev->txstatus_fifo, &reg)) {
-               /*
-                * TX_STA_FIFO_PID_QUEUE is a 2-bit field, thus qid is
-                * guaranteed to be one of the TX QIDs .
-                */
-               qid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_QUEUE);
-               queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
-
-               if (unlikely(rt2x00queue_empty(queue))) {
-                       rt2x00_dbg(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
-                                  qid);
-                       break;
-               }
-
-               entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
-
-               if (unlikely(test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
-                            !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))) {
-                       rt2x00_warn(rt2x00dev, "Data pending for entry %u in queue %u\n",
-                                   entry->entry_idx, qid);
-                       break;
-               }
-
-               match = rt2800usb_txdone_entry_check(entry, reg);
-               rt2800_txdone_entry(entry, reg, rt2800usb_get_txwi(entry), match);
-       }
-}
-
-static void rt2800usb_txdone_nostatus(struct rt2x00_dev *rt2x00dev)
-{
-       struct data_queue *queue;
-       struct queue_entry *entry;
-
-       /*
-        * Process any trailing TX status reports for IO failures,
-        * we loop until we find the first non-IO error entry. This
-        * can either be a frame which is free, is being uploaded,
-        * or has completed the upload but didn't have an entry
-        * in the TX_STAT_FIFO register yet.
-        */
-       tx_queue_for_each(rt2x00dev, queue) {
-               while (!rt2x00queue_empty(queue)) {
-                       entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
-
-                       if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
-                           !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
-                               break;
-
-                       if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags) ||
-                           rt2800usb_entry_txstatus_timeout(entry))
-                               rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
-                       else
-                               break;
-               }
-       }
-}
-
 static void rt2800usb_work_txdone(struct work_struct *work)
 {
        struct rt2x00_dev *rt2x00dev =
            container_of(work, struct rt2x00_dev, txdone_work);
 
        while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) ||
-              rt2800usb_txstatus_timeout(rt2x00dev)) {
+              rt2800_txstatus_timeout(rt2x00dev)) {
 
-               rt2800usb_txdone(rt2x00dev);
+               rt2800_txdone(rt2x00dev);
 
-               rt2800usb_txdone_nostatus(rt2x00dev);
+               rt2800_txdone_nostatus(rt2x00dev);
 
                /*
                 * The hw may delay sending the packet after DMA complete
index a279a4363bc15a2e0f502dd2d0428291df59eba3..4b1744e9fb78a08c59fe0ac71d0d9962ae6761be 100644 (file)
@@ -665,6 +665,7 @@ enum rt2x00_state_flags {
        DEVICE_STATE_STARTED,
        DEVICE_STATE_ENABLED_RADIO,
        DEVICE_STATE_SCANNING,
+       DEVICE_STATE_FLUSHING,
 
        /*
         * Driver configuration
@@ -980,6 +981,8 @@ struct rt2x00_dev {
         */
        DECLARE_KFIFO_PTR(txstatus_fifo, u32);
 
+       unsigned long last_nostatus_check;
+
        /*
         * Timer to ensure tx status reports are read (rt2800usb).
         */
index acc399b5574e021de4e9767be7c18937b14caf69..61ba573e8bf13910f3892aa72153725e4311d663 100644 (file)
@@ -464,11 +464,7 @@ static ssize_t rt2x00debug_read_##__name(struct file *file,        \
                                                                \
        size = sprintf(line, __format, value);                  \
                                                                \
-       if (copy_to_user(buf, line, size))                      \
-               return -EFAULT;                                 \
-                                                               \
-       *offset += size;                                        \
-       return size;                                            \
+       return simple_read_from_buffer(buf, length, offset, line, size); \
 }
 
 #define RT2X00DEBUGFS_OPS_WRITE(__name, __type)                        \
@@ -545,11 +541,7 @@ static ssize_t rt2x00debug_read_dev_flags(struct file *file,
 
        size = sprintf(line, "0x%.8x\n", (unsigned int)intf->rt2x00dev->flags);
 
-       if (copy_to_user(buf, line, size))
-               return -EFAULT;
-
-       *offset += size;
-       return size;
+       return simple_read_from_buffer(buf, length, offset, line, size);
 }
 
 static const struct file_operations rt2x00debug_fop_dev_flags = {
@@ -574,11 +566,7 @@ static ssize_t rt2x00debug_read_cap_flags(struct file *file,
 
        size = sprintf(line, "0x%.8x\n", (unsigned int)intf->rt2x00dev->cap_flags);
 
-       if (copy_to_user(buf, line, size))
-               return -EFAULT;
-
-       *offset += size;
-       return size;
+       return simple_read_from_buffer(buf, length, offset, line, size);
 }
 
 static const struct file_operations rt2x00debug_fop_cap_flags = {
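/*
 * Editorial sketch, not part of this patch: simple_read_from_buffer()
 * replaces the open-coded copy_to_user() plus offset bookkeeping seen
 * above and, unlike the removed code, clamps the copy to the caller's
 * length and handles partial and repeated reads correctly. The value
 * printed here is hypothetical.
 */
#include <linux/fs.h>
#include <linux/kernel.h>

static ssize_t demo_read(struct file *file, char __user *buf,
                         size_t length, loff_t *offset)
{
        char line[16];
        int size;

        size = scnprintf(line, sizeof(line), "0x%.8x\n", 0x12345678U);

        return simple_read_from_buffer(buf, length, offset, line, size);
}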
index fa2fd64084ac91c497c0b32f9f465af30d5ad076..2825560e2424dbc766c5d5489491ff7dc67c5211 100644 (file)
@@ -720,8 +720,12 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
                return;
 
+       set_bit(DEVICE_STATE_FLUSHING, &rt2x00dev->flags);
+
        tx_queue_for_each(rt2x00dev, queue)
                rt2x00queue_flush_queue(queue, drop);
+
+       clear_bit(DEVICE_STATE_FLUSHING, &rt2x00dev->flags);
 }
 EXPORT_SYMBOL_GPL(rt2x00mac_flush);
 
index 710e9641552e8ed65f5229f7883242a4b92970b1..92ddc19e7bf747a23d0eb24c15b05ff111751754 100644 (file)
@@ -113,6 +113,7 @@ int rt2x00queue_map_txskb(struct queue_entry *entry)
                return -ENOMEM;
 
        skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
+       rt2x00lib_dmadone(entry);
        return 0;
 }
 EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
@@ -1038,6 +1039,7 @@ void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
         */
        tx_queue_for_each(rt2x00dev, queue)
                rt2x00queue_start_queue(queue);
+       rt2x00dev->last_nostatus_check = jiffies;
 
        rt2x00queue_start_queue(rt2x00dev->rx);
 }
index c2d5b495c179a1021dd4cd4221c0032f3a99e34a..c089540116fa72e6952c5a2670aaf32cb99a53e8 100644 (file)
@@ -146,7 +146,7 @@ static int rtl8187_register_led(struct ieee80211_hw *dev,
        led->dev = dev;
        led->ledpin = ledpin;
        led->is_radio = is_radio;
-       strncpy(led->name, name, sizeof(led->name));
+       strlcpy(led->name, name, sizeof(led->name));
 
        led->led_dev.name = led->name;
        led->led_dev.default_trigger = default_trigger;
index 505ab1b055ff43cecd95dabf6634bfbc4f7c6dad..73f6fc0d4a018c5e5e9dd78d73662888076de969 100644 (file)
@@ -6231,6 +6231,8 @@ static const struct usb_device_id dev_table[] = {
 {USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3308, 0xff, 0xff, 0xff),
        .driver_info = (unsigned long)&rtl8192cu_fops},
 /* Currently untested 8188 series devices */
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x018a, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
 {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8191, 0xff, 0xff, 0xff),
        .driver_info = (unsigned long)&rtl8192cu_fops},
 {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8170, 0xff, 0xff, 0xff),
index b026e80940a4dc6fa57d790ca9787b59a57d7c06..6fbf8845a2ab6d03220df3d3735ab2113dc06fcc 100644 (file)
@@ -1324,13 +1324,13 @@ bool exhalbtc_initlize_variables_wifi_only(struct rtl_priv *rtlpriv)
 
        switch (rtlpriv->rtlhal.interface) {
        case INTF_PCI:
-               wifionly_cfg->chip_interface = BTC_INTF_PCI;
+               wifionly_cfg->chip_interface = WIFIONLY_INTF_PCI;
                break;
        case INTF_USB:
-               wifionly_cfg->chip_interface = BTC_INTF_USB;
+               wifionly_cfg->chip_interface = WIFIONLY_INTF_USB;
                break;
        default:
-               wifionly_cfg->chip_interface = BTC_INTF_UNKNOWN;
+               wifionly_cfg->chip_interface = WIFIONLY_INTF_UNKNOWN;
                break;
        }
 
index 988d5ac57d026bb7e4dc6a5f20acd87b42df69a4..cfc8762c55f4aeaf6d2b1a0e0801f05f58ee992b 100644 (file)
@@ -951,12 +951,8 @@ static bool _rtl88ee_init_mac(struct ieee80211_hw *hw)
 static void _rtl88ee_hw_configure(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u8 reg_bw_opmode;
-       u32 reg_ratr, reg_prsr;
+       u32 reg_prsr;
 
-       reg_bw_opmode = BW_OPMODE_20MHZ;
-       reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
-           RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
        reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
 
        rtl_write_dword(rtlpriv, REG_RRSR, reg_prsr);
index 545115db507e71c9876b66e53ceee5486b76e1a1..f783e4a8083daf8bf15808a1b5cc96f25a41493f 100644 (file)
@@ -799,11 +799,9 @@ static void _rtl8723e_hw_configure(struct ieee80211_hw *hw)
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        u8 reg_bw_opmode;
-       u32 reg_ratr, reg_prsr;
+       u32 reg_prsr;
 
        reg_bw_opmode = BW_OPMODE_20MHZ;
-       reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
-           RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
        reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
 
        rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, 0x8);
index 0f3b98c5227f2f40163a60fa2b12c9ba91924879..87bc21bb5e8bdae090280cd3fc3bcd14ad3dadca 100644 (file)
@@ -1905,10 +1905,6 @@ struct rtl_efuse {
        u8 efuse_map[2][EFUSE_MAX_LOGICAL_SIZE];
        u16 efuse_usedbytes;
        u8 efuse_usedpercentage;
-#ifdef EFUSE_REPG_WORKAROUND
-       bool efuse_re_pg_sec1flag;
-       u8 efuse_re_pg_data[8];
-#endif
 
        u8 autoload_failflag;
        u8 autoload_status;
index 01edf960ff3cc72f5c4d383acd915525f1bbe333..182b0662937145eac36293c8ebe638c6d467fa34 100644 (file)
@@ -282,10 +282,8 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
        struct rsi_hw *adapter = common->priv;
        struct ieee80211_vif *vif;
        struct ieee80211_tx_info *info;
-       struct skb_info *tx_params;
        struct ieee80211_bss_conf *bss;
        int status = -EINVAL;
-       u8 header_size;
 
        if (!skb)
                return 0;
@@ -297,8 +295,6 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
                goto err;
        vif = info->control.vif;
        bss = &vif->bss_conf;
-       tx_params = (struct skb_info *)info->driver_data;
-       header_size = tx_params->internal_hdr_size;
 
        if (((vif->type == NL80211_IFTYPE_STATION) ||
             (vif->type == NL80211_IFTYPE_P2P_CLIENT)) &&
index 4e510cbe0a89f011d6f5af6db22846146702c1e8..e56fc83faf0ef8d76b75719cde4547fd1cbcdf17 100644 (file)
@@ -924,7 +924,7 @@ static int rsi_hal_key_config(struct ieee80211_hw *hw,
        if (status)
                return status;
 
-       if (vif->type == NL80211_IFTYPE_STATION && key->key &&
+       if (vif->type == NL80211_IFTYPE_STATION &&
            (key->cipher == WLAN_CIPHER_SUITE_WEP104 ||
             key->cipher == WLAN_CIPHER_SUITE_WEP40)) {
                if (!rsi_send_block_unblock_frame(adapter->priv, false))
index c0a163e404029ce6f3cbb10100d2cf02ba7c0492..f360690396dd02a536ac7664a2c477028b3c2524 100644 (file)
@@ -266,15 +266,17 @@ static void rsi_rx_done_handler(struct urb *urb)
        if (urb->status)
                goto out;
 
-       if (urb->actual_length <= 0) {
-               rsi_dbg(INFO_ZONE, "%s: Zero length packet\n", __func__);
+       if (urb->actual_length <= 0 ||
+           urb->actual_length > rx_cb->rx_skb->len) {
+               rsi_dbg(INFO_ZONE, "%s: Invalid packet length = %d\n",
+                       __func__, urb->actual_length);
                goto out;
        }
        if (skb_queue_len(&dev->rx_q) >= RSI_MAX_RX_PKTS) {
                rsi_dbg(INFO_ZONE, "Max RX packets reached\n");
                goto out;
        }
-       skb_put(rx_cb->rx_skb, urb->actual_length);
+       skb_trim(rx_cb->rx_skb, urb->actual_length);
        skb_queue_tail(&dev->rx_q, rx_cb->rx_skb);
 
        rsi_set_event(&dev->rx_thread.event);
@@ -308,6 +310,7 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num)
        if (!skb)
                return -ENOMEM;
        skb_reserve(skb, MAX_DWORD_ALIGN_BYTES);
+       skb_put(skb, RSI_MAX_RX_USB_PKT_SIZE - MAX_DWORD_ALIGN_BYTES);
        dword_align_bytes = (unsigned long)skb->data & 0x3f;
        if (dword_align_bytes > 0)
                skb_push(skb, dword_align_bytes);
@@ -319,7 +322,7 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num)
                          usb_rcvbulkpipe(dev->usbdev,
                          dev->bulkin_endpoint_addr[ep_num - 1]),
                          urb->transfer_buffer,
-                         RSI_MAX_RX_USB_PKT_SIZE,
+                         skb->len,
                          rsi_rx_done_handler,
                          rx_cb);
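/*
 * Editorial sketch, not part of this patch: the skb sizing pattern used
 * above. The skb is grown to the maximum transfer length before the URB
 * is submitted, the URB transfers into skb->data for at most skb->len
 * bytes, and the completion handler shrinks the skb to what the device
 * actually delivered. DEMO_MAX_RX_LEN and the rx hand-off are
 * hypothetical.
 */
#include <linux/skbuff.h>
#include <linux/usb.h>

#define DEMO_MAX_RX_LEN 2048

static void demo_rx_done(struct urb *urb)
{
        struct sk_buff *skb = urb->context;

        /* Reject errored, zero-length and over-long completions. */
        if (urb->status || !urb->actual_length ||
            urb->actual_length > skb->len) {
                dev_kfree_skb_any(skb);
                return;
        }

        skb_trim(skb, urb->actual_length);      /* keep only real payload */
        /* ... queue skb to the rx path ... */
}

static int demo_rx_submit(struct usb_device *udev, unsigned int pipe,
                          struct urb *urb)
{
        struct sk_buff *skb = dev_alloc_skb(DEMO_MAX_RX_LEN);

        if (!skb)
                return -ENOMEM;

        skb_put(skb, DEMO_MAX_RX_LEN);          /* pre-size to the maximum */
        usb_fill_bulk_urb(urb, udev, pipe, skb->data, skb->len,
                          demo_rx_done, skb);
        return usb_submit_urb(urb, GFP_KERNEL);
}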
 
index d9ff3b8be86ee19156ed3bb7c4d322a0aeea8515..60f1f286b030121cb38424b652bb840ed7661f26 100644 (file)
@@ -75,7 +75,6 @@ static inline int rsi_kill_thread(struct rsi_thread *handle)
        atomic_inc(&handle->thread_done);
        rsi_set_event(&handle->event);
 
-       wait_for_completion(&handle->completion);
        return kthread_stop(handle->task);
 }
 
index f7b1b0062db32382b993f2f90695d8af889ba9dc..8c800ef23159375ee0d4171cad192e0bcd441477 100644 (file)
@@ -624,9 +624,9 @@ cw1200_tx_h_bt(struct cw1200_common *priv,
                        priority = WSM_EPTA_PRIORITY_ACTION;
                else if (ieee80211_is_mgmt(t->hdr->frame_control))
                        priority = WSM_EPTA_PRIORITY_MGT;
-               else if ((wsm->queue_id == WSM_QUEUE_VOICE))
+               else if (wsm->queue_id == WSM_QUEUE_VOICE)
                        priority = WSM_EPTA_PRIORITY_VOICE;
-               else if ((wsm->queue_id == WSM_QUEUE_VIDEO))
+               else if (wsm->queue_id == WSM_QUEUE_VIDEO)
                        priority = WSM_EPTA_PRIORITY_VIDEO;
                else
                        priority = WSM_EPTA_PRIORITY_DATA;
index 89b0d0fade9f2bcf9594d2a892b155089d4425ab..26b18733687536fd4494568c4927f9e646e8cd72 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
 
 #include "wlcore.h"
 #include "debug.h"
@@ -957,6 +958,8 @@ static void wl1271_recovery_work(struct work_struct *work)
        BUG_ON(wl->conf.recovery.bug_on_recovery &&
               !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
 
+       clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
+
        if (wl->conf.recovery.no_recovery) {
                wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
                goto out_unlock;
@@ -6625,13 +6628,25 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
        }
 
 #ifdef CONFIG_PM
+       device_init_wakeup(wl->dev, true);
+
        ret = enable_irq_wake(wl->irq);
        if (!ret) {
                wl->irq_wake_enabled = true;
-               device_init_wakeup(wl->dev, 1);
                if (pdev_data->pwr_in_suspend)
                        wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
        }
+
+       res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+       if (res) {
+               wl->wakeirq = res->start;
+               wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
+               ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
+               if (ret)
+                       wl->wakeirq = -ENODEV;
+       } else {
+               wl->wakeirq = -ENODEV;
+       }
 #endif
        disable_irq(wl->irq);
        wl1271_power_off(wl);
@@ -6659,6 +6674,9 @@ out_unreg:
        wl1271_unregister_hw(wl);
 
 out_irq:
+       if (wl->wakeirq >= 0)
+               dev_pm_clear_wake_irq(wl->dev);
+       device_init_wakeup(wl->dev, false);
        free_irq(wl->irq, wl);
 
 out_free_nvs:
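/*
 * Editorial sketch, not part of this patch: the general shape of wiring
 * an optional dedicated wake IRQ so the PM core arms it across suspend,
 * as done in the probe path above. Error handling is simplified and the
 * demo_* name is hypothetical.
 */
#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int demo_setup_wakeirq(struct device *dev, int wakeirq)
{
        int ret;

        device_init_wakeup(dev, true);

        ret = dev_pm_set_dedicated_wake_irq(dev, wakeirq);
        if (ret)
                device_init_wakeup(dev, false);

        return ret;
}

/*
 * Teardown mirrors it: dev_pm_clear_wake_irq(dev), then
 * device_init_wakeup(dev, false), as in the remove path below.
 */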
@@ -6710,6 +6728,7 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev)
        int ret;
        unsigned long start_time = jiffies;
        bool pending = false;
+       bool recovery = false;
 
        /* Nothing to do if no ELP mode requested */
        if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
@@ -6726,7 +6745,7 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev)
 
        ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
        if (ret < 0) {
-               wl12xx_queue_recovery_work(wl);
+               recovery = true;
                goto err;
        }
 
@@ -6734,11 +6753,12 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev)
                ret = wait_for_completion_timeout(&compl,
                        msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
                if (ret == 0) {
-                       wl1271_error("ELP wakeup timeout!");
-                       wl12xx_queue_recovery_work(wl);
+                       wl1271_warning("ELP wakeup timeout!");
 
                        /* Return no error for runtime PM for recovery */
-                       return 0;
+                       ret = 0;
+                       recovery = true;
+                       goto err;
                }
        }
 
@@ -6753,6 +6773,12 @@ err:
        spin_lock_irqsave(&wl->wl_lock, flags);
        wl->elp_compl = NULL;
        spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+       if (recovery) {
+               set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
+               wl12xx_queue_recovery_work(wl);
+       }
+
        return ret;
 }
 
@@ -6815,10 +6841,16 @@ int wlcore_remove(struct platform_device *pdev)
        if (!wl->initialized)
                return 0;
 
-       if (wl->irq_wake_enabled) {
-               device_init_wakeup(wl->dev, 0);
-               disable_irq_wake(wl->irq);
+       if (wl->wakeirq >= 0) {
+               dev_pm_clear_wake_irq(wl->dev);
+               wl->wakeirq = -ENODEV;
        }
+
+       device_init_wakeup(wl->dev, false);
+
+       if (wl->irq_wake_enabled)
+               disable_irq_wake(wl->irq);
+
        wl1271_unregister_hw(wl);
 
        pm_runtime_put_sync(wl->dev);
index 750bea3574ee4e994b01f9e49a8046e1afa33211..4c2154b9e6a3e59bcb772b34031e6db860227ccb 100644 (file)
@@ -241,7 +241,7 @@ static const struct of_device_id wlcore_sdio_of_match_table[] = {
        { }
 };
 
-static int wlcore_probe_of(struct device *dev, int *irq,
+static int wlcore_probe_of(struct device *dev, int *irq, int *wakeirq,
                           struct wlcore_platdev_data *pdev_data)
 {
        struct device_node *np = dev->of_node;
@@ -259,6 +259,8 @@ static int wlcore_probe_of(struct device *dev, int *irq,
                return -EINVAL;
        }
 
+       *wakeirq = irq_of_parse_and_map(np, 1);
+
        /* optional clock frequency params */
        of_property_read_u32(np, "ref-clock-frequency",
                             &pdev_data->ref_clock_freq);
@@ -268,7 +270,7 @@ static int wlcore_probe_of(struct device *dev, int *irq,
        return 0;
 }
 #else
-static int wlcore_probe_of(struct device *dev, int *irq,
+static int wlcore_probe_of(struct device *dev, int *irq, int *wakeirq,
                           struct wlcore_platdev_data *pdev_data)
 {
        return -ENODATA;
@@ -280,10 +282,10 @@ static int wl1271_probe(struct sdio_func *func,
 {
        struct wlcore_platdev_data *pdev_data;
        struct wl12xx_sdio_glue *glue;
-       struct resource res[1];
+       struct resource res[2];
        mmc_pm_flag_t mmcflags;
        int ret = -ENOMEM;
-       int irq;
+       int irq, wakeirq;
        const char *chip_family;
 
        /* We are only able to handle the wlan function */
@@ -308,7 +310,7 @@ static int wl1271_probe(struct sdio_func *func,
        /* Use block mode for transferring over one block size of data */
        func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
 
-       ret = wlcore_probe_of(&func->dev, &irq, pdev_data);
+       ret = wlcore_probe_of(&func->dev, &irq, &wakeirq, pdev_data);
        if (ret)
                goto out;
 
@@ -351,6 +353,11 @@ static int wl1271_probe(struct sdio_func *func,
                       irqd_get_trigger_type(irq_get_irq_data(irq));
        res[0].name = "irq";
 
+       res[1].start = wakeirq;
+       res[1].flags = IORESOURCE_IRQ |
+                      irqd_get_trigger_type(irq_get_irq_data(wakeirq));
+       res[1].name = "wakeirq";
+
        ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
        if (ret) {
                dev_err(glue->dev, "can't add resources\n");
index d4b1f66ef45701871e76c0030adbbac2296414ff..dd14850b06033ebfa014fa5d351ec9bf0e0207fb 100644 (file)
@@ -199,8 +199,10 @@ struct wl1271 {
        struct wl1271_if_operations *if_ops;
 
        int irq;
+       int wakeirq;
 
        int irq_flags;
+       int wakeirq_flags;
 
        spinlock_t wl_lock;
 
index 1f6d9f357e57de8d733e86de73ac90b64cf3ffd5..9ccd780695f0a6cb8a1eec04ffcc351942d0130e 100644 (file)
@@ -235,7 +235,7 @@ void zd_mac_clear(struct zd_mac *mac)
 {
        flush_workqueue(zd_workqueue);
        zd_chip_clear(&mac->chip);
-       ZD_ASSERT(!spin_is_locked(&mac->lock));
+       lockdep_assert_held(&mac->lock);
        ZD_MEMCLEAR(mac, sizeof(struct zd_mac));
 }
 
index a46a1e94505d01d782efea328945d91252449981..936c0b3e0ba28ec1f6586a5bab86d403a86d0dd6 100644 (file)
@@ -241,8 +241,9 @@ struct xenvif_hash_cache {
 struct xenvif_hash {
        unsigned int alg;
        u32 flags;
+       bool mapping_sel;
        u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
-       u32 mapping[XEN_NETBK_MAX_HASH_MAPPING_SIZE];
+       u32 mapping[2][XEN_NETBK_MAX_HASH_MAPPING_SIZE];
        unsigned int size;
        struct xenvif_hash_cache cache;
 };
index 3c4c58b9fe76edfbf3d27fb5b6dbd0184ba706c0..0ccb021f1e78687d7c7a9814a05369aafe7c6508 100644 (file)
@@ -324,7 +324,8 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
                return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
 
        vif->hash.size = size;
-       memset(vif->hash.mapping, 0, sizeof(u32) * size);
+       memset(vif->hash.mapping[vif->hash.mapping_sel], 0,
+              sizeof(u32) * size);
 
        return XEN_NETIF_CTRL_STATUS_SUCCESS;
 }
@@ -332,31 +333,49 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
 u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
                            u32 off)
 {
-       u32 *mapping = &vif->hash.mapping[off];
-       struct gnttab_copy copy_op = {
+       u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel];
+       unsigned int nr = 1;
+       struct gnttab_copy copy_op[2] = {{
                .source.u.ref = gref,
                .source.domid = vif->domid,
-               .dest.u.gmfn = virt_to_gfn(mapping),
                .dest.domid = DOMID_SELF,
-               .dest.offset = xen_offset_in_page(mapping),
-               .len = len * sizeof(u32),
+               .len = len * sizeof(*mapping),
                .flags = GNTCOPY_source_gref
-       };
+       }};
 
-       if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
+       if ((off + len < off) || (off + len > vif->hash.size) ||
+           len > XEN_PAGE_SIZE / sizeof(*mapping))
                return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
 
-       while (len-- != 0)
-               if (mapping[off++] >= vif->num_queues)
-                       return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+       copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off);
+       copy_op[0].dest.offset = xen_offset_in_page(mapping + off);
+       if (copy_op[0].dest.offset + copy_op[0].len > XEN_PAGE_SIZE) {
+               copy_op[1] = copy_op[0];
+               copy_op[1].source.offset = XEN_PAGE_SIZE - copy_op[0].dest.offset;
+               copy_op[1].dest.u.gmfn = virt_to_gfn(mapping + off + len);
+               copy_op[1].dest.offset = 0;
+               copy_op[1].len = copy_op[0].len - copy_op[1].source.offset;
+               copy_op[0].len = copy_op[1].source.offset;
+               nr = 2;
+       }
 
-       if (copy_op.len != 0) {
-               gnttab_batch_copy(&copy_op, 1);
+       memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel],
+              vif->hash.size * sizeof(*mapping));
 
-               if (copy_op.status != GNTST_okay)
+       if (copy_op[0].len != 0) {
+               gnttab_batch_copy(copy_op, nr);
+
+               if (copy_op[0].status != GNTST_okay ||
+                   copy_op[nr - 1].status != GNTST_okay)
                        return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
        }
 
+       while (len-- != 0)
+               if (mapping[off++] >= vif->num_queues)
+                       return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+       vif->hash.mapping_sel = !vif->hash.mapping_sel;
+
        return XEN_NETIF_CTRL_STATUS_SUCCESS;
 }
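/*
 * Editorial sketch, not part of this patch: the double-buffering scheme
 * the hunk above introduces. Readers always index table[sel]; an update
 * builds the inactive copy, validates it, and flips the selector only
 * on success, so a failed update never disturbs the table readers see.
 * Sizes and demo_* names are hypothetical.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>

struct demo_map {
        bool sel;
        u32 table[2][64];
};

static int demo_map_update(struct demo_map *m, const u32 *src,
                           size_t off, size_t n, u32 limit)
{
        u32 *next = m->table[!m->sel];
        size_t i;

        if (off + n < off || off + n > ARRAY_SIZE(m->table[0]))
                return -EINVAL;

        /* Start from the live copy, then apply the update off-line. */
        memcpy(next, m->table[m->sel], sizeof(m->table[0]));
        memcpy(next + off, src, n * sizeof(*src));

        for (i = off; i < off + n; i++)
                if (next[i] >= limit)
                        return -EINVAL;         /* live copy untouched */

        m->sel = !m->sel;                       /* publish the new copy */
        return 0;
}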
 
@@ -408,6 +427,8 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
        }
 
        if (vif->hash.size != 0) {
+               const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel];
+
                seq_puts(m, "\nHash Mapping:\n");
 
                for (i = 0; i < vif->hash.size; ) {
@@ -420,7 +441,7 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
                        seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);
 
                        for (j = 0; j < n; j++, i++)
-                               seq_printf(m, "%4u ", vif->hash.mapping[i]);
+                               seq_printf(m, "%4u ", mapping[i]);
 
                        seq_puts(m, "\n");
                }
index 92274c2372008a57ba12ca960bafa84cd2eac7b3..182d6770f1027120b5cd7165e4b7eea7c8297299 100644 (file)
@@ -162,10 +162,12 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
        if (size == 0)
                return skb_get_hash_raw(skb) % dev->real_num_tx_queues;
 
-       return vif->hash.mapping[skb_get_hash_raw(skb) % size];
+       return vif->hash.mapping[vif->hash.mapping_sel]
+                               [skb_get_hash_raw(skb) % size];
 }
 
-static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
index 5a9562881d4ef87ddfbd749555f1c8eccf01acc5..9fe3fff818b8a42281b30bcd3bba83c0e0dd36f8 100644 (file)
@@ -537,8 +537,10 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 
        INIT_WORK(&ctrl->ana_work, nvme_ana_work);
        ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
-       if (!ctrl->ana_log_buf)
+       if (!ctrl->ana_log_buf) {
+               error = -ENOMEM;
                goto out;
+       }
 
        error = nvme_read_ana_log(ctrl, true);
        if (error)
@@ -547,7 +549,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 out_free_ana_log_buf:
        kfree(ctrl->ana_log_buf);
 out:
-       return -ENOMEM;
+       return error;
 }
 
 void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
index 778c4f76a884320b0fad349260251acf796462a2..2153956a0b207cae268ffabf8392a3025f22432b 100644 (file)
@@ -135,7 +135,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
                if (val & PCIE_ATU_ENABLE)
                        return;
 
-               usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+               mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Outbound iATU is not being enabled\n");
 }
@@ -178,7 +178,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
                if (val & PCIE_ATU_ENABLE)
                        return;
 
-               usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+               mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Outbound iATU is not being enabled\n");
 }
@@ -236,7 +236,7 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
                if (val & PCIE_ATU_ENABLE)
                        return 0;
 
-               usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+               mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Inbound iATU is not being enabled\n");
 
@@ -282,7 +282,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
                if (val & PCIE_ATU_ENABLE)
                        return 0;
 
-               usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+               mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Inbound iATU is not being enabled\n");
 
index 96126fd8403ccd79450936b7d4f52de551d07c40..9f1a5e399b7033eba9918a7981bdfdb0ea957a7a 100644 (file)
@@ -26,8 +26,7 @@
 
 /* Parameters for the waiting for iATU enabled routine */
 #define LINK_WAIT_MAX_IATU_RETRIES     5
-#define LINK_WAIT_IATU_MIN             9000
-#define LINK_WAIT_IATU_MAX             10000
+#define LINK_WAIT_IATU                 9
 
 /* Synopsys-specific PCIe configuration registers */
 #define PCIE_PORT_LINK_CONTROL         0x710
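/*
 * Editorial note, not part of this patch: usleep_range() may only be
 * used in sleepable context, while mdelay() busy-waits and is also safe
 * in atomic context, which is presumably why the iATU polls above were
 * converted. A minimal sketch of the resulting loop, with hypothetical
 * names:
 */
#include <linux/delay.h>
#include <linux/io.h>

static bool demo_wait_enabled(void __iomem *reg, u32 enable_bit,
                              int retries, unsigned int delay_ms)
{
        while (retries--) {
                if (readl(reg) & enable_bit)
                        return true;
                mdelay(delay_ms);       /* busy-wait, no scheduling */
        }
        return false;                   /* caller reports the timeout */
}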
index 50eb0729385b8a86b106f561ad90898042a881e6..a41d79b8d46a2a8b7c2fc5c371e327643a74178c 100644 (file)
@@ -1145,7 +1145,6 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
 {
        struct device *dev = &pcie->pdev->dev;
        struct device_node *np = dev->of_node;
-       unsigned int i;
        int ret;
 
        INIT_LIST_HEAD(&pcie->resources);
@@ -1179,13 +1178,58 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
                                         resource_size(&pcie->io) - 1);
                pcie->realio.name = "PCI I/O";
 
+               pci_add_resource(&pcie->resources, &pcie->realio);
+       }
+
+       return devm_request_pci_bus_resources(dev, &pcie->resources);
+}
+
+/*
+ * This is a copy of pci_host_probe(), except that it does the I/O
+ * remap as the last step, once we are sure we won't fail.
+ *
+ * It should be removed once the I/O remap error handling issue has
+ * been sorted out.
+ */
+static int mvebu_pci_host_probe(struct pci_host_bridge *bridge)
+{
+       struct mvebu_pcie *pcie;
+       struct pci_bus *bus, *child;
+       int ret;
+
+       ret = pci_scan_root_bus_bridge(bridge);
+       if (ret < 0) {
+               dev_err(bridge->dev.parent, "Scanning root bridge failed");
+               return ret;
+       }
+
+       pcie = pci_host_bridge_priv(bridge);
+       if (resource_size(&pcie->io) != 0) {
+               unsigned int i;
+
                for (i = 0; i < resource_size(&pcie->realio); i += SZ_64K)
                        pci_ioremap_io(i, pcie->io.start + i);
+       }
 
-               pci_add_resource(&pcie->resources, &pcie->realio);
+       bus = bridge->bus;
+
+       /*
+        * We insert PCI resources into the iomem_resource and
+        * ioport_resource trees in either pci_bus_claim_resources()
+        * or pci_bus_assign_resources().
+        */
+       if (pci_has_flag(PCI_PROBE_ONLY)) {
+               pci_bus_claim_resources(bus);
+       } else {
+               pci_bus_size_bridges(bus);
+               pci_bus_assign_resources(bus);
+
+               list_for_each_entry(child, &bus->children, node)
+                       pcie_bus_configure_settings(child);
        }
 
-       return devm_request_pci_bus_resources(dev, &pcie->resources);
+       pci_bus_add_devices(bus);
+       return 0;
 }
 
 static int mvebu_pcie_probe(struct platform_device *pdev)
@@ -1268,7 +1312,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
        bridge->align_resource = mvebu_pcie_align_resource;
        bridge->msi = pcie->msi;
 
-       return pci_host_probe(bridge);
+       return mvebu_pci_host_probe(bridge);
 }
 
 static const struct of_device_id mvebu_pcie_of_match_table[] = {
index ef0b1b6ba86f8fad2a570187252e7579e12ea129..12afa7fdf77e9569d78f517a77b01de129151937 100644 (file)
@@ -457,17 +457,18 @@ static void acpiphp_native_scan_bridge(struct pci_dev *bridge)
 /**
  * enable_slot - enable, configure a slot
  * @slot: slot to be enabled
+ * @bridge: true if enable is for the whole bridge (not a single slot)
  *
  * This function should be called per *physical slot*,
  * not per each slot object in ACPI namespace.
  */
-static void enable_slot(struct acpiphp_slot *slot)
+static void enable_slot(struct acpiphp_slot *slot, bool bridge)
 {
        struct pci_dev *dev;
        struct pci_bus *bus = slot->bus;
        struct acpiphp_func *func;
 
-       if (bus->self && hotplug_is_native(bus->self)) {
+       if (bridge && bus->self && hotplug_is_native(bus->self)) {
                /*
                 * If native hotplug is used, it will take care of hotplug
                 * slot management and resource allocation for hotplug
@@ -701,7 +702,7 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
                                        trim_stale_devices(dev);
 
                        /* configure all functions */
-                       enable_slot(slot);
+                       enable_slot(slot, true);
                } else {
                        disable_slot(slot);
                }
@@ -785,7 +786,7 @@ static void hotplug_event(u32 type, struct acpiphp_context *context)
                if (bridge)
                        acpiphp_check_bridge(bridge);
                else if (!(slot->flags & SLOT_IS_GOING_AWAY))
-                       enable_slot(slot);
+                       enable_slot(slot, false);
 
                break;
 
@@ -973,7 +974,7 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
 
        /* configure all functions */
        if (!(slot->flags & SLOT_ENABLED))
-               enable_slot(slot);
+               enable_slot(slot, false);
 
        pci_unlock_rescan_remove();
        return 0;
index 1835f3a7aa8d2f5a502a0629bfb0c0cc96420dd4..51b6c81671c1e21baba57422cd90d8727992f8e1 100644 (file)
@@ -1289,12 +1289,12 @@ int pci_save_state(struct pci_dev *dev)
 EXPORT_SYMBOL(pci_save_state);
 
 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
-                                    u32 saved_val, int retry)
+                                    u32 saved_val, int retry, bool force)
 {
        u32 val;
 
        pci_read_config_dword(pdev, offset, &val);
-       if (val == saved_val)
+       if (!force && val == saved_val)
                return;
 
        for (;;) {
@@ -1313,25 +1313,36 @@ static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
 }
 
 static void pci_restore_config_space_range(struct pci_dev *pdev,
-                                          int start, int end, int retry)
+                                          int start, int end, int retry,
+                                          bool force)
 {
        int index;
 
        for (index = end; index >= start; index--)
                pci_restore_config_dword(pdev, 4 * index,
                                         pdev->saved_config_space[index],
-                                        retry);
+                                        retry, force);
 }
 
 static void pci_restore_config_space(struct pci_dev *pdev)
 {
        if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
-               pci_restore_config_space_range(pdev, 10, 15, 0);
+               pci_restore_config_space_range(pdev, 10, 15, 0, false);
                /* Restore BARs before the command register. */
-               pci_restore_config_space_range(pdev, 4, 9, 10);
-               pci_restore_config_space_range(pdev, 0, 3, 0);
+               pci_restore_config_space_range(pdev, 4, 9, 10, false);
+               pci_restore_config_space_range(pdev, 0, 3, 0, false);
+       } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+               pci_restore_config_space_range(pdev, 12, 15, 0, false);
+
+               /*
+                * Force rewriting of prefetch registers to avoid S3 resume
+                * issues on Intel PCI bridges that occur when these
+                * registers are not explicitly written.
+                */
+               pci_restore_config_space_range(pdev, 9, 11, 0, true);
+               pci_restore_config_space_range(pdev, 0, 8, 0, false);
        } else {
-               pci_restore_config_space_range(pdev, 0, 15, 0);
+               pci_restore_config_space_range(pdev, 0, 15, 0, false);
        }
 }
 
index 5c8d452e35e2ea58251ba3160d6b9fef18870f3c..c89d3effd99d63b681e7e7cf37533b418ae101c1 100644 (file)
@@ -48,6 +48,7 @@ source "drivers/phy/lantiq/Kconfig"
 source "drivers/phy/marvell/Kconfig"
 source "drivers/phy/mediatek/Kconfig"
 source "drivers/phy/motorola/Kconfig"
+source "drivers/phy/mscc/Kconfig"
 source "drivers/phy/qualcomm/Kconfig"
 source "drivers/phy/ralink/Kconfig"
 source "drivers/phy/renesas/Kconfig"
index 84e3bd9c5665e36d390ec96eaddf7432ca73a7a6..ce8339ff00225b9c844c6e3110d679dea66cabc3 100644 (file)
@@ -18,6 +18,7 @@ obj-y                                 += broadcom/    \
                                           hisilicon/   \
                                           marvell/     \
                                           motorola/    \
+                                          mscc/        \
                                           qualcomm/    \
                                           ralink/      \
                                           samsung/     \
diff --git a/drivers/phy/mscc/Kconfig b/drivers/phy/mscc/Kconfig
new file mode 100644 (file)
index 0000000..2e2a466
--- /dev/null
@@ -0,0 +1,11 @@
+#
+# Phy drivers for Microsemi devices
+#
+
+config PHY_OCELOT_SERDES
+       tristate "SerDes PHY driver for Microsemi Ocelot"
+       select GENERIC_PHY
+       depends on OF
+       depends on MFD_SYSCON
+       help
+         Enable this to support SerDes muxing with Microsemi Ocelot.
diff --git a/drivers/phy/mscc/Makefile b/drivers/phy/mscc/Makefile
new file mode 100644 (file)
index 0000000..e147491
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Makefile for the Microsemi phy drivers.
+#
+
+obj-$(CONFIG_PHY_OCELOT_SERDES) := phy-ocelot-serdes.o
diff --git a/drivers/phy/mscc/phy-ocelot-serdes.c b/drivers/phy/mscc/phy-ocelot-serdes.c
new file mode 100644 (file)
index 0000000..8936abd
--- /dev/null
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * SerDes PHY driver for Microsemi Ocelot
+ *
+ * Copyright (c) 2018 Microsemi
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <soc/mscc/ocelot_hsio.h>
+#include <dt-bindings/phy/phy-ocelot-serdes.h>
+
+struct serdes_ctrl {
+       struct regmap           *regs;
+       struct device           *dev;
+       struct phy              *phys[SERDES_MAX];
+};
+
+struct serdes_macro {
+       u8                      idx;
+       /* Not used when in QSGMII or PCIe mode */
+       int                     port;
+       struct serdes_ctrl      *ctrl;
+};
+
+#define MCB_S1G_CFG_TIMEOUT     50
+
+static int __serdes_write_mcb_s1g(struct regmap *regmap, u8 macro, u32 op)
+{
+       unsigned int regval;
+
+       regmap_write(regmap, HSIO_MCB_S1G_ADDR_CFG, op |
+                    HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR(BIT(macro)));
+
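+       /*
+        * The hardware is expected to clear the one-shot bit(s) in
+        * ADDR_CFG when the MCB transfer completes, so poll roughly
+        * every 100us until the op bits drop, giving up after
+        * MCB_S1G_CFG_TIMEOUT ms (0 on success, -ETIMEDOUT otherwise).
+        */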
+       return regmap_read_poll_timeout(regmap, HSIO_MCB_S1G_ADDR_CFG, regval,
+                                       (regval & op) != op, 100,
+                                       MCB_S1G_CFG_TIMEOUT * 1000);
+}
+
+static int serdes_commit_mcb_s1g(struct regmap *regmap, u8 macro)
+{
+       return __serdes_write_mcb_s1g(regmap, macro,
+               HSIO_MCB_S1G_ADDR_CFG_SERDES1G_WR_ONE_SHOT);
+}
+
+static int serdes_update_mcb_s1g(struct regmap *regmap, u8 macro)
+{
+       return __serdes_write_mcb_s1g(regmap, macro,
+               HSIO_MCB_S1G_ADDR_CFG_SERDES1G_RD_ONE_SHOT);
+}
+
+static int serdes_init_s1g(struct regmap *regmap, u8 serdes)
+{
+       int ret;
+
+       ret = serdes_update_mcb_s1g(regmap, serdes);
+       if (ret)
+               return ret;
+
+       regmap_update_bits(regmap, HSIO_S1G_COMMON_CFG,
+                          HSIO_S1G_COMMON_CFG_SYS_RST |
+                          HSIO_S1G_COMMON_CFG_ENA_LANE |
+                          HSIO_S1G_COMMON_CFG_ENA_ELOOP |
+                          HSIO_S1G_COMMON_CFG_ENA_FLOOP,
+                          HSIO_S1G_COMMON_CFG_ENA_LANE);
+
+       regmap_update_bits(regmap, HSIO_S1G_PLL_CFG,
+                          HSIO_S1G_PLL_CFG_PLL_FSM_ENA |
+                          HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_M,
+                          HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA(200) |
+                          HSIO_S1G_PLL_CFG_PLL_FSM_ENA);
+
+       regmap_update_bits(regmap, HSIO_S1G_MISC_CFG,
+                          HSIO_S1G_MISC_CFG_DES_100FX_CPMD_ENA |
+                          HSIO_S1G_MISC_CFG_LANE_RST,
+                          HSIO_S1G_MISC_CFG_LANE_RST);
+
+       ret = serdes_commit_mcb_s1g(regmap, serdes);
+       if (ret)
+               return ret;
+
+       regmap_update_bits(regmap, HSIO_S1G_COMMON_CFG,
+                          HSIO_S1G_COMMON_CFG_SYS_RST,
+                          HSIO_S1G_COMMON_CFG_SYS_RST);
+
+       regmap_update_bits(regmap, HSIO_S1G_MISC_CFG,
+                          HSIO_S1G_MISC_CFG_LANE_RST, 0);
+
+       ret = serdes_commit_mcb_s1g(regmap, serdes);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+struct serdes_mux {
+       u8                      idx;
+       u8                      port;
+       enum phy_mode           mode;
+       u32                     mask;
+       u32                     mux;
+};
+
+#define SERDES_MUX(_idx, _port, _mode, _mask, _mux) {          \
+       .idx = _idx,                                            \
+       .port = _port,                                          \
+       .mode = _mode,                                          \
+       .mask = _mask,                                          \
+       .mux = _mux,                                            \
+}
+
+#define SERDES_MUX_SGMII(i, p, m, c) SERDES_MUX(i, p, PHY_MODE_SGMII, m, c)
+#define SERDES_MUX_QSGMII(i, p, m, c) SERDES_MUX(i, p, PHY_MODE_QSGMII, m, c)
+
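+/*
+ * Lookup table from (macro, port, PHY mode) to the HSIO_HW_CFG muxing
+ * bits: @mask is the field to update and @mux the value written into it.
+ * A macro appears several times when it can be muxed to different ports.
+ */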
+static const struct serdes_mux ocelot_serdes_muxes[] = {
+       SERDES_MUX_SGMII(SERDES1G(0), 0, 0, 0),
+       SERDES_MUX_SGMII(SERDES1G(1), 1, HSIO_HW_CFG_DEV1G_5_MODE, 0),
+       SERDES_MUX_SGMII(SERDES1G(1), 5, HSIO_HW_CFG_QSGMII_ENA |
+                        HSIO_HW_CFG_DEV1G_5_MODE, HSIO_HW_CFG_DEV1G_5_MODE),
+       SERDES_MUX_SGMII(SERDES1G(2), 2, HSIO_HW_CFG_DEV1G_4_MODE, 0),
+       SERDES_MUX_SGMII(SERDES1G(2), 4, HSIO_HW_CFG_QSGMII_ENA |
+                        HSIO_HW_CFG_DEV1G_4_MODE, HSIO_HW_CFG_DEV1G_4_MODE),
+       SERDES_MUX_SGMII(SERDES1G(3), 3, HSIO_HW_CFG_DEV1G_6_MODE, 0),
+       SERDES_MUX_SGMII(SERDES1G(3), 6, HSIO_HW_CFG_QSGMII_ENA |
+                        HSIO_HW_CFG_DEV1G_6_MODE, HSIO_HW_CFG_DEV1G_6_MODE),
+       SERDES_MUX_SGMII(SERDES1G(4), 4, HSIO_HW_CFG_QSGMII_ENA |
+                        HSIO_HW_CFG_DEV1G_4_MODE | HSIO_HW_CFG_DEV1G_9_MODE,
+                        0),
+       SERDES_MUX_SGMII(SERDES1G(4), 9, HSIO_HW_CFG_DEV1G_4_MODE |
+                        HSIO_HW_CFG_DEV1G_9_MODE, HSIO_HW_CFG_DEV1G_4_MODE |
+                        HSIO_HW_CFG_DEV1G_9_MODE),
+       SERDES_MUX_SGMII(SERDES1G(5), 5, HSIO_HW_CFG_QSGMII_ENA |
+                        HSIO_HW_CFG_DEV1G_5_MODE | HSIO_HW_CFG_DEV2G5_10_MODE,
+                        0),
+       SERDES_MUX_SGMII(SERDES1G(5), 10, HSIO_HW_CFG_PCIE_ENA |
+                        HSIO_HW_CFG_DEV1G_5_MODE | HSIO_HW_CFG_DEV2G5_10_MODE,
+                        HSIO_HW_CFG_DEV1G_5_MODE | HSIO_HW_CFG_DEV2G5_10_MODE),
+       SERDES_MUX_QSGMII(SERDES6G(0), 4, HSIO_HW_CFG_QSGMII_ENA,
+                         HSIO_HW_CFG_QSGMII_ENA),
+       SERDES_MUX_QSGMII(SERDES6G(0), 5, HSIO_HW_CFG_QSGMII_ENA,
+                         HSIO_HW_CFG_QSGMII_ENA),
+       SERDES_MUX_QSGMII(SERDES6G(0), 6, HSIO_HW_CFG_QSGMII_ENA,
+                         HSIO_HW_CFG_QSGMII_ENA),
+       SERDES_MUX_SGMII(SERDES6G(0), 7, HSIO_HW_CFG_QSGMII_ENA, 0),
+       SERDES_MUX_QSGMII(SERDES6G(0), 7, HSIO_HW_CFG_QSGMII_ENA,
+                         HSIO_HW_CFG_QSGMII_ENA),
+       SERDES_MUX_SGMII(SERDES6G(1), 8, 0, 0),
+       SERDES_MUX_SGMII(SERDES6G(2), 10, HSIO_HW_CFG_PCIE_ENA |
+                        HSIO_HW_CFG_DEV2G5_10_MODE, 0),
+       SERDES_MUX(SERDES6G(2), 10, PHY_MODE_PCIE, HSIO_HW_CFG_PCIE_ENA,
+                  HSIO_HW_CFG_PCIE_ENA),
+};
+
+static int serdes_set_mode(struct phy *phy, enum phy_mode mode)
+{
+       struct serdes_macro *macro = phy_get_drvdata(phy);
+       unsigned int i;
+       int ret;
+
+       for (i = 0; i < ARRAY_SIZE(ocelot_serdes_muxes); i++) {
+               if (macro->idx != ocelot_serdes_muxes[i].idx ||
+                   mode != ocelot_serdes_muxes[i].mode)
+                       continue;
+
+               if (mode != PHY_MODE_QSGMII &&
+                   macro->port != ocelot_serdes_muxes[i].port)
+                       continue;
+
+               ret = regmap_update_bits(macro->ctrl->regs, HSIO_HW_CFG,
+                                        ocelot_serdes_muxes[i].mask,
+                                        ocelot_serdes_muxes[i].mux);
+               if (ret)
+                       return ret;
+
+               if (macro->idx <= SERDES1G_MAX)
+                       return serdes_init_s1g(macro->ctrl->regs, macro->idx);
+
+               /* SERDES6G and PCIe not supported yet */
+               return -EOPNOTSUPP;
+       }
+
+       return -EINVAL;
+}
+
+static const struct phy_ops serdes_ops = {
+       .set_mode       = serdes_set_mode,
+       .owner          = THIS_MODULE,
+};
+
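+/*
+ * The PHY specifier has two cells: args[0] is the consumer port, args[1]
+ * the SerDes macro index. A macro that is already bound to a port is only
+ * handed out again for SERDES6G(0), which can serve several QSGMII ports.
+ */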
+static struct phy *serdes_simple_xlate(struct device *dev,
+                                      struct of_phandle_args *args)
+{
+       struct serdes_ctrl *ctrl = dev_get_drvdata(dev);
+       unsigned int port, idx, i;
+
+       if (args->args_count != 2)
+               return ERR_PTR(-EINVAL);
+
+       port = args->args[0];
+       idx = args->args[1];
+
+       for (i = 0; i < SERDES_MAX; i++) {
+               struct serdes_macro *macro = phy_get_drvdata(ctrl->phys[i]);
+
+               if (idx != macro->idx)
+                       continue;
+
+               /* SERDES6G(0) is the only SerDes capable of QSGMII */
+               if (idx != SERDES6G(0) && macro->port >= 0)
+                       return ERR_PTR(-EBUSY);
+
+               macro->port = port;
+               return ctrl->phys[i];
+       }
+
+       return ERR_PTR(-ENODEV);
+}
+
+static int serdes_phy_create(struct serdes_ctrl *ctrl, u8 idx, struct phy **phy)
+{
+       struct serdes_macro *macro;
+
+       *phy = devm_phy_create(ctrl->dev, NULL, &serdes_ops);
+       if (IS_ERR(*phy))
+               return PTR_ERR(*phy);
+
+       macro = devm_kzalloc(ctrl->dev, sizeof(*macro), GFP_KERNEL);
+       if (!macro)
+               return -ENOMEM;
+
+       macro->idx = idx;
+       macro->ctrl = ctrl;
+       macro->port = -1;
+
+       phy_set_drvdata(*phy, macro);
+
+       return 0;
+}
+
+static int serdes_probe(struct platform_device *pdev)
+{
+       struct phy_provider *provider;
+       struct serdes_ctrl *ctrl;
+       unsigned int i;
+       int ret;
+
+       ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
+       if (!ctrl)
+               return -ENOMEM;
+
+       ctrl->dev = &pdev->dev;
+       ctrl->regs = syscon_node_to_regmap(pdev->dev.parent->of_node);
+       if (IS_ERR(ctrl->regs))
+               return PTR_ERR(ctrl->regs);
+
+       for (i = 0; i < SERDES_MAX; i++) {
+               ret = serdes_phy_create(ctrl, i, &ctrl->phys[i]);
+               if (ret)
+                       return ret;
+       }
+
+       dev_set_drvdata(&pdev->dev, ctrl);
+
+       provider = devm_of_phy_provider_register(ctrl->dev,
+                                                serdes_simple_xlate);
+
+       return PTR_ERR_OR_ZERO(provider);
+}
+
+static const struct of_device_id serdes_ids[] = {
+       { .compatible = "mscc,vsc7514-serdes", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, serdes_ids);
+
+static struct platform_driver mscc_ocelot_serdes = {
+       .probe          = serdes_probe,
+       .driver         = {
+               .name   = "mscc,ocelot-serdes",
+               .of_match_table = of_match_ptr(serdes_ids),
+       },
+};
+
+module_platform_driver(mscc_ocelot_serdes);
+
+MODULE_AUTHOR("Quentin Schulz <quentin.schulz@bootlin.com>");
+MODULE_DESCRIPTION("SerDes driver for Microsemi Ocelot");
+MODULE_LICENSE("Dual MIT/GPL");
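For reference, a minimal consumer-side sketch (not part of this patch; example_attach_serdes, dev and np are illustrative names) showing how a MAC driver would pick up one of these PHYs through the generic PHY API, which routes into serdes_simple_xlate() and serdes_set_mode() above:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/phy/phy.h>

/*
 * Hypothetical consumer: resolve the SerDes PHY referenced by @np and
 * mux it to SGMII. devm_of_phy_get() lands in serdes_simple_xlate(),
 * phy_set_mode() in serdes_set_mode().
 */
static int example_attach_serdes(struct device *dev, struct device_node *np)
{
        struct phy *serdes = devm_of_phy_get(dev, np, NULL);

        if (IS_ERR(serdes))
                return PTR_ERR(serdes);

        return phy_set_mode(serdes, PHY_MODE_SGMII);
}

The two-cell PHY specifier (port, macro index) matches the args_count check in serdes_simple_xlate().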
index 8d48371caaa2df51568fd70996e2b4594a24a311..e7f45d96b0cbd61e4cf7adfb050fbde547bcf752 100644 (file)
 
 #include "pinctrl-intel.h"
 
-#define CNL_PAD_OWN    0x020
-#define CNL_PADCFGLOCK 0x080
-#define CNL_HOSTSW_OWN 0x0b0
-#define CNL_GPI_IE     0x120
+#define CNL_PAD_OWN            0x020
+#define CNL_PADCFGLOCK         0x080
+#define CNL_LP_HOSTSW_OWN      0x0b0
+#define CNL_H_HOSTSW_OWN       0x0c0
+#define CNL_GPI_IE             0x120
 
 #define CNL_GPP(r, s, e, g)                            \
        {                                               \
 
 #define CNL_NO_GPIO    -1
 
-#define CNL_COMMUNITY(b, s, e, g)                      \
+#define CNL_COMMUNITY(b, s, e, o, g)                   \
        {                                               \
                .barno = (b),                           \
                .padown_offset = CNL_PAD_OWN,           \
                .padcfglock_offset = CNL_PADCFGLOCK,    \
-               .hostown_offset = CNL_HOSTSW_OWN,       \
+               .hostown_offset = (o),                  \
                .ie_offset = CNL_GPI_IE,                \
                .pin_base = (s),                        \
                .npins = ((e) - (s) + 1),               \
                .ngpps = ARRAY_SIZE(g),                 \
        }
 
+#define CNLLP_COMMUNITY(b, s, e, g)                    \
+       CNL_COMMUNITY(b, s, e, CNL_LP_HOSTSW_OWN, g)
+
+#define CNLH_COMMUNITY(b, s, e, g)                     \
+       CNL_COMMUNITY(b, s, e, CNL_H_HOSTSW_OWN, g)
+
 /* Cannon Lake-H */
 static const struct pinctrl_pin_desc cnlh_pins[] = {
        /* GPP_A */
@@ -442,10 +449,10 @@ static const struct intel_function cnlh_functions[] = {
 };
 
 static const struct intel_community cnlh_communities[] = {
-       CNL_COMMUNITY(0, 0, 50, cnlh_community0_gpps),
-       CNL_COMMUNITY(1, 51, 154, cnlh_community1_gpps),
-       CNL_COMMUNITY(2, 155, 248, cnlh_community3_gpps),
-       CNL_COMMUNITY(3, 249, 298, cnlh_community4_gpps),
+       CNLH_COMMUNITY(0, 0, 50, cnlh_community0_gpps),
+       CNLH_COMMUNITY(1, 51, 154, cnlh_community1_gpps),
+       CNLH_COMMUNITY(2, 155, 248, cnlh_community3_gpps),
+       CNLH_COMMUNITY(3, 249, 298, cnlh_community4_gpps),
 };
 
 static const struct intel_pinctrl_soc_data cnlh_soc_data = {
@@ -803,9 +810,9 @@ static const struct intel_padgroup cnllp_community4_gpps[] = {
 };
 
 static const struct intel_community cnllp_communities[] = {
-       CNL_COMMUNITY(0, 0, 67, cnllp_community0_gpps),
-       CNL_COMMUNITY(1, 68, 180, cnllp_community1_gpps),
-       CNL_COMMUNITY(2, 181, 243, cnllp_community4_gpps),
+       CNLLP_COMMUNITY(0, 0, 67, cnllp_community0_gpps),
+       CNLLP_COMMUNITY(1, 68, 180, cnllp_community1_gpps),
+       CNLLP_COMMUNITY(2, 181, 243, cnllp_community4_gpps),
 };
 
 static const struct intel_pinctrl_soc_data cnllp_soc_data = {
index ec8dafc946943261bbefa67d948a03a6cce9fffa..1ea3438ea67e925aa82b6e57e503efb72689fde3 100644 (file)
@@ -887,36 +887,6 @@ static const struct gpio_chip intel_gpio_chip = {
        .set_config = gpiochip_generic_config,
 };
 
-static int intel_gpio_irq_reqres(struct irq_data *d)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
-       int pin;
-       int ret;
-
-       pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
-       if (pin >= 0) {
-               ret = gpiochip_lock_as_irq(gc, pin);
-               if (ret) {
-                       dev_err(pctrl->dev, "unable to lock HW IRQ %d for IRQ\n",
-                               pin);
-                       return ret;
-               }
-       }
-       return 0;
-}
-
-static void intel_gpio_irq_relres(struct irq_data *d)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
-       int pin;
-
-       pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
-       if (pin >= 0)
-               gpiochip_unlock_as_irq(gc, pin);
-}
-
 static void intel_gpio_irq_ack(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1132,8 +1102,6 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
 
 static struct irq_chip intel_gpio_irqchip = {
        .name = "intel-gpio",
-       .irq_request_resources = intel_gpio_irq_reqres,
-       .irq_release_resources = intel_gpio_irq_relres,
        .irq_enable = intel_gpio_irq_enable,
        .irq_ack = intel_gpio_irq_ack,
        .irq_mask = intel_gpio_irq_mask,
index 41ccc759b8b8867a09b992371eafafca35899a0e..1425c2874d4028b5140cc74933733a2bf4f8f22b 100644 (file)
@@ -348,21 +348,12 @@ static void amd_gpio_irq_enable(struct irq_data *d)
        unsigned long flags;
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
-       u32 mask = BIT(INTERRUPT_ENABLE_OFF) | BIT(INTERRUPT_MASK_OFF);
 
        raw_spin_lock_irqsave(&gpio_dev->lock, flags);
        pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
        pin_reg |= BIT(INTERRUPT_ENABLE_OFF);
        pin_reg |= BIT(INTERRUPT_MASK_OFF);
        writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
-       /*
-        * When debounce logic is enabled it takes ~900 us before interrupts
-        * can be enabled.  During this "debounce warm up" period the
-        * "INTERRUPT_ENABLE" bit will read as 0. Poll the bit here until it
-        * reads back as 1, signaling that interrupts are now enabled.
-        */
-       while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask)
-               continue;
        raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 }
 
@@ -426,7 +417,7 @@ static void amd_gpio_irq_eoi(struct irq_data *d)
 static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
        int ret = 0;
-       u32 pin_reg;
+       u32 pin_reg, pin_reg_irq_en, mask;
        unsigned long flags, irq_flags;
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
@@ -495,6 +486,28 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
        }
 
        pin_reg |= CLR_INTR_STAT << INTERRUPT_STS_OFF;
+       /*
+        * If WAKE_INT_MASTER_REG.MaskStsEn is set, a software write to the
+        * debounce registers of any GPIO will block wake/interrupt status
+        * generation for *all* GPIOs for a length of time that depends on
+        * WAKE_INT_MASTER_REG.MaskStsLength[11:0].  During this period the
+        * INTERRUPT_ENABLE bit will read as 0.
+        *
+        * We temporarily enable the irq for the GPIO whose configuration is
+        * changing, wait for the bit to read back as 1 to know when debounce
+        * has settled, and then disable the irq again.
+        * We do this polling with the spinlock held to ensure other GPIO
+        * access routines do not read an incorrect value for the irq enable
+        * bit of other GPIOs.  We keep the GPIO masked while polling to avoid
+        * spurious irqs, and disable the irq again after polling.
+        */
+       mask = BIT(INTERRUPT_ENABLE_OFF);
+       pin_reg_irq_en = pin_reg;
+       pin_reg_irq_en |= mask;
+       pin_reg_irq_en &= ~BIT(INTERRUPT_MASK_OFF);
+       writel(pin_reg_irq_en, gpio_dev->base + (d->hwirq)*4);
+       while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask)
+               continue;
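+       /* debounce settled: restore the requested irq configuration */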
        writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
        raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 
index 0f8ac8dec3e16a21e066e9f76318d14ec6c9d78f..a1bd8aaf4d983bcd8b8bdfa10ae944918fabeb75 100644 (file)
@@ -569,6 +569,25 @@ static int bd71837_probe(struct platform_device *pdev)
                        BD71837_REG_REGLOCK);
        }
 
+       /*
+        * There is a HW quirk in BD71837: the shutdown sequence timings for
+        * bucks/LDOs which are controlled via the register interface are
+        * changed. At PMIC poweroff the voltage for BUCK6/7 is cut immediately
+        * at the beginning of the shut-down sequence. As bucks 6 and 7 are
+        * parent supplies for LDO5 and LDO6, this causes LDO5/6 voltage
+        * monitoring to erroneously detect undervoltage and force the PMIC
+        * into the emergency state instead of poweroff. To avoid this, disable
+        * voltage monitoring for LDO5 and LDO6.
+        */
+       err = regmap_update_bits(pmic->mfd->regmap, BD718XX_REG_MVRFLTMASK2,
+                                BD718XX_LDO5_VRMON80 | BD718XX_LDO6_VRMON80,
+                                BD718XX_LDO5_VRMON80 | BD718XX_LDO6_VRMON80);
+       if (err) {
+               dev_err(&pmic->pdev->dev,
+                       "Failed to disable voltage monitoring\n");
+               goto err;
+       }
+
        for (i = 0; i < ARRAY_SIZE(pmic_regulator_inits); i++) {
 
                struct regulator_desc *desc;
index bb1324f93143f66e609fea2602329230f70ed4ce..9577d89418468a06f1030ff69d2699f71b5710bc 100644 (file)
@@ -3161,7 +3161,7 @@ static inline int regulator_suspend_toggle(struct regulator_dev *rdev,
        if (!rstate->changeable)
                return -EPERM;
 
-       rstate->enabled = en;
+       rstate->enabled = (en) ? ENABLE_IN_SUSPEND : DISABLE_IN_SUSPEND;
 
        return 0;
 }
@@ -4395,13 +4395,13 @@ regulator_register(const struct regulator_desc *regulator_desc,
            !rdev->desc->fixed_uV)
                rdev->is_switch = true;
 
+       dev_set_drvdata(&rdev->dev, rdev);
        ret = device_register(&rdev->dev);
        if (ret != 0) {
                put_device(&rdev->dev);
                goto unset_supplies;
        }
 
-       dev_set_drvdata(&rdev->dev, rdev);
        rdev_init_debugfs(rdev);
 
        /* try to resolve regulators supply since a new one was registered */
index 638f17d4c8485e11fa7d0a9486fc3ec131a341cd..210fc20f7de7a9cd26dbbee68e24a7c0bc2dc94a 100644 (file)
@@ -213,8 +213,6 @@ static void of_get_regulation_constraints(struct device_node *np,
                else if (of_property_read_bool(suspend_np,
                                        "regulator-off-in-suspend"))
                        suspend_state->enabled = DISABLE_IN_SUSPEND;
-               else
-                       suspend_state->enabled = DO_NOTHING_IN_SUSPEND;
 
                if (!of_property_read_u32(np, "regulator-suspend-min-microvolt",
                                          &pval))
index 0078b5d217cc4f46821344ebdaa1ad2a620b2874..1771d0073c0c767a1d5d552e055721a658d8bede 100644 (file)
@@ -610,7 +610,7 @@ static void qeth_put_reply(struct qeth_reply *reply)
 static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
                struct qeth_card *card)
 {
-       char *ipa_name;
+       const char *ipa_name;
        int com = cmd->hdr.command;
        ipa_name = qeth_get_ipa_cmd_name(com);
        if (rc)
index 5bcb8dafc3ee506f9ff3487bdf8e01cf274e35b2..e891c0b52f4ccc79995b6c60a8a05e6e12f92005 100644 (file)
@@ -148,10 +148,10 @@ EXPORT_SYMBOL_GPL(IPA_PDU_HEADER);
 
 struct ipa_rc_msg {
        enum qeth_ipa_return_codes rc;
-       char *msg;
+       const char *msg;
 };
 
-static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
+static const struct ipa_rc_msg qeth_ipa_rc_msg[] = {
        {IPA_RC_SUCCESS,                "success"},
        {IPA_RC_NOTSUPP,                "Command not supported"},
        {IPA_RC_IP_TABLE_FULL,          "Add Addr IP Table Full - ipv6"},
@@ -219,23 +219,23 @@ static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
 
 
 
-char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
+const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
 {
-       int x = 0;
-       qeth_ipa_rc_msg[sizeof(qeth_ipa_rc_msg) /
-                       sizeof(struct ipa_rc_msg) - 1].rc = rc;
-       while (qeth_ipa_rc_msg[x].rc != rc)
-               x++;
+       int x;
+
+       for (x = 0; x < ARRAY_SIZE(qeth_ipa_rc_msg) - 1; x++)
+               if (qeth_ipa_rc_msg[x].rc == rc)
+                       return qeth_ipa_rc_msg[x].msg;
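+       /* not found: x is left at the last, catch-all entry */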
        return qeth_ipa_rc_msg[x].msg;
 }
 
 
 struct ipa_cmd_names {
        enum qeth_ipa_cmds cmd;
-       char *name;
+       const char *name;
 };
 
-static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
+static const struct ipa_cmd_names qeth_ipa_cmd_names[] = {
        {IPA_CMD_STARTLAN,      "startlan"},
        {IPA_CMD_STOPLAN,       "stoplan"},
        {IPA_CMD_SETVMAC,       "setvmac"},
@@ -267,13 +267,12 @@ static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
        {IPA_CMD_UNKNOWN,       "unknown"},
 };
 
-char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
+const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
 {
-       int x = 0;
-       qeth_ipa_cmd_names[
-               sizeof(qeth_ipa_cmd_names) /
-                       sizeof(struct ipa_cmd_names)-1].cmd = cmd;
-       while (qeth_ipa_cmd_names[x].cmd != cmd)
-               x++;
+       int x;
+
+       for (x = 0; x < ARRAY_SIZE(qeth_ipa_cmd_names) - 1; x++)
+               if (qeth_ipa_cmd_names[x].cmd == cmd)
+                       return qeth_ipa_cmd_names[x].name;
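+       /* not found: x is left at the trailing IPA_CMD_UNKNOWN entry */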
        return qeth_ipa_cmd_names[x].name;
 }
index aa8b9196b089e0c9b2788493d5103dd5b38ad33d..aa5de1fe01e10068b8913d814c27a9a63bdc95d9 100644 (file)
@@ -797,8 +797,8 @@ enum qeth_ipa_arp_return_codes {
        QETH_IPA_ARP_RC_Q_NO_DATA    = 0x0008,
 };
 
-extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
-extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
+extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
+extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
 
 #define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
                               sizeof(struct qeth_ipacmd_setassparms_hdr))
index ecb22749df0bfa4a4fc0596f8eb32a9b693c5004..8cc0151830433230e8f629c2660eec90871b7e8c 100644 (file)
@@ -2729,6 +2729,9 @@ static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
 {
        unsigned long addr;
 
+       if (!p)
+               return -ENODEV;
+
        addr = gen_pool_alloc(p, cnt);
        if (!addr)
                return -ENOMEM;
index c646d871386130d5dde7df2619f3b7fa7bcf5af6..681f7d4b7724fd2037257fab596fc13fc3bbc7d5 100644 (file)
@@ -626,7 +626,7 @@ static u32 ucc_get_tdm_sync_shift(enum comm_dir mode, u32 tdm_num)
 {
        u32 shift;
 
-       shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : RX_SYNC_SHIFT_BASE;
+       shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : TX_SYNC_SHIFT_BASE;
        shift -= tdm_num * 2;
 
        return shift;
index 0626e6e3ea0c05dee66e177aa4d2a0c75b703247..421bfc7dda67413bd72ae96055c3767d01a33fa7 100644 (file)
@@ -300,8 +300,8 @@ static int spi_gpio_request(struct device *dev,
                *mflags |= SPI_MASTER_NO_RX;
 
        spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW);
-       if (IS_ERR(spi_gpio->mosi))
-               return PTR_ERR(spi_gpio->mosi);
+       if (IS_ERR(spi_gpio->sck))
+               return PTR_ERR(spi_gpio->sck);
 
        for (i = 0; i < num_chipselects; i++) {
                spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs",
index 95dc4d78618df29c2b15d92fa6d11e3b84bb7ed1..b37de1d991d6abe1e0a25c5fffcf04d9851aba3e 100644 (file)
@@ -598,11 +598,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
 
        ret = wait_event_interruptible_timeout(rspi->wait,
                                               rspi->dma_callbacked, HZ);
-       if (ret > 0 && rspi->dma_callbacked)
+       if (ret > 0 && rspi->dma_callbacked) {
                ret = 0;
-       else if (!ret) {
-               dev_err(&rspi->master->dev, "DMA timeout\n");
-               ret = -ETIMEDOUT;
+       } else {
+               if (!ret) {
+                       dev_err(&rspi->master->dev, "DMA timeout\n");
+                       ret = -ETIMEDOUT;
+               }
                if (tx)
                        dmaengine_terminate_all(rspi->master->dma_tx);
                if (rx)
@@ -1350,12 +1352,36 @@ static const struct platform_device_id spi_driver_ids[] = {
 
 MODULE_DEVICE_TABLE(platform, spi_driver_ids);
 
+#ifdef CONFIG_PM_SLEEP
+static int rspi_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+       return spi_master_suspend(rspi->master);
+}
+
+static int rspi_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+       return spi_master_resume(rspi->master);
+}
+
+static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
+#define DEV_PM_OPS     &rspi_pm_ops
+#else
+#define DEV_PM_OPS     NULL
+#endif /* CONFIG_PM_SLEEP */
+
 static struct platform_driver rspi_driver = {
        .probe =        rspi_probe,
        .remove =       rspi_remove,
        .id_table =     spi_driver_ids,
        .driver         = {
                .name = "renesas_spi",
+               .pm = DEV_PM_OPS,
                .of_match_table = of_match_ptr(rspi_of_match),
        },
 };
index 539d6d1a277a6179f7053698cbee772083354828..101cd6aae2ea520afcac89671071cdabe2341f8f 100644 (file)
@@ -397,7 +397,8 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
 
 static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
 {
-       sh_msiof_write(p, STR, sh_msiof_read(p, STR));
+       sh_msiof_write(p, STR,
+                      sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
 }
 
 static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
@@ -1426,12 +1427,37 @@ static const struct platform_device_id spi_driver_ids[] = {
 };
 MODULE_DEVICE_TABLE(platform, spi_driver_ids);
 
+#ifdef CONFIG_PM_SLEEP
+static int sh_msiof_spi_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+       return spi_master_suspend(p->master);
+}
+
+static int sh_msiof_spi_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+       return spi_master_resume(p->master);
+}
+
+static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
+                        sh_msiof_spi_resume);
+#define DEV_PM_OPS     &sh_msiof_spi_pm_ops
+#else
+#define DEV_PM_OPS     NULL
+#endif /* CONFIG_PM_SLEEP */
+
 static struct platform_driver sh_msiof_spi_drv = {
        .probe          = sh_msiof_spi_probe,
        .remove         = sh_msiof_spi_remove,
        .id_table       = spi_driver_ids,
        .driver         = {
                .name           = "spi_sh_msiof",
+               .pm             = DEV_PM_OPS,
                .of_match_table = of_match_ptr(sh_msiof_match),
        },
 };
index 6f7b946b5cedf103cbed9886d7ec3079bbdcf6de..1427f343b39a3dc4468e14c3612814508993b3c0 100644 (file)
@@ -1063,6 +1063,24 @@ static int tegra_slink_probe(struct platform_device *pdev)
                goto exit_free_master;
        }
 
+       /* a disabled clock may cause an interrupt storm upon IRQ request */
+       tspi->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(tspi->clk)) {
+               ret = PTR_ERR(tspi->clk);
+               dev_err(&pdev->dev, "Can not get clock %d\n", ret);
+               goto exit_free_master;
+       }
+       ret = clk_prepare(tspi->clk);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Clock prepare failed %d\n", ret);
+               goto exit_free_master;
+       }
+       ret = clk_enable(tspi->clk);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
+               goto exit_free_master;
+       }
+
        spi_irq = platform_get_irq(pdev, 0);
        tspi->irq = spi_irq;
        ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
@@ -1071,14 +1089,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
        if (ret < 0) {
                dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
                                        tspi->irq);
-               goto exit_free_master;
-       }
-
-       tspi->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(tspi->clk)) {
-               dev_err(&pdev->dev, "can not get clock\n");
-               ret = PTR_ERR(tspi->clk);
-               goto exit_free_irq;
+               goto exit_clk_disable;
        }
 
        tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
@@ -1138,6 +1149,8 @@ exit_rx_dma_free:
        tegra_slink_deinit_dma_param(tspi, true);
 exit_free_irq:
        free_irq(spi_irq, tspi);
+exit_clk_disable:
+       clk_disable(tspi->clk);
 exit_free_master:
        spi_master_put(master);
        return ret;
@@ -1150,6 +1163,8 @@ static int tegra_slink_remove(struct platform_device *pdev)
 
        free_irq(tspi->irq, tspi);
 
+       clk_disable(tspi->clk);
+
        if (tspi->tx_dma_chan)
                tegra_slink_deinit_dma_param(tspi, false);
 
index 99a4656d113d5dc67233e3dfd24dc608638da257..3861cb659cb941aa46414686d60797592750ae2f 100644 (file)
@@ -425,7 +425,7 @@ void ssb_chipco_get_clockcontrol(struct ssb_chipcommon *cc,
                        *m = chipco_read32(cc, SSB_CHIPCO_CLOCK_M2);
                        break;
                }
-               /* Fallthough */
+               /* Fall through */
        default:
                *m = chipco_read32(cc, SSB_CHIPCO_CLOCK_SB);
        }
index 59aaae7268cd64590f2b521e318b378f97514896..991e154c0eca6568a7de59ee9775363ccdfb4ad6 100644 (file)
@@ -16,11 +16,3 @@ config FSL_DPAA2_ETHSW
        ---help---
        Driver for Freescale DPAA2 Ethernet Switch. Select
        BRIDGE to have support for bridge tools.
-
-config FSL_DPAA2_PTP_CLOCK
-       tristate "Freescale DPAA2 PTP Clock"
-       depends on FSL_DPAA2_ETH && POSIX_TIMERS
-       select PTP_1588_CLOCK
-       help
-         This driver adds support for using the DPAA2 1588 timer module
-         as a PTP clock.
index 464f242a8a4ef3ea71091c600f91145193488afe..c92ab98c27d981b263ea75e88e4116248659fe9a 100644 (file)
@@ -3,4 +3,3 @@
 #
 
 obj-$(CONFIG_FSL_DPAA2_ETHSW)          += ethsw/
-obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK)      += rtc/
diff --git a/drivers/staging/fsl-dpaa2/rtc/Makefile b/drivers/staging/fsl-dpaa2/rtc/Makefile
deleted file mode 100644 (file)
index 5468da0..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for the Freescale DPAA2 PTP clock
-#
-
-obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += dpaa2-rtc.o
-
-dpaa2-rtc-objs := rtc.o dprtc.o
diff --git a/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h b/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
deleted file mode 100644 (file)
index db6a473..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2018 NXP
- */
-
-#ifndef _FSL_DPRTC_CMD_H
-#define _FSL_DPRTC_CMD_H
-
-/* DPRTC Version */
-#define DPRTC_VER_MAJOR                        2
-#define DPRTC_VER_MINOR                        0
-
-/* Command versioning */
-#define DPRTC_CMD_BASE_VERSION         1
-#define DPRTC_CMD_ID_OFFSET            4
-
-#define DPRTC_CMD(id)  (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION)
-
-/* Command IDs */
-#define DPRTC_CMDID_CLOSE                      DPRTC_CMD(0x800)
-#define DPRTC_CMDID_OPEN                       DPRTC_CMD(0x810)
-#define DPRTC_CMDID_CREATE                     DPRTC_CMD(0x910)
-#define DPRTC_CMDID_DESTROY                    DPRTC_CMD(0x990)
-#define DPRTC_CMDID_GET_API_VERSION            DPRTC_CMD(0xa10)
-
-#define DPRTC_CMDID_ENABLE                     DPRTC_CMD(0x002)
-#define DPRTC_CMDID_DISABLE                    DPRTC_CMD(0x003)
-#define DPRTC_CMDID_GET_ATTR                   DPRTC_CMD(0x004)
-#define DPRTC_CMDID_RESET                      DPRTC_CMD(0x005)
-#define DPRTC_CMDID_IS_ENABLED                 DPRTC_CMD(0x006)
-
-#define DPRTC_CMDID_SET_IRQ_ENABLE             DPRTC_CMD(0x012)
-#define DPRTC_CMDID_GET_IRQ_ENABLE             DPRTC_CMD(0x013)
-#define DPRTC_CMDID_SET_IRQ_MASK               DPRTC_CMD(0x014)
-#define DPRTC_CMDID_GET_IRQ_MASK               DPRTC_CMD(0x015)
-#define DPRTC_CMDID_GET_IRQ_STATUS             DPRTC_CMD(0x016)
-#define DPRTC_CMDID_CLEAR_IRQ_STATUS           DPRTC_CMD(0x017)
-
-#define DPRTC_CMDID_SET_CLOCK_OFFSET           DPRTC_CMD(0x1d0)
-#define DPRTC_CMDID_SET_FREQ_COMPENSATION      DPRTC_CMD(0x1d1)
-#define DPRTC_CMDID_GET_FREQ_COMPENSATION      DPRTC_CMD(0x1d2)
-#define DPRTC_CMDID_GET_TIME                   DPRTC_CMD(0x1d3)
-#define DPRTC_CMDID_SET_TIME                   DPRTC_CMD(0x1d4)
-#define DPRTC_CMDID_SET_ALARM                  DPRTC_CMD(0x1d5)
-#define DPRTC_CMDID_SET_PERIODIC_PULSE         DPRTC_CMD(0x1d6)
-#define DPRTC_CMDID_CLEAR_PERIODIC_PULSE       DPRTC_CMD(0x1d7)
-#define DPRTC_CMDID_SET_EXT_TRIGGER            DPRTC_CMD(0x1d8)
-#define DPRTC_CMDID_CLEAR_EXT_TRIGGER          DPRTC_CMD(0x1d9)
-#define DPRTC_CMDID_GET_EXT_TRIGGER_TIMESTAMP  DPRTC_CMD(0x1dA)
-
-/* Macros for accessing command fields smaller than 1byte */
-#define DPRTC_MASK(field)        \
-       GENMASK(DPRTC_##field##_SHIFT + DPRTC_##field##_SIZE - 1, \
-               DPRTC_##field##_SHIFT)
-#define dprtc_get_field(var, field)      \
-       (((var) & DPRTC_MASK(field)) >> DPRTC_##field##_SHIFT)
-
-#pragma pack(push, 1)
-struct dprtc_cmd_open {
-       __le32 dprtc_id;
-};
-
-struct dprtc_cmd_destroy {
-       __le32 object_id;
-};
-
-#define DPRTC_ENABLE_SHIFT     0
-#define DPRTC_ENABLE_SIZE      1
-
-struct dprtc_rsp_is_enabled {
-       u8 en;
-};
-
-struct dprtc_cmd_get_irq {
-       __le32 pad;
-       u8 irq_index;
-};
-
-struct dprtc_cmd_set_irq_enable {
-       u8 en;
-       u8 pad[3];
-       u8 irq_index;
-};
-
-struct dprtc_rsp_get_irq_enable {
-       u8 en;
-};
-
-struct dprtc_cmd_set_irq_mask {
-       __le32 mask;
-       u8 irq_index;
-};
-
-struct dprtc_rsp_get_irq_mask {
-       __le32 mask;
-};
-
-struct dprtc_cmd_get_irq_status {
-       __le32 status;
-       u8 irq_index;
-};
-
-struct dprtc_rsp_get_irq_status {
-       __le32 status;
-};
-
-struct dprtc_cmd_clear_irq_status {
-       __le32 status;
-       u8 irq_index;
-};
-
-struct dprtc_rsp_get_attributes {
-       __le32 pad;
-       __le32 id;
-};
-
-struct dprtc_cmd_set_clock_offset {
-       __le64 offset;
-};
-
-struct dprtc_get_freq_compensation {
-       __le32 freq_compensation;
-};
-
-struct dprtc_time {
-       __le64 time;
-};
-
-struct dprtc_rsp_get_api_version {
-       __le16 major;
-       __le16 minor;
-};
-
-#pragma pack(pop)
-
-#endif /* _FSL_DPRTC_CMD_H */
diff --git a/drivers/staging/fsl-dpaa2/rtc/dprtc.c b/drivers/staging/fsl-dpaa2/rtc/dprtc.c
deleted file mode 100644 (file)
index 68ae6ff..0000000
+++ /dev/null
@@ -1,701 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2018 NXP
- */
-
-#include <linux/fsl/mc.h>
-
-#include "dprtc.h"
-#include "dprtc-cmd.h"
-
-/**
- * dprtc_open() - Open a control session for the specified object.
- * @mc_io:     Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @dprtc_id:  DPRTC unique ID
- * @token:     Returned token; use in subsequent API calls
- *
- * This function can be used to open a control session for an
- * already created object; an object may have been declared in
- * the DPL or by calling the dprtc_create function.
- * This function returns a unique authentication token,
- * associated with the specific object ID and the specific MC
- * portal; this token must be used in all subsequent commands for
- * this specific object
- *
- * Return:     '0' on Success; Error code otherwise.
- */
-int dprtc_open(struct fsl_mc_io *mc_io,
-              u32 cmd_flags,
-              int dprtc_id,
-              u16 *token)
-{
-       struct dprtc_cmd_open *cmd_params;
-       struct fsl_mc_command cmd = { 0 };
-       int err;
-
-       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN,
-                                         cmd_flags,
-                                         0);
-       cmd_params = (struct dprtc_cmd_open *)cmd.params;
-       cmd_params->dprtc_id = cpu_to_le32(dprtc_id);
-
-       err = mc_send_command(mc_io, &cmd);
-       if (err)
-               return err;
-
-       *token = mc_cmd_hdr_read_token(&cmd);
-
-       return 0;
-}
-
-/**
- * dprtc_close() - Close the control session of the object
- * @mc_io:     Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:     Token of DPRTC object
- *
- * After this function is called, no further operations are
- * allowed on the object without opening a new control session.
- *
- * Return:     '0' on Success; Error code otherwise.
- */
-int dprtc_close(struct fsl_mc_io *mc_io,
-               u32 cmd_flags,
-               u16 token)
-{
-       struct fsl_mc_command cmd = { 0 };
-
-       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
-                                         token);
-
-       return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_create() - Create the DPRTC object.
- * @mc_io:     Pointer to MC portal's I/O object
- * @dprc_token:        Parent container token; '0' for default container
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg:       Configuration structure
- * @obj_id:    Returned object id
- *
- * Create the DPRTC object, allocate required resources and
- * perform required initialization.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * Return:     '0' on Success; Error code otherwise.
- */
-int dprtc_create(struct fsl_mc_io *mc_io,
-                u16 dprc_token,
-                u32 cmd_flags,
-                const struct dprtc_cfg *cfg,
-                u32 *obj_id)
-{
-       struct fsl_mc_command cmd = { 0 };
-       int err;
-
-       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CREATE,
-                                         cmd_flags,
-                                         dprc_token);
-
-       err = mc_send_command(mc_io, &cmd);
-       if (err)
-               return err;
-
-       *obj_id = mc_cmd_read_object_id(&cmd);
-
-       return 0;
-}
-
-/**
- * dprtc_destroy() - Destroy the DPRTC object and release all its resources.
- * @mc_io:     Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @object_id: The object id; it must be a valid id within the container that
- * created this object;
- *
- * The function accepts the authentication token of the parent container that
- * created the object (not the one that currently owns the object). The object
- * is searched within parent using the provided 'object_id'.
- * All tokens to the object must be closed before calling destroy.
- *
- * Return:     '0' on Success; error code otherwise.
- */
-int dprtc_destroy(struct fsl_mc_io *mc_io,
-                 u16 dprc_token,
-                 u32 cmd_flags,
-                 u32 object_id)
-{
-       struct dprtc_cmd_destroy *cmd_params;
-       struct fsl_mc_command cmd = { 0 };
-
-       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY,
-                                         cmd_flags,
-                                         dprc_token);
-       cmd_params = (struct dprtc_cmd_destroy *)cmd.params;
-       cmd_params->object_id = cpu_to_le32(object_id);
-
-       return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_enable() - Enable the DPRTC.
- * @mc_io:     Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:     Token of DPRTC object
- *
- * Return:     '0' on Success; Error code otherwise.
- */
-int dprtc_enable(struct fsl_mc_io *mc_io,
-                u32 cmd_flags,
-                u16 token)
-{
-       struct fsl_mc_command cmd = { 0 };
-
-       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags,
-                                         token);
-
-       return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_disable() - Disable the DPRTC.
- * @mc_io:     Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:     Token of DPRTC object
- *
- * Return:     '0' on Success; Error code otherwise.
- */
-int dprtc_disable(struct fsl_mc_io *mc_io,
-                 u32 cmd_flags,
-                 u16 token)
-{
-       struct fsl_mc_command cmd = { 0 };
-
-       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE,
-                                         cmd_flags,
-                                         token);
-
-       return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_is_enabled() - Check if the DPRTC is enabled.
- * @mc_io:     Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:     Token of DPRTC object
- * @en:                Returns '1' if object is enabled; '0' otherwise
- *
- * Return:     '0' on Success; Error code otherwise.
- */
-int dprtc_is_enabled(struct fsl_mc_io *mc_io,
-                    u32 cmd_flags,
-                    u16 token,
-                    int *en)
-{
-       struct dprtc_rsp_is_enabled *rsp_params;
-       struct fsl_mc_command cmd = { 0 };
-       int err;
-
-       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_IS_ENABLED, cmd_flags,
-                                         token);
-
-       err = mc_send_command(mc_io, &cmd);
-       if (err)
-               return err;
-
-       rsp_params = (struct dprtc_rsp_is_enabled *)cmd.params;
-       *en = dprtc_get_field(rsp_params->en, ENABLE);
-
-       return 0;
-}
-
-/**
- * dprtc_reset() - Reset the DPRTC, returns the object to initial state.
- * @mc_io:     Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:     Token of DPRTC object
- *
- * Return:     '0' on Success; Error code otherwise.
- */
-int dprtc_reset(struct fsl_mc_io *mc_io,
-               u32 cmd_flags,
-               u16 token)
-{
-       struct fsl_mc_command cmd = { 0 };
-
-       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET,
-                                         cmd_flags,
-                                         token);
-
-       return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_set_irq_enable() - Set overall interrupt state.
- * @mc_io:     Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:     Token of DPRTC object
- * @irq_index: The interrupt index to configure
- * @en:                Interrupt state - enable = 1, disable = 0
- *
- * Allows GPP software to control when interrupts are generated.
- * Each interrupt can have up to 32 causes.  The enable/disable control's the
- * overall interrupt state. if the interrupt is disabled no causes will cause
- * an interrupt.
- *
- * Return:     '0' on Success; Error code otherwise.
- */
-int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
-                        u32 cmd_flags,
-                        u16 token,
-                        u8 irq_index,
-                        u8 en)
-{
-       struct dprtc_cmd_set_irq_enable *cmd_params;
-       struct fsl_mc_command cmd = { 0 };
-
-       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE,
-                                         cmd_flags,
-                                         token);
-       cmd_params = (struct dprtc_cmd_set_irq_enable *)cmd.params;
-       cmd_params->irq_index = irq_index;
-       cmd_params->en = en;
-
-       return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_get_irq_enable() - Get overall interrupt state
- * @mc_io:     Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:     Token of DPRTC object
- * @irq_index: The interrupt index to configure
- * @en:                Returned interrupt state - enable = 1, disable = 0
- *
- * Return:     '0' on Success; Error code otherwise.
- */
-int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
-                        u32 cmd_flags,
-                        u16 token,
-                        u8 irq_index,
-                        u8 *en)
-{
-       struct dprtc_rsp_get_irq_enable *rsp_params;
-       struct dprtc_cmd_get_irq *cmd_params;
-       struct fsl_mc_command cmd = { 0 };
-       int err;
-
-       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_ENABLE,
-                                         cmd_flags,
-                                         token);
-       cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
-       cmd_params->irq_index = irq_index;
-
-       err = mc_send_command(mc_io, &cmd);
-       if (err)
-               return err;
-
-       rsp_params = (struct dprtc_rsp_get_irq_enable *)cmd.params;
-       *en = rsp_params->en;
-
-       return 0;
-}
-
-/**
- * dprtc_set_irq_mask() - Set interrupt mask.
- * @mc_io:     Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:     Token of DPRTC object
- * @irq_index: The interrupt index to configure
- * @mask:      Event mask to trigger interrupt;
- *             each bit:
- *                     0 = ignore event
- *                     1 = consider event for asserting IRQ
- *
- * Every interrupt can have up to 32 causes and the interrupt model supports
- * masking/unmasking each cause independently
- *
- * Return:     '0' on Success; Error code otherwise.
- */
-int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
-                      u32 cmd_flags,
-                      u16 token,
-                      u8 irq_index,
-                      u32 mask)
-{
-       struct dprtc_cmd_set_irq_mask *cmd_params;
-       struct fsl_mc_command cmd = { 0 };
-
-       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK,
-                                         cmd_flags,
-                                         token);
-       cmd_params = (struct dprtc_cmd_set_irq_mask *)cmd.params;
-       cmd_params->mask = cpu_to_le32(mask);
-       cmd_params->irq_index = irq_index;
-
-       return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_get_irq_mask() - Get interrupt mask.
- * @mc_io:     Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:     Token of DPRTC object
- * @irq_index: The interrupt index to configure
- * @mask:      Returned event mask to trigger interrupt
- *
- * Every interrupt can have up to 32 causes and the interrupt model supports
- * masking/unmasking each cause independently
- *
- * Return:     '0' on Success; Error code otherwise.
- */
-int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
-                      u32 cmd_flags,
-                      u16 token,
-                      u8 irq_index,
-                      u32 *mask)
-{
-       struct dprtc_rsp_get_irq_mask *rsp_params;
-       struct dprtc_cmd_get_irq *cmd_params;
-       struct fsl_mc_command cmd = { 0 };
-       int err;
-
-       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_MASK,
-                                         cmd_flags,
-                                         token);
-       cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
-       cmd_params->irq_index = irq_index;
-
-       err = mc_send_command(mc_io, &cmd);
-       if (err)
-               return err;
-
-       rsp_params = (struct dprtc_rsp_get_irq_mask *)cmd.params;
-       *mask = le32_to_cpu(rsp_params->mask);
-
-       return 0;
-}
-
-/**
- * dprtc_get_irq_status() - Get the current status of any pending interrupts.
- *
- * @mc_io:     Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:     Token of DPRTC object
- * @irq_index: The interrupt index to configure
- * @status:    Returned interrupts status - one bit per cause:
- *                     0 = no interrupt pending
- *                     1 = interrupt pending
- *
- * Return:     '0' on Success; Error code otherwise.
- */
-int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
-                        u32 cmd_flags,
-                        u16 token,
-                        u8 irq_index,
-                        u32 *status)
-{
-       struct dprtc_cmd_get_irq_status *cmd_params;
-       struct dprtc_rsp_get_irq_status *rsp_params;
-       struct fsl_mc_command cmd = { 0 };
-       int err;
-
-       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_STATUS,
-                                         cmd_flags,
-                                         token);
-       cmd_params = (struct dprtc_cmd_get_irq_status *)cmd.params;
-       cmd_params->status = cpu_to_le32(*status);
-       cmd_params->irq_index = irq_index;
-
-       err = mc_send_command(mc_io, &cmd);
-       if (err)
-               return err;
-
-       rsp_params = (struct dprtc_rsp_get_irq_status *)cmd.params;
-       *status = le32_to_cpu(rsp_params->status);
-
-       return 0;
-}
-
-/**
- * dprtc_clear_irq_status() - Clear a pending interrupt's status
- *
- * @mc_io:     Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:     Token of DPRTC object
- * @irq_index: The interrupt index to configure
- * @status:    Bits to clear (W1C) - one bit per cause:
- *                     0 = don't change
- *                     1 = clear status bit
- *
- * Return:     '0' on Success; Error code otherwise.
- */
-int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
-                          u32 cmd_flags,
-                          u16 token,
-                          u8 irq_index,
-                          u32 status)
-{
-       struct dprtc_cmd_clear_irq_status *cmd_params;
-       struct fsl_mc_command cmd = { 0 };
-
-       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS,
-                                         cmd_flags,
-                                         token);
-       cmd_params = (struct dprtc_cmd_clear_irq_status *)cmd.params;
-       cmd_params->irq_index = irq_index;
-       cmd_params->status = cpu_to_le32(status);
-
-       return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_get_attributes - Retrieve DPRTC attributes.
- *
- * @mc_io:     Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:     Token of DPRTC object
- * @attr:      Returned object's attributes
- *
- * Return:     '0' on Success; Error code otherwise.
- */
-int dprtc_get_attributes(struct fsl_mc_io *mc_io,
-                        u32 cmd_flags,
-                        u16 token,
-                        struct dprtc_attr *attr)
-{
-       struct dprtc_rsp_get_attributes *rsp_params;
-       struct fsl_mc_command cmd = { 0 };
-       int err;
-
-       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_ATTR,
-                                         cmd_flags,
-                                         token);
-
-       err = mc_send_command(mc_io, &cmd);
-       if (err)
-               return err;
-
-       rsp_params = (struct dprtc_rsp_get_attributes *)cmd.params;
-       attr->id = le32_to_cpu(rsp_params->id);
-
-       return 0;
-}
-
-/**
- * dprtc_set_clock_offset() - Sets the clock's offset
- * (usually relative to another clock).
- *
- * @mc_io:     Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:     Token of DPRTC object
- * @offset:    New clock offset (in nanoseconds).
- *
- * Return:     '0' on Success; Error code otherwise.
- */
-int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
-                          u32 cmd_flags,
-                          u16 token,
-                          int64_t offset)
-{
-       struct dprtc_cmd_set_clock_offset *cmd_params;
-       struct fsl_mc_command cmd = { 0 };
-
-       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET,
-                                         cmd_flags,
-                                         token);
-       cmd_params = (struct dprtc_cmd_set_clock_offset *)cmd.params;
-       cmd_params->offset = cpu_to_le64(offset);
-
-       return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_set_freq_compensation() - Sets a new frequency compensation value.
- *
- * @mc_io:             Pointer to MC portal's I/O object
- * @cmd_flags:         Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:             Token of DPRTC object
- * @freq_compensation: The new frequency compensation value to set.
- *
- * Return:     '0' on Success; Error code otherwise.
- */
-int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
-                               u32 cmd_flags,
-                               u16 token,
-                               u32 freq_compensation)
-{
-       struct dprtc_get_freq_compensation *cmd_params;
-       struct fsl_mc_command cmd = { 0 };
-
-       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION,
-                                         cmd_flags,
-                                         token);
-       cmd_params = (struct dprtc_get_freq_compensation *)cmd.params;
-       cmd_params->freq_compensation = cpu_to_le32(freq_compensation);
-
-       return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_get_freq_compensation() - Retrieves the frequency compensation value
- *
- * @mc_io:             Pointer to MC portal's I/O object
- * @cmd_flags:         Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:             Token of DPRTC object
- * @freq_compensation: Frequency compensation value
- *
- * Return:     '0' on Success; Error code otherwise.
- */
-int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
-                               u32 cmd_flags,
-                               u16 token,
-                               u32 *freq_compensation)
-{
-       struct dprtc_get_freq_compensation *rsp_params;
-       struct fsl_mc_command cmd = { 0 };
-       int err;
-
-       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION,
-                                         cmd_flags,
-                                         token);
-
-       err = mc_send_command(mc_io, &cmd);
-       if (err)
-               return err;
-
-       rsp_params = (struct dprtc_get_freq_compensation *)cmd.params;
-       *freq_compensation = le32_to_cpu(rsp_params->freq_compensation);
-
-       return 0;
-}
-
-/**
- * dprtc_get_time() - Returns the current RTC time.
- *
- * @mc_io:     Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:     Token of DPRTC object
- * @time:      Current RTC time.
- *
- * Return:     '0' on Success; Error code otherwise.
- */
-int dprtc_get_time(struct fsl_mc_io *mc_io,
-                  u32 cmd_flags,
-                  u16 token,
-                  uint64_t *time)
-{
-       struct dprtc_time *rsp_params;
-       struct fsl_mc_command cmd = { 0 };
-       int err;
-
-       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME,
-                                         cmd_flags,
-                                         token);
-
-       err = mc_send_command(mc_io, &cmd);
-       if (err)
-               return err;
-
-       rsp_params = (struct dprtc_time *)cmd.params;
-       *time = le64_to_cpu(rsp_params->time);
-
-       return 0;
-}
-
-/**
- * dprtc_set_time() - Updates current RTC time.
- *
- * @mc_io:     Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:     Token of DPRTC object
- * @time:      New RTC time.
- *
- * Return:     '0' on Success; Error code otherwise.
- */
-int dprtc_set_time(struct fsl_mc_io *mc_io,
-                  u32 cmd_flags,
-                  u16 token,
-                  uint64_t time)
-{
-       struct dprtc_time *cmd_params;
-       struct fsl_mc_command cmd = { 0 };
-
-       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME,
-                                         cmd_flags,
-                                         token);
-       cmd_params = (struct dprtc_time *)cmd.params;
-       cmd_params->time = cpu_to_le64(time);
-
-       return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_set_alarm() - Defines and sets alarm.
- *
- * @mc_io:     Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:     Token of DPRTC object
- * @time:      In nanoseconds, the time when the alarm
- *                     should go off - must be a multiple of
- *                     1 microsecond
- *
- * Return:     '0' on Success; Error code otherwise.
- */
-int dprtc_set_alarm(struct fsl_mc_io *mc_io,
-                   u32 cmd_flags,
-                   u16 token, uint64_t time)
-{
-       struct dprtc_time *cmd_params;
-       struct fsl_mc_command cmd = { 0 };
-
-       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM,
-                                         cmd_flags,
-                                         token);
-       cmd_params = (struct dprtc_time *)cmd.params;
-       cmd_params->time = cpu_to_le64(time);
-
-       return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_get_api_version() - Get Data Path Real Time Counter API version
- * @mc_io:     Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver: Major version of data path real time counter API
- * @minor_ver: Minor version of data path real time counter API
- *
- * Return:  '0' on Success; Error code otherwise.
- */
-int dprtc_get_api_version(struct fsl_mc_io *mc_io,
-                         u32 cmd_flags,
-                         u16 *major_ver,
-                         u16 *minor_ver)
-{
-       struct dprtc_rsp_get_api_version *rsp_params;
-       struct fsl_mc_command cmd = { 0 };
-       int err;
-
-       cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_API_VERSION,
-                                         cmd_flags,
-                                         0);
-
-       err = mc_send_command(mc_io, &cmd);
-       if (err)
-               return err;
-
-       rsp_params = (struct dprtc_rsp_get_api_version *)cmd.params;
-       *major_ver = le16_to_cpu(rsp_params->major);
-       *minor_ver = le16_to_cpu(rsp_params->minor);
-
-       return 0;
-}
diff --git a/drivers/staging/fsl-dpaa2/rtc/dprtc.h b/drivers/staging/fsl-dpaa2/rtc/dprtc.h
deleted file mode 100644 (file)
index 08f7c7b..0000000
+++ /dev/null
@@ -1,164 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2018 NXP
- */
-
-#ifndef __FSL_DPRTC_H
-#define __FSL_DPRTC_H
-
-/* Data Path Real Time Counter API
- * Contains initialization APIs and runtime control APIs for RTC
- */
-
-struct fsl_mc_io;
-
-/**
- * Number of irq's
- */
-#define DPRTC_MAX_IRQ_NUM                      1
-#define DPRTC_IRQ_INDEX                                0
-
-/**
- * Interrupt event masks:
- */
-
-/**
- * Interrupt event mask indicating alarm event had occurred
- */
-#define DPRTC_EVENT_ALARM                      0x40000000
-/**
- * Interrupt event mask indicating periodic pulse event had occurred
- */
-#define DPRTC_EVENT_PPS                                0x08000000
-
-int dprtc_open(struct fsl_mc_io *mc_io,
-              u32 cmd_flags,
-              int dprtc_id,
-              u16 *token);
-
-int dprtc_close(struct fsl_mc_io *mc_io,
-               u32 cmd_flags,
-               u16 token);
-
-/**
- * struct dprtc_cfg - Structure representing DPRTC configuration
- * @options:   place holder
- */
-struct dprtc_cfg {
-       u32 options;
-};
-
-int dprtc_create(struct fsl_mc_io *mc_io,
-                u16 dprc_token,
-                u32 cmd_flags,
-                const struct dprtc_cfg *cfg,
-                u32 *obj_id);
-
-int dprtc_destroy(struct fsl_mc_io *mc_io,
-                 u16 dprc_token,
-                 u32 cmd_flags,
-                 u32 object_id);
-
-int dprtc_enable(struct fsl_mc_io *mc_io,
-                u32 cmd_flags,
-                u16 token);
-
-int dprtc_disable(struct fsl_mc_io *mc_io,
-                 u32 cmd_flags,
-                 u16 token);
-
-int dprtc_is_enabled(struct fsl_mc_io *mc_io,
-                    u32 cmd_flags,
-                    u16 token,
-                    int *en);
-
-int dprtc_reset(struct fsl_mc_io *mc_io,
-               u32 cmd_flags,
-               u16 token);
-
-int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
-                          u32 cmd_flags,
-                          u16 token,
-                          int64_t offset);
-
-int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
-                               u32 cmd_flags,
-                               u16 token,
-                               u32 freq_compensation);
-
-int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
-                               u32 cmd_flags,
-                               u16 token,
-                               u32 *freq_compensation);
-
-int dprtc_get_time(struct fsl_mc_io *mc_io,
-                  u32 cmd_flags,
-                  u16 token,
-                  uint64_t *time);
-
-int dprtc_set_time(struct fsl_mc_io *mc_io,
-                  u32 cmd_flags,
-                  u16 token,
-                  uint64_t time);
-
-int dprtc_set_alarm(struct fsl_mc_io *mc_io,
-                   u32 cmd_flags,
-                   u16 token,
-                   uint64_t time);
-
-int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
-                        u32 cmd_flags,
-                        u16 token,
-                        u8 irq_index,
-                        u8 en);
-
-int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
-                        u32 cmd_flags,
-                        u16 token,
-                        u8 irq_index,
-                        u8 *en);
-
-int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
-                      u32 cmd_flags,
-                      u16 token,
-                      u8 irq_index,
-                      u32 mask);
-
-int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
-                      u32 cmd_flags,
-                      u16 token,
-                      u8 irq_index,
-                      u32 *mask);
-
-int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
-                        u32 cmd_flags,
-                        u16 token,
-                        u8 irq_index,
-                        u32 *status);
-
-int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
-                          u32 cmd_flags,
-                          u16 token,
-                          u8 irq_index,
-                          u32 status);
-
-/**
- * struct dprtc_attr - Structure representing DPRTC attributes
- * @id:                DPRTC object ID
- */
-struct dprtc_attr {
-       int id;
-};
-
-int dprtc_get_attributes(struct fsl_mc_io *mc_io,
-                        u32 cmd_flags,
-                        u16 token,
-                        struct dprtc_attr *attr);
-
-int dprtc_get_api_version(struct fsl_mc_io *mc_io,
-                         u32 cmd_flags,
-                         u16 *major_ver,
-                         u16 *minor_ver);
-
-#endif /* __FSL_DPRTC_H */
diff --git a/drivers/staging/fsl-dpaa2/rtc/rtc.c b/drivers/staging/fsl-dpaa2/rtc/rtc.c
deleted file mode 100644 (file)
index 0d52cb8..0000000
+++ /dev/null
@@ -1,229 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2018 NXP
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/ptp_clock_kernel.h>
-#include <linux/fsl/mc.h>
-
-#include "rtc.h"
-
-struct ptp_dpaa2_priv {
-       struct fsl_mc_device *rtc_mc_dev;
-       struct ptp_clock *clock;
-       struct ptp_clock_info caps;
-       u32 freq_comp;
-};
-
-/* PTP clock operations */
-static int ptp_dpaa2_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
-{
-       struct ptp_dpaa2_priv *ptp_dpaa2 =
-               container_of(ptp, struct ptp_dpaa2_priv, caps);
-       struct fsl_mc_device *mc_dev = ptp_dpaa2->rtc_mc_dev;
-       struct device *dev = &mc_dev->dev;
-       u64 adj;
-       u32 diff, tmr_add;
-       int neg_adj = 0;
-       int err = 0;
-
-       if (ppb < 0) {
-               neg_adj = 1;
-               ppb = -ppb;
-       }
-
-       tmr_add = ptp_dpaa2->freq_comp;
-       adj = tmr_add;
-       adj *= ppb;
-       diff = div_u64(adj, 1000000000ULL);
-
-       tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
-
-       err = dprtc_set_freq_compensation(mc_dev->mc_io, 0,
-                                         mc_dev->mc_handle, tmr_add);
-       if (err)
-               dev_err(dev, "dprtc_set_freq_compensation err %d\n", err);
-       return 0;
-}
-
-static int ptp_dpaa2_adjtime(struct ptp_clock_info *ptp, s64 delta)
-{
-       struct ptp_dpaa2_priv *ptp_dpaa2 =
-               container_of(ptp, struct ptp_dpaa2_priv, caps);
-       struct fsl_mc_device *mc_dev = ptp_dpaa2->rtc_mc_dev;
-       struct device *dev = &mc_dev->dev;
-       s64 now;
-       int err = 0;
-
-       err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &now);
-       if (err) {
-               dev_err(dev, "dprtc_get_time err %d\n", err);
-               return 0;
-       }
-
-       now += delta;
-
-       err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, now);
-       if (err) {
-               dev_err(dev, "dprtc_set_time err %d\n", err);
-               return 0;
-       }
-       return 0;
-}
-
-static int ptp_dpaa2_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
-{
-       struct ptp_dpaa2_priv *ptp_dpaa2 =
-               container_of(ptp, struct ptp_dpaa2_priv, caps);
-       struct fsl_mc_device *mc_dev = ptp_dpaa2->rtc_mc_dev;
-       struct device *dev = &mc_dev->dev;
-       u64 ns;
-       u32 remainder;
-       int err = 0;
-
-       err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &ns);
-       if (err) {
-               dev_err(dev, "dprtc_get_time err %d\n", err);
-               return 0;
-       }
-
-       ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
-       ts->tv_nsec = remainder;
-       return 0;
-}
-
-static int ptp_dpaa2_settime(struct ptp_clock_info *ptp,
-                            const struct timespec64 *ts)
-{
-       struct ptp_dpaa2_priv *ptp_dpaa2 =
-               container_of(ptp, struct ptp_dpaa2_priv, caps);
-       struct fsl_mc_device *mc_dev = ptp_dpaa2->rtc_mc_dev;
-       struct device *dev = &mc_dev->dev;
-       u64 ns;
-       int err = 0;
-
-       ns = ts->tv_sec * 1000000000ULL;
-       ns += ts->tv_nsec;
-
-       err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, ns);
-       if (err)
-               dev_err(dev, "dprtc_set_time err %d\n", err);
-       return 0;
-}
-
-static struct ptp_clock_info ptp_dpaa2_caps = {
-       .owner          = THIS_MODULE,
-       .name           = "DPAA2 PTP Clock",
-       .max_adj        = 512000,
-       .n_alarm        = 2,
-       .n_ext_ts       = 2,
-       .n_per_out      = 3,
-       .n_pins         = 0,
-       .pps            = 1,
-       .adjfreq        = ptp_dpaa2_adjfreq,
-       .adjtime        = ptp_dpaa2_adjtime,
-       .gettime64      = ptp_dpaa2_gettime,
-       .settime64      = ptp_dpaa2_settime,
-};
-
-static int rtc_probe(struct fsl_mc_device *mc_dev)
-{
-       struct device *dev = &mc_dev->dev;
-       struct ptp_dpaa2_priv *ptp_dpaa2;
-       u32 tmr_add = 0;
-       int err;
-
-       ptp_dpaa2 = kzalloc(sizeof(*ptp_dpaa2), GFP_KERNEL);
-       if (!ptp_dpaa2)
-               return -ENOMEM;
-
-       err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
-       if (err) {
-               dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
-               goto err_exit;
-       }
-
-       err = dprtc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
-                        &mc_dev->mc_handle);
-       if (err) {
-               dev_err(dev, "dprtc_open err %d\n", err);
-               goto err_free_mcp;
-       }
-
-       ptp_dpaa2->rtc_mc_dev = mc_dev;
-
-       err = dprtc_get_freq_compensation(mc_dev->mc_io, 0,
-                                         mc_dev->mc_handle, &tmr_add);
-       if (err) {
-               dev_err(dev, "dprtc_get_freq_compensation err %d\n", err);
-               goto err_close;
-       }
-
-       ptp_dpaa2->freq_comp = tmr_add;
-       ptp_dpaa2->caps = ptp_dpaa2_caps;
-
-       ptp_dpaa2->clock = ptp_clock_register(&ptp_dpaa2->caps, dev);
-       if (IS_ERR(ptp_dpaa2->clock)) {
-               err = PTR_ERR(ptp_dpaa2->clock);
-               goto err_close;
-       }
-
-       dpaa2_phc_index = ptp_clock_index(ptp_dpaa2->clock);
-
-       dev_set_drvdata(dev, ptp_dpaa2);
-
-       return 0;
-
-err_close:
-       dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
-err_free_mcp:
-       fsl_mc_portal_free(mc_dev->mc_io);
-err_exit:
-       kfree(ptp_dpaa2);
-       dev_set_drvdata(dev, NULL);
-       return err;
-}
-
-static int rtc_remove(struct fsl_mc_device *mc_dev)
-{
-       struct ptp_dpaa2_priv *ptp_dpaa2;
-       struct device *dev = &mc_dev->dev;
-
-       ptp_dpaa2 = dev_get_drvdata(dev);
-       ptp_clock_unregister(ptp_dpaa2->clock);
-
-       dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
-       fsl_mc_portal_free(mc_dev->mc_io);
-
-       kfree(ptp_dpaa2);
-       dev_set_drvdata(dev, NULL);
-
-       return 0;
-}
-
-static const struct fsl_mc_device_id rtc_match_id_table[] = {
-       {
-               .vendor = FSL_MC_VENDOR_FREESCALE,
-               .obj_type = "dprtc",
-       },
-       {}
-};
-MODULE_DEVICE_TABLE(fslmc, rtc_match_id_table);
-
-static struct fsl_mc_driver rtc_drv = {
-       .driver = {
-               .name = KBUILD_MODNAME,
-               .owner = THIS_MODULE,
-       },
-       .probe = rtc_probe,
-       .remove = rtc_remove,
-       .match_id_table = rtc_match_id_table,
-};
-
-module_fsl_mc_driver(rtc_drv);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("DPAA2 PTP Clock Driver");
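
The adjfreq path in the deleted rtc.c above scales the hardware addend
register by a signed parts-per-billion value, widening to 64 bits so the
multiply cannot overflow. A minimal standalone sketch of that scaling step,
with a hypothetical helper name (not part of this commit):

        #include <linux/math64.h>
        #include <linux/types.h>

        /* Scale a timer addend by a signed ppb adjustment. */
        static u32 scale_addend_by_ppb(u32 tmr_add, s32 ppb)
        {
                u64 adj;
                u32 diff;
                int neg_adj = 0;

                if (ppb < 0) {
                        neg_adj = 1;
                        ppb = -ppb;
                }
                adj = (u64)tmr_add * ppb;            /* widen before multiply */
                diff = div_u64(adj, 1000000000ULL);  /* ppb -> addend units */
                return neg_adj ? tmr_add - diff : tmr_add + diff;
        }
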
diff --git a/drivers/staging/fsl-dpaa2/rtc/rtc.h b/drivers/staging/fsl-dpaa2/rtc/rtc.h
deleted file mode 100644 (file)
index ff2e177..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright 2018 NXP
- */
-
-#ifndef __RTC_H
-#define __RTC_H
-
-#include "dprtc.h"
-#include "dprtc-cmd.h"
-
-extern int dpaa2_phc_index;
-
-#endif
index 015abf333c62d5da91459ae4f50aacd89f065405..ab11b2bee2739f261790559237f8848cd3eb0174 100644 (file)
@@ -562,7 +562,6 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
        if (r == tvq->num && tvq->busyloop_timeout) {
                /* Flush batched packets first */
                if (!vhost_sock_zcopy(tvq->private_data))
-                       // vhost_net_signal_used(tnvq);
                        vhost_tx_batch(net, tnvq, tvq->private_data, msghdr);
 
                vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);
index 3946649b85c8908f4e9874b837ccdad274af8499..ba906876cc454f5e67865ad7af69ee3b37f5f059 100644 (file)
@@ -42,6 +42,7 @@ struct bmp_dib_header {
        u32 colors_important;
 } __packed;
 
+static bool use_bgrt = true;
 static bool request_mem_succeeded = false;
 static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
 
@@ -160,6 +161,9 @@ static void efifb_show_boot_graphics(struct fb_info *info)
        void *bgrt_image = NULL;
        u8 *dst = info->screen_base;
 
+       if (!use_bgrt)
+               return;
+
        if (!bgrt_tab.image_address) {
                pr_info("efifb: No BGRT, not showing boot graphics\n");
                return;
@@ -290,6 +294,8 @@ static int efifb_setup(char *options)
                                screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
                        else if (!strcmp(this_opt, "nowc"))
                                mem_flags &= ~EFI_MEMORY_WC;
+                       else if (!strcmp(this_opt, "nobgrt"))
+                               use_bgrt = false;
                }
        }
 
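
The two efifb hunks above wire a new "nobgrt" option through efifb_setup()
to suppress the BGRT boot splash. Assuming the usual efifb option syntax on
the kernel command line, usage would look like:

        video=efifb:nobgrt
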
index ef69273074ba706752b52076b83e2cd070210fb2..a3edb20ea4c36094104e1cc45bfd30c976b0b41e 100644 (file)
@@ -496,6 +496,9 @@ static int omapfb_memory_read(struct fb_info *fbi,
        if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
                return -EFAULT;
 
+       if (mr->w > 4096 || mr->h > 4096)
+               return -EINVAL;
+
        if (mr->w * mr->h * 3 > mr->buffer_size)
                return -EINVAL;
 
@@ -509,7 +512,7 @@ static int omapfb_memory_read(struct fb_info *fbi,
                        mr->x, mr->y, mr->w, mr->h);
 
        if (r > 0) {
-               if (copy_to_user(mr->buffer, buf, mr->buffer_size))
+               if (copy_to_user(mr->buffer, buf, r))
                        r = -EFAULT;
        }
 
index def3a501acd64484342f2315c9b32fe697b166a5..d059d04c63acd7bc118bbc0ec02fc3cb1dfa41f8 100644 (file)
@@ -712,7 +712,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
        /*
         * enable controller clock
         */
-       clk_enable(fbi->clk);
+       clk_prepare_enable(fbi->clk);
 
        pxa168fb_set_par(info);
 
@@ -767,7 +767,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
 failed_free_cmap:
        fb_dealloc_cmap(&info->cmap);
 failed_free_clk:
-       clk_disable(fbi->clk);
+       clk_disable_unprepare(fbi->clk);
 failed_free_fbmem:
        dma_free_coherent(fbi->dev, info->fix.smem_len,
                        info->screen_base, fbi->fb_start_dma);
@@ -807,7 +807,7 @@ static int pxa168fb_remove(struct platform_device *pdev)
        dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
                    info->screen_base, info->fix.smem_start);
 
-       clk_disable(fbi->clk);
+       clk_disable_unprepare(fbi->clk);
 
        framebuffer_release(info);
 
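
The pxa168fb hunks above replace bare clk_enable()/clk_disable() with
clk_prepare_enable()/clk_disable_unprepare(): under the common clock
framework a clock must be prepared (a step that may sleep) before it can be
enabled, and the two steps must be unwound together. A minimal sketch of the
balanced pairing, with hypothetical driver names:

        #include <linux/clk.h>
        #include <linux/err.h>
        #include <linux/platform_device.h>

        static int example_probe(struct platform_device *pdev)
        {
                struct clk *clk;
                int ret;

                clk = devm_clk_get(&pdev->dev, NULL);
                if (IS_ERR(clk))
                        return PTR_ERR(clk);

                ret = clk_prepare_enable(clk);  /* prepare + enable as one */
                if (ret)
                        return ret;

                platform_set_drvdata(pdev, clk);
                return 0;
        }

        static int example_remove(struct platform_device *pdev)
        {
                /* undo both steps taken in probe */
                clk_disable_unprepare(platform_get_drvdata(pdev));
                return 0;
        }
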
index 045e8afe398be35866adb64d774c0ebf0dd9834c..9e88e3f594c29c4d4a0c7362500b494fbf0ca2db 100644 (file)
@@ -1157,7 +1157,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
                        dev_name);
                   goto out_err0;
                }
-               /* fall though */
+               /* fall through */
        case S9000_ID_ARTIST:
        case S9000_ID_HCRX:
        case S9000_ID_TIMBER:
index 025a9a5e1c32c29c4daf3cd40aba8537475af250..55a756c60746ca7924625b99b0c2a42d429119fa 100644 (file)
 #include "internal.h"
 #include "afs_fs.h"
 
-//#define AFS_MAX_ADDRESSES
-//     ((unsigned int)((PAGE_SIZE - sizeof(struct afs_addr_list)) /
-//                     sizeof(struct sockaddr_rxrpc)))
-#define AFS_MAX_ADDRESSES ((unsigned int)(sizeof(unsigned long) * 8))
-
 /*
  * Release an address list.
  */
@@ -43,11 +38,15 @@ struct afs_addr_list *afs_alloc_addrlist(unsigned int nr,
 
        _enter("%u,%u,%u", nr, service, port);
 
+       if (nr > AFS_MAX_ADDRESSES)
+               nr = AFS_MAX_ADDRESSES;
+
        alist = kzalloc(struct_size(alist, addrs, nr), GFP_KERNEL);
        if (!alist)
                return NULL;
 
        refcount_set(&alist->usage, 1);
+       alist->max_addrs = nr;
 
        for (i = 0; i < nr; i++) {
                struct sockaddr_rxrpc *srx = &alist->addrs[i];
@@ -109,8 +108,6 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
        } while (p < end);
 
        _debug("%u/%u addresses", nr, AFS_MAX_ADDRESSES);
-       if (nr > AFS_MAX_ADDRESSES)
-               nr = AFS_MAX_ADDRESSES;
 
        alist = afs_alloc_addrlist(nr, service, port);
        if (!alist)
@@ -119,8 +116,10 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
        /* Extract the addresses */
        p = text;
        do {
-               struct sockaddr_rxrpc *srx = &alist->addrs[alist->nr_addrs];
                const char *q, *stop;
+               unsigned int xport = port;
+               __be32 x[4];
+               int family;
 
                if (*p == delim) {
                        p++;
@@ -136,19 +135,12 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
                                        break;
                }
 
-               if (in4_pton(p, q - p,
-                            (u8 *)&srx->transport.sin6.sin6_addr.s6_addr32[3],
-                            -1, &stop)) {
-                       srx->transport.sin6.sin6_addr.s6_addr32[0] = 0;
-                       srx->transport.sin6.sin6_addr.s6_addr32[1] = 0;
-                       srx->transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
-               } else if (in6_pton(p, q - p,
-                                   srx->transport.sin6.sin6_addr.s6_addr,
-                                   -1, &stop)) {
-                       /* Nothing to do */
-               } else {
+               if (in4_pton(p, q - p, (u8 *)&x[0], -1, &stop))
+                       family = AF_INET;
+               else if (in6_pton(p, q - p, (u8 *)x, -1, &stop))
+                       family = AF_INET6;
+               else
                        goto bad_address;
-               }
 
                if (stop != q)
                        goto bad_address;
@@ -160,7 +152,7 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
                if (p < end) {
                        if (*p == '+') {
                                /* Port number specification "+1234" */
-                               unsigned int xport = 0;
+                               xport = 0;
                                p++;
                                if (p >= end || !isdigit(*p))
                                        goto bad_address;
@@ -171,7 +163,6 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
                                                goto bad_address;
                                        p++;
                                } while (p < end && isdigit(*p));
-                               srx->transport.sin6.sin6_port = htons(xport);
                        } else if (*p == delim) {
                                p++;
                        } else {
@@ -179,8 +170,12 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
                        }
                }
 
-               alist->nr_addrs++;
-       } while (p < end && alist->nr_addrs < AFS_MAX_ADDRESSES);
+               if (family == AF_INET)
+                       afs_merge_fs_addr4(alist, x[0], xport);
+               else
+                       afs_merge_fs_addr6(alist, x, xport);
+
+       } while (p < end);
 
        _leave(" = [nr %u]", alist->nr_addrs);
        return alist;
@@ -237,19 +232,23 @@ struct afs_addr_list *afs_dns_query(struct afs_cell *cell, time64_t *_expiry)
  */
 void afs_merge_fs_addr4(struct afs_addr_list *alist, __be32 xdr, u16 port)
 {
-       struct sockaddr_in6 *a;
-       __be16 xport = htons(port);
+       struct sockaddr_rxrpc *srx;
+       u32 addr = ntohl(xdr);
        int i;
 
+       if (alist->nr_addrs >= alist->max_addrs)
+               return;
+
        for (i = 0; i < alist->nr_ipv4; i++) {
-               a = &alist->addrs[i].transport.sin6;
-               if (xdr == a->sin6_addr.s6_addr32[3] &&
-                   xport == a->sin6_port)
+               struct sockaddr_in *a = &alist->addrs[i].transport.sin;
+               u32 a_addr = ntohl(a->sin_addr.s_addr);
+               u16 a_port = ntohs(a->sin_port);
+
+               if (addr == a_addr && port == a_port)
                        return;
-               if (xdr == a->sin6_addr.s6_addr32[3] &&
-                   (u16 __force)xport < (u16 __force)a->sin6_port)
+               if (addr == a_addr && port < a_port)
                        break;
-               if ((u32 __force)xdr < (u32 __force)a->sin6_addr.s6_addr32[3])
+               if (addr < a_addr)
                        break;
        }
 
@@ -258,12 +257,11 @@ void afs_merge_fs_addr4(struct afs_addr_list *alist, __be32 xdr, u16 port)
                        alist->addrs + i,
                        sizeof(alist->addrs[0]) * (alist->nr_addrs - i));
 
-       a = &alist->addrs[i].transport.sin6;
-       a->sin6_port              = xport;
-       a->sin6_addr.s6_addr32[0] = 0;
-       a->sin6_addr.s6_addr32[1] = 0;
-       a->sin6_addr.s6_addr32[2] = htonl(0xffff);
-       a->sin6_addr.s6_addr32[3] = xdr;
+       srx = &alist->addrs[i];
+       srx->transport_len = sizeof(srx->transport.sin);
+       srx->transport.sin.sin_family = AF_INET;
+       srx->transport.sin.sin_port = htons(port);
+       srx->transport.sin.sin_addr.s_addr = xdr;
        alist->nr_ipv4++;
        alist->nr_addrs++;
 }
@@ -273,18 +271,20 @@ void afs_merge_fs_addr4(struct afs_addr_list *alist, __be32 xdr, u16 port)
  */
 void afs_merge_fs_addr6(struct afs_addr_list *alist, __be32 *xdr, u16 port)
 {
-       struct sockaddr_in6 *a;
-       __be16 xport = htons(port);
+       struct sockaddr_rxrpc *srx;
        int i, diff;
 
+       if (alist->nr_addrs >= alist->max_addrs)
+               return;
+
        for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) {
-               a = &alist->addrs[i].transport.sin6;
+               struct sockaddr_in6 *a = &alist->addrs[i].transport.sin6;
+               u16 a_port = ntohs(a->sin6_port);
+
                diff = memcmp(xdr, &a->sin6_addr, 16);
-               if (diff == 0 &&
-                   xport == a->sin6_port)
+               if (diff == 0 && port == a_port)
                        return;
-               if (diff == 0 &&
-                   (u16 __force)xport < (u16 __force)a->sin6_port)
+               if (diff == 0 && port < a_port)
                        break;
                if (diff < 0)
                        break;
@@ -295,12 +295,11 @@ void afs_merge_fs_addr6(struct afs_addr_list *alist, __be32 *xdr, u16 port)
                        alist->addrs + i,
                        sizeof(alist->addrs[0]) * (alist->nr_addrs - i));
 
-       a = &alist->addrs[i].transport.sin6;
-       a->sin6_port              = xport;
-       a->sin6_addr.s6_addr32[0] = xdr[0];
-       a->sin6_addr.s6_addr32[1] = xdr[1];
-       a->sin6_addr.s6_addr32[2] = xdr[2];
-       a->sin6_addr.s6_addr32[3] = xdr[3];
+       srx = &alist->addrs[i];
+       srx->transport_len = sizeof(srx->transport.sin6);
+       srx->transport.sin6.sin6_family = AF_INET6;
+       srx->transport.sin6.sin6_port = htons(port);
+       memcpy(&srx->transport.sin6.sin6_addr, xdr, 16);
        alist->nr_addrs++;
 }
 
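
Both merge helpers above keep the address array ordered (IPv4 entries first,
then IPv6), drop exact duplicates, and shift the tail with memmove() to open
the insertion slot, capped by max_addrs. A generic standalone sketch of that
sorted-insert pattern (illustrative userspace C, not kernel code):

        #include <string.h>

        static void sorted_insert(unsigned int *arr, unsigned char *nr,
                                  unsigned char max, unsigned int val)
        {
                unsigned char i;

                if (*nr >= max)
                        return;                 /* list full: drop it */

                for (i = 0; i < *nr; i++) {
                        if (arr[i] == val)
                                return;         /* already present */
                        if (arr[i] > val)
                                break;          /* insertion point */
                }

                /* open a slot, then place the new value */
                memmove(arr + i + 1, arr + i, (*nr - i) * sizeof(*arr));
                arr[i] = val;
                (*nr)++;
        }
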
index 871a228d7f37ce1b0b0d7122b41a8f83f134f391..8ae4e2ebb99a5bf063a059f82476f5bf2dac50e2 100644 (file)
@@ -73,12 +73,14 @@ struct afs_addr_list {
        struct rcu_head         rcu;            /* Must be first */
        refcount_t              usage;
        u32                     version;        /* Version */
-       unsigned short          nr_addrs;
-       unsigned short          index;          /* Address currently in use */
-       unsigned short          nr_ipv4;        /* Number of IPv4 addresses */
+       unsigned char           max_addrs;
+       unsigned char           nr_addrs;
+       unsigned char           index;          /* Address currently in use */
+       unsigned char           nr_ipv4;        /* Number of IPv4 addresses */
        unsigned long           probed;         /* Mask of servers that have been probed */
        unsigned long           yfs;            /* Mask of servers that are YFS */
        struct sockaddr_rxrpc   addrs[];
+#define AFS_MAX_ADDRESSES ((unsigned int)(sizeof(unsigned long) * 8))
 };
 
 /*
index 0c9ab62c3df45ae3a450a6a488e90bbd1a62ea61..9dcaed031843caeee3382a647eda0c71cc18d881 100644 (file)
@@ -1553,6 +1553,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
 
 /* Flags */
 #define   MID_WAIT_CANCELLED    1 /* Cancelled while waiting for response */
+#define   MID_DELETED            2 /* Mid has been dequeued/deleted */
 
 /* Types of response buffer returned from SendReceive2 */
 #define   CIFS_NO_BUFFER        0    /* Response buffer not returned */
index 7aa08dba4719cde8c88c18bcb5ee4a4ddae733f1..52d71b64c0c6e5f954d408098fc78197898399c7 100644 (file)
@@ -659,7 +659,15 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
                mid->mid_state = MID_RESPONSE_RECEIVED;
        else
                mid->mid_state = MID_RESPONSE_MALFORMED;
-       list_del_init(&mid->qhead);
+       /*
+        * Trying to handle/dequeue a mid after the send_recv()
+        * function has finished processing it is a bug.
+        */
+       if (mid->mid_flags & MID_DELETED)
+               printk_once(KERN_WARNING
+                           "trying to dequeue a deleted mid\n");
+       else
+               list_del_init(&mid->qhead);
        spin_unlock(&GlobalMid_Lock);
 }
 
@@ -938,8 +946,7 @@ next_pdu:
                } else {
                        mids[0] = server->ops->find_mid(server, buf);
                        bufs[0] = buf;
-                       if (mids[0])
-                               num_mids = 1;
+                       num_mids = 1;
 
                        if (!mids[0] || !mids[0]->receive)
                                length = standard_receive3(server, mids[0]);
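
The MID_DELETED flag introduced above turns a use-after-dequeue into a loud
one-time warning: cifs_delete_mid() (in the transport.c hunk further down)
unlinks the mid and sets the flag, and dequeue_mid() refuses to unlink an
entry that is already gone. A compact sketch of that mark-on-delete pattern,
with hypothetical names:

        #include <linux/list.h>
        #include <linux/printk.h>
        #include <linux/spinlock.h>

        #define ENTRY_DELETED 2         /* hypothetical, mirrors MID_DELETED */

        struct entry {
                struct list_head qhead;
                unsigned int flags;
        };

        static DEFINE_SPINLOCK(queue_lock);

        static void entry_dequeue(struct entry *e)
        {
                spin_lock(&queue_lock);
                if (e->flags & ENTRY_DELETED) {
                        /* detect, rather than corrupt, a double removal */
                        printk_once(KERN_WARNING
                                    "trying to dequeue a deleted entry\n");
                } else {
                        list_del_init(&e->qhead);
                        e->flags |= ENTRY_DELETED;
                }
                spin_unlock(&queue_lock);
        }
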
index d954ce36b4734c06ca63e2fdb0343f6109d2ec57..89985a0a6819e3a2a3348a0c4870157f7220665e 100644 (file)
@@ -1477,7 +1477,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
        }
 
        srch_inf->entries_in_buffer = 0;
-       srch_inf->index_of_last_entry = 0;
+       srch_inf->index_of_last_entry = 2;
 
        rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
                                  fid->volatile_fid, 0, srch_inf);
index 78f96fa3d7d990217014395edc043cf422f6252d..b48f43963da6afcc7ff7da565477b00e67d92877 100644 (file)
@@ -142,7 +142,8 @@ void
 cifs_delete_mid(struct mid_q_entry *mid)
 {
        spin_lock(&GlobalMid_Lock);
-       list_del(&mid->qhead);
+       list_del_init(&mid->qhead);
+       mid->mid_flags |= MID_DELETED;
        spin_unlock(&GlobalMid_Lock);
 
        DeleteMidQEntry(mid);
@@ -772,6 +773,11 @@ cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
        return mid;
 }
 
+static void
+cifs_noop_callback(struct mid_q_entry *mid)
+{
+}
+
 int
 compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                   const int flags, const int num_rqst, struct smb_rqst *rqst,
@@ -826,8 +832,13 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                }
 
                midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
+               /*
+        * We don't invoke the callback for compounds unless it is the last
+                * request.
+                */
+               if (i < num_rqst - 1)
+                       midQ[i]->callback = cifs_noop_callback;
        }
-
        cifs_in_send_inc(ses->server);
        rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
        cifs_in_send_dec(ses->server);
@@ -908,6 +919,12 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                        midQ[i]->resp_buf = NULL;
        }
 out:
+       /*
+        * This will dequeue all mids. After this it is important that the
+        * demultiplex_thread will not process any of these mids any further.
+        * This is prevented above by using a noop callback that will not
+        * wake this thread except for the very last PDU.
+        */
        for (i = 0; i < num_rqst; i++)
                cifs_delete_mid(midQ[i]);
        add_credits(ses->server, credits, optype);
index f32d7125ad0f237d61173cd72383683ac380c4e4..4becbf168b7f0df3229b1e1a5d0fb8daca02df0d 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -447,6 +447,7 @@ bool dax_lock_mapping_entry(struct page *page)
                        xa_unlock_irq(&mapping->i_pages);
                        break;
                } else if (IS_ERR(entry)) {
+                       xa_unlock_irq(&mapping->i_pages);
                        WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
                        continue;
                }
@@ -1120,21 +1121,12 @@ static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
 {
        struct inode *inode = mapping->host;
        unsigned long vaddr = vmf->address;
-       vm_fault_t ret = VM_FAULT_NOPAGE;
-       struct page *zero_page;
-       pfn_t pfn;
-
-       zero_page = ZERO_PAGE(0);
-       if (unlikely(!zero_page)) {
-               ret = VM_FAULT_OOM;
-               goto out;
-       }
+       pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
+       vm_fault_t ret;
 
-       pfn = page_to_pfn_t(zero_page);
        dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
                        false);
        ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
-out:
        trace_dax_load_hole(inode, vmf, ret);
        return ret;
 }
index 7f7ee18fe179c258ca9ccb41148592d816ec96f6..e4bb9386c04551e1af155154213285c6da688531 100644 (file)
@@ -1448,6 +1448,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
        }
        inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
        ei->i_flags = le32_to_cpu(raw_inode->i_flags);
+       ext2_set_inode_flags(inode);
        ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
        ei->i_frag_no = raw_inode->i_frag;
        ei->i_frag_size = raw_inode->i_fsize;
@@ -1517,7 +1518,6 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
                           new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
        }
        brelse (bh);
-       ext2_set_inode_flags(inode);
        unlock_new_inode(inode);
        return inode;
        
index 3212c29235ce34d21dedc26ea569978d45462706..2005529af560891043170b4d86ed05c2a62f19eb 100644 (file)
@@ -230,7 +230,7 @@ static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
        ret = -EXDEV;
        if (src_file.file->f_path.mnt != dst_file->f_path.mnt)
                goto fdput;
-       ret = do_clone_file_range(src_file.file, off, dst_file, destoff, olen);
+       ret = vfs_clone_file_range(src_file.file, off, dst_file, destoff, olen);
 fdput:
        fdput(src_file);
        return ret;
index 74762b1ec233f9e9ab1f5da47116271143a344db..ec15cf2ec696dae1f77a2c45fe4fd7129bbc2e29 100644 (file)
@@ -1051,6 +1051,7 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
        } else {
                WARN_ON_ONCE(!PageUptodate(page));
                iomap_page_create(inode, page);
+               set_page_dirty(page);
        }
 
        return length;
@@ -1090,7 +1091,6 @@ int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
                length -= ret;
        }
 
-       set_page_dirty(page);
        wait_for_stable_page(page);
        return VM_FAULT_LOCKED;
 out_unlock:
index 55a099e47ba2773e94e126285efc937391dee5d4..b53e76391e52539d11daee791bc47cc07b2ae773 100644 (file)
@@ -541,7 +541,8 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
 __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
                u64 dst_pos, u64 count)
 {
-       return nfserrno(do_clone_file_range(src, src_pos, dst, dst_pos, count));
+       return nfserrno(vfs_clone_file_range(src, src_pos, dst, dst_pos,
+                                            count));
 }
 
 ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
index aaca0949fe53f6e383522bc3e9ae9b73124e4fc2..826f0567ec438caf6b677179be14a2460cf9a943 100644 (file)
@@ -584,9 +584,9 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
 
        res->last_used = 0;
 
-       spin_lock(&dlm->spinlock);
+       spin_lock(&dlm->track_lock);
        list_add_tail(&res->tracking, &dlm->tracking_list);
-       spin_unlock(&dlm->spinlock);
+       spin_unlock(&dlm->track_lock);
 
        memset(res->lvb, 0, DLM_LVB_LEN);
        memset(res->refmap, 0, sizeof(res->refmap));
index 7869622af22a2cd2ea0dfd164b1a5b3fba31cd25..7a5ee145c733f3b2547c0f85d2c1f6264cb9f240 100644 (file)
@@ -2946,6 +2946,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
                if (map_end & (PAGE_SIZE - 1))
                        to = map_end & (PAGE_SIZE - 1);
 
+retry:
                page = find_or_create_page(mapping, page_index, GFP_NOFS);
                if (!page) {
                        ret = -ENOMEM;
@@ -2954,11 +2955,18 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
                }
 
                /*
-                * In case PAGE_SIZE <= CLUSTER_SIZE, This page
-                * can't be dirtied before we CoW it out.
+                * In case PAGE_SIZE <= CLUSTER_SIZE, we do not expect a dirty
+                * page; if one is found, write it back and retry.
                 */
-               if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize)
-                       BUG_ON(PageDirty(page));
+               if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) {
+                       if (PageDirty(page)) {
+                               /*
+                                * write_one_page will unlock the page on return
+                                */
+                               ret = write_one_page(page);
+                               goto retry;
+                       }
+               }
 
                if (!PageUptodate(page)) {
                        ret = block_read_full_page(page, ocfs2_get_block);
index 296037afecdb4e689d458b54ffc597ed2265bbf9..1cc797a08a5b5f7eb6c002862c834177e5d6f93c 100644 (file)
@@ -141,7 +141,7 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
        }
 
        /* Try to use clone_file_range to clone up within the same fs */
-       error = vfs_clone_file_range(old_file, 0, new_file, 0, len);
+       error = do_clone_file_range(old_file, 0, new_file, 0, len);
        if (!error)
                goto out;
        /* Couldn't clone, so now we try to copy the data */
index aeaefd2a551b015d63b47cbe9ab25a204a9d3cb3..986313da0c8895352d2216f0fb0b78d3854064fb 100644 (file)
@@ -240,8 +240,10 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
                goto out_unlock;
 
        old_cred = ovl_override_creds(file_inode(file)->i_sb);
+       file_start_write(real.file);
        ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
                             ovl_iocb_to_rwf(iocb));
+       file_end_write(real.file);
        revert_creds(old_cred);
 
        /* Update size */
index b6ac545b5a32188297352dd0793565423fee2fcb..3b7ed5d2279c6a8efde8180471bde94ef1020964 100644 (file)
@@ -504,7 +504,7 @@ static const struct inode_operations ovl_special_inode_operations = {
        .update_time    = ovl_update_time,
 };
 
-const struct address_space_operations ovl_aops = {
+static const struct address_space_operations ovl_aops = {
        /* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
        .direct_IO              = noop_direct_IO,
 };
index f28711846dd6ebad2cca1ad5fc3a4ef95f3806b7..9c0ca6a7becfbe56e15efd596fbc6540b4bbd859 100644 (file)
@@ -686,7 +686,7 @@ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
                        index = NULL;
                        goto out;
                }
-               pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n"
+               pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%.*s, err=%i);\n"
                                    "overlayfs: mount with '-o index=off' to disable inodes index.\n",
                                    d_inode(origin)->i_ino, name.len, name.name,
                                    err);
index f61839e1054c6b40872a6bf3dc89e3a7da7b6f52..a3c0d95843121e92a103a6b07628feb853c31399 100644 (file)
@@ -152,8 +152,8 @@ static inline int ovl_do_setxattr(struct dentry *dentry, const char *name,
                                  const void *value, size_t size, int flags)
 {
        int err = vfs_setxattr(dentry, name, value, size, flags);
-       pr_debug("setxattr(%pd2, \"%s\", \"%*s\", 0x%x) = %i\n",
-                dentry, name, (int) size, (char *) value, flags, err);
+       pr_debug("setxattr(%pd2, \"%s\", \"%*pE\", %zu, 0x%x) = %i\n",
+                dentry, name, min((int)size, 48), value, size, flags, err);
        return err;
 }
 
index 8cfb62cc86728029e271df468fe3b8415de1fc46..ace4fe4c39a9307aa6008702f0195a92af74627c 100644 (file)
@@ -683,7 +683,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
        struct dentry *upperdentry = ovl_dentry_upper(dentry);
        struct dentry *index = NULL;
        struct inode *inode;
-       struct qstr name;
+       struct qstr name = { };
        int err;
 
        err = ovl_get_index_name(lowerdentry, &name);
@@ -726,6 +726,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
                goto fail;
 
 out:
+       kfree(name.name);
        dput(index);
        return;
 
index ccf86f16d9f0190c18e7f4345b7ca0960709eb05..7e9f07bf260d20bb0a0cd4cd6b6b4abe82b23e20 100644 (file)
@@ -407,6 +407,20 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
        unsigned long *entries;
        int err;
 
+       /*
+        * The ability to racily run the kernel stack unwinder on a running task
+        * and then observe the unwinder output is scary; while it is useful for
+        * debugging kernel issues, it can also allow an attacker to leak kernel
+        * stack contents.
+        * Doing this in a manner that is at least safe from races would require
+        * some work to ensure that the remote task can not be scheduled; and
+        * even then, this would still expose the unwinder as local attack
+        * surface.
+        * Therefore, this interface is restricted to root.
+        */
+       if (!file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN))
+               return -EACCES;
+
        entries = kmalloc_array(MAX_STACK_TRACE_DEPTH, sizeof(*entries),
                                GFP_KERNEL);
        if (!entries)
index bbd1e357c23df64b385f4baf119e649342c9c50e..f4fd2e72add4ebd512d4e3e499c0c07999c455df 100644 (file)
@@ -898,8 +898,22 @@ static struct platform_driver ramoops_driver = {
        },
 };
 
-static void ramoops_register_dummy(void)
+static inline void ramoops_unregister_dummy(void)
 {
+       platform_device_unregister(dummy);
+       dummy = NULL;
+
+       kfree(dummy_data);
+       dummy_data = NULL;
+}
+
+static void __init ramoops_register_dummy(void)
+{
+       /*
+        * Prepare a dummy platform data structure to carry the module
+        * parameters. If mem_size isn't set, then there are no module
+        * parameters, and we can skip this.
+        */
        if (!mem_size)
                return;
 
@@ -932,21 +946,28 @@ static void ramoops_register_dummy(void)
        if (IS_ERR(dummy)) {
                pr_info("could not create platform device: %ld\n",
                        PTR_ERR(dummy));
+               dummy = NULL;
+               ramoops_unregister_dummy();
        }
 }
 
 static int __init ramoops_init(void)
 {
+       int ret;
+
        ramoops_register_dummy();
-       return platform_driver_register(&ramoops_driver);
+       ret = platform_driver_register(&ramoops_driver);
+       if (ret != 0)
+               ramoops_unregister_dummy();
+
+       return ret;
 }
 late_initcall(ramoops_init);
 
 static void __exit ramoops_exit(void)
 {
        platform_driver_unregister(&ramoops_driver);
-       platform_device_unregister(dummy);
-       kfree(dummy_data);
+       ramoops_unregister_dummy();
 }
 module_exit(ramoops_exit);
 
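
A design note on the unwind helper above: both platform_device_unregister()
and kfree() tolerate a NULL argument, so ramoops_unregister_dummy() can be
called unconditionally from any point in the setup sequence. That is what
lets the failure path in ramoops_register_dummy() and the failed
platform_driver_register() in ramoops_init() share one idempotent helper
instead of each open-coding partial cleanup.
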
index 39b4a21dd9337a157927c1f8d0d741190de11e78..8a2737f0d61d3e0fbef04107da4a11283653544b 100644 (file)
@@ -1818,8 +1818,8 @@ int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
 }
 EXPORT_SYMBOL(vfs_clone_file_prep_inodes);
 
-int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
-               struct file *file_out, loff_t pos_out, u64 len)
+int do_clone_file_range(struct file *file_in, loff_t pos_in,
+                       struct file *file_out, loff_t pos_out, u64 len)
 {
        struct inode *inode_in = file_inode(file_in);
        struct inode *inode_out = file_inode(file_out);
@@ -1866,6 +1866,19 @@ int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
 
        return ret;
 }
+EXPORT_SYMBOL(do_clone_file_range);
+
+int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
+                        struct file *file_out, loff_t pos_out, u64 len)
+{
+       int ret;
+
+       file_start_write(file_out);
+       ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len);
+       file_end_write(file_out);
+
+       return ret;
+}
 EXPORT_SYMBOL(vfs_clone_file_range);
 
 /*
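
The read_write.c split above separates the raw clone implementation
(do_clone_file_range) from a wrapper that takes write-freeze protection on
the target file: overlayfs copy-up, which already holds that protection, now
calls the raw helper (see the ovl hunk earlier), while the ioctl and nfsd
call sites above use the protected wrapper. The bracketing pattern in
isolation, with a hypothetical raw_clone() standing in for the unprotected
helper:

        #include <linux/fs.h>

        static int protected_clone(struct file *file_in, loff_t pos_in,
                                   struct file *file_out, loff_t pos_out,
                                   u64 len)
        {
                int ret;

                file_start_write(file_out);     /* waits while fs is frozen */
                ret = raw_clone(file_in, pos_in, file_out, pos_out, len);
                file_end_write(file_out);
                return ret;
        }
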
index daa732550088957538842fc34f8d84228ccbde33..0d6a6a4af8616dcdc484a6f0a417167460adfa64 100644 (file)
@@ -948,17 +948,19 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
        int err = 0;
 
 #ifdef CONFIG_FS_POSIX_ACL
-       if (inode->i_acl) {
-               err = xattr_list_one(&buffer, &remaining_size,
-                                    XATTR_NAME_POSIX_ACL_ACCESS);
-               if (err)
-                       return err;
-       }
-       if (inode->i_default_acl) {
-               err = xattr_list_one(&buffer, &remaining_size,
-                                    XATTR_NAME_POSIX_ACL_DEFAULT);
-               if (err)
-                       return err;
+       if (IS_POSIXACL(inode)) {
+               if (inode->i_acl) {
+                       err = xattr_list_one(&buffer, &remaining_size,
+                                            XATTR_NAME_POSIX_ACL_ACCESS);
+                       if (err)
+                               return err;
+               }
+               if (inode->i_default_acl) {
+                       err = xattr_list_one(&buffer, &remaining_size,
+                                            XATTR_NAME_POSIX_ACL_DEFAULT);
+                       if (err)
+                               return err;
+               }
        }
 #endif
 
index 1e671d4eb6fa652681d5d1f97e7ddb9cc41488af..c6299f82a6e496ac00b1ef27953a5ca5313cb9f8 100644 (file)
@@ -587,7 +587,7 @@ xfs_attr_leaf_addname(
                 */
                error = xfs_attr3_leaf_to_node(args);
                if (error)
-                       goto out_defer_cancel;
+                       return error;
                error = xfs_defer_finish(&args->trans);
                if (error)
                        return error;
@@ -675,7 +675,7 @@ xfs_attr_leaf_addname(
                        error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
                        /* bp is gone due to xfs_da_shrink_inode */
                        if (error)
-                               goto out_defer_cancel;
+                               return error;
                        error = xfs_defer_finish(&args->trans);
                        if (error)
                                return error;
@@ -693,9 +693,6 @@ xfs_attr_leaf_addname(
                error = xfs_attr3_leaf_clearflag(args);
        }
        return error;
-out_defer_cancel:
-       xfs_defer_cancel(args->trans);
-       return error;
 }
 
 /*
@@ -738,15 +735,12 @@ xfs_attr_leaf_removename(
                error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
                /* bp is gone due to xfs_da_shrink_inode */
                if (error)
-                       goto out_defer_cancel;
+                       return error;
                error = xfs_defer_finish(&args->trans);
                if (error)
                        return error;
        }
        return 0;
-out_defer_cancel:
-       xfs_defer_cancel(args->trans);
-       return error;
 }
 
 /*
@@ -864,7 +858,7 @@ restart:
                        state = NULL;
                        error = xfs_attr3_leaf_to_node(args);
                        if (error)
-                               goto out_defer_cancel;
+                               goto out;
                        error = xfs_defer_finish(&args->trans);
                        if (error)
                                goto out;
@@ -888,7 +882,7 @@ restart:
                 */
                error = xfs_da3_split(state);
                if (error)
-                       goto out_defer_cancel;
+                       goto out;
                error = xfs_defer_finish(&args->trans);
                if (error)
                        goto out;
@@ -984,7 +978,7 @@ restart:
                if (retval && (state->path.active > 1)) {
                        error = xfs_da3_join(state);
                        if (error)
-                               goto out_defer_cancel;
+                               goto out;
                        error = xfs_defer_finish(&args->trans);
                        if (error)
                                goto out;
@@ -1013,9 +1007,6 @@ out:
        if (error)
                return error;
        return retval;
-out_defer_cancel:
-       xfs_defer_cancel(args->trans);
-       goto out;
 }
 
 /*
@@ -1107,7 +1098,7 @@ xfs_attr_node_removename(
        if (retval && (state->path.active > 1)) {
                error = xfs_da3_join(state);
                if (error)
-                       goto out_defer_cancel;
+                       goto out;
                error = xfs_defer_finish(&args->trans);
                if (error)
                        goto out;
@@ -1138,7 +1129,7 @@ xfs_attr_node_removename(
                        error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
                        /* bp is gone due to xfs_da_shrink_inode */
                        if (error)
-                               goto out_defer_cancel;
+                               goto out;
                        error = xfs_defer_finish(&args->trans);
                        if (error)
                                goto out;
@@ -1150,9 +1141,6 @@ xfs_attr_node_removename(
 out:
        xfs_da_state_free(state);
        return error;
-out_defer_cancel:
-       xfs_defer_cancel(args->trans);
-       goto out;
 }
 
 /*
index af094063e4029ae9b15af7d7da29419b7706485f..d89363c6b5234d73cef58d4e9533a88f6de09c46 100644 (file)
@@ -485,7 +485,7 @@ xfs_attr_rmtval_set(
                                  blkcnt, XFS_BMAPI_ATTRFORK, args->total, &map,
                                  &nmap);
                if (error)
-                       goto out_defer_cancel;
+                       return error;
                error = xfs_defer_finish(&args->trans);
                if (error)
                        return error;
@@ -553,9 +553,6 @@ xfs_attr_rmtval_set(
        }
        ASSERT(valuelen == 0);
        return 0;
-out_defer_cancel:
-       xfs_defer_cancel(args->trans);
-       return error;
 }
 
 /*
@@ -625,7 +622,7 @@ xfs_attr_rmtval_remove(
                error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
                                    XFS_BMAPI_ATTRFORK, 1, &done);
                if (error)
-                       goto out_defer_cancel;
+                       return error;
                error = xfs_defer_finish(&args->trans);
                if (error)
                        return error;
@@ -638,7 +635,4 @@ xfs_attr_rmtval_remove(
                        return error;
        }
        return 0;
-out_defer_cancel:
-       xfs_defer_cancel(args->trans);
-       return error;
 }
index 2760314fdf7f1a9a57de82a043748bafe8273995..a47670332326449cb97f73850887cea799ea684c 100644 (file)
@@ -673,7 +673,8 @@ xfs_bmap_extents_to_btree(
        ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
 
        /*
-        * Make space in the inode incore.
+        * Make space in the inode incore. This needs to be undone if we fail
+        * to expand the root.
         */
        xfs_iroot_realloc(ip, 1, whichfork);
        ifp->if_flags |= XFS_IFBROOT;
@@ -711,16 +712,15 @@ xfs_bmap_extents_to_btree(
        args.minlen = args.maxlen = args.prod = 1;
        args.wasdel = wasdel;
        *logflagsp = 0;
-       if ((error = xfs_alloc_vextent(&args))) {
-               ASSERT(ifp->if_broot == NULL);
-               goto err1;
-       }
+       error = xfs_alloc_vextent(&args);
+       if (error)
+               goto out_root_realloc;
 
        if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
-               ASSERT(ifp->if_broot == NULL);
                error = -ENOSPC;
-               goto err1;
+               goto out_root_realloc;
        }
+
        /*
         * Allocation can't fail, the space was reserved.
         */
@@ -732,9 +732,10 @@ xfs_bmap_extents_to_btree(
        xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
        abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
        if (!abp) {
-               error = -ENOSPC;
-               goto err2;
+               error = -EFSCORRUPTED;
+               goto out_unreserve_dquot;
        }
+
        /*
         * Fill in the child block.
         */
@@ -775,11 +776,12 @@ xfs_bmap_extents_to_btree(
        *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
        return 0;
 
-err2:
+out_unreserve_dquot:
        xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
-err1:
+out_root_realloc:
        xfs_iroot_realloc(ip, -1, whichfork);
        XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
+       ASSERT(ifp->if_broot == NULL);
        xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
 
        return error;
index 059bc44c27e83edf3cb1fe2c494490e65f93c5d8..afbe336600e165e2fe475ebcf6683e3f677c1cc4 100644 (file)
@@ -1016,6 +1016,8 @@ static inline void xfs_dinode_put_rdev(struct xfs_dinode *dip, xfs_dev_t rdev)
 #define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */
 #define XFS_DIFLAG_NODEFRAG_BIT     13 /* do not reorganize/defragment */
 #define XFS_DIFLAG_FILESTREAM_BIT   14  /* use filestream allocator */
+/* Do not use bit 15, di_flags is legacy and unchanging now */
+
 #define XFS_DIFLAG_REALTIME      (1 << XFS_DIFLAG_REALTIME_BIT)
 #define XFS_DIFLAG_PREALLOC      (1 << XFS_DIFLAG_PREALLOC_BIT)
 #define XFS_DIFLAG_NEWRTBM       (1 << XFS_DIFLAG_NEWRTBM_BIT)
index 30d1d60f1d46e62ff71eca1f45b273536cc6cce1..09d9c8cfa4a09f933a55f1122879809ecb3010af 100644 (file)
@@ -415,6 +415,31 @@ xfs_dinode_verify_fork(
        return NULL;
 }
 
+static xfs_failaddr_t
+xfs_dinode_verify_forkoff(
+       struct xfs_dinode       *dip,
+       struct xfs_mount        *mp)
+{
+       if (!XFS_DFORK_Q(dip))
+               return NULL;
+
+       switch (dip->di_format)  {
+       case XFS_DINODE_FMT_DEV:
+               if (dip->di_forkoff != (roundup(sizeof(xfs_dev_t), 8) >> 3))
+                       return __this_address;
+               break;
+       case XFS_DINODE_FMT_LOCAL:      /* fall through ... */
+       case XFS_DINODE_FMT_EXTENTS:    /* fall through ... */
+       case XFS_DINODE_FMT_BTREE:
+               if (dip->di_forkoff >= (XFS_LITINO(mp, dip->di_version) >> 3))
+                       return __this_address;
+               break;
+       default:
+               return __this_address;
+       }
+       return NULL;
+}
+
 xfs_failaddr_t
 xfs_dinode_verify(
        struct xfs_mount        *mp,
@@ -470,6 +495,11 @@ xfs_dinode_verify(
        if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
                return __this_address;
 
+       /* check for illegal values of forkoff */
+       fa = xfs_dinode_verify_forkoff(dip, mp);
+       if (fa)
+               return fa;
+
        /* Do we have appropriate data fork formats for the mode? */
        switch (mode & S_IFMT) {
        case S_IFIFO:
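
For the XFS_DINODE_FMT_DEV case in the new verifier above, the arithmetic
pins the value exactly: xfs_dev_t is 4 bytes, roundup(4, 8) is 8, and
di_forkoff counts in 8-byte units, so after the >> 3 the only accepted
offset is di_forkoff == 1. The other formats merely require the fork offset
to fall inside the inode literal area (XFS_LITINO).
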
index 036b5c7021eb322452fac7e0350ef7d47b8aa2eb..376bcb585ae6916e8ca089009d37f6e79b4bed19 100644 (file)
@@ -17,7 +17,6 @@
 #include "xfs_sb.h"
 #include "xfs_alloc.h"
 #include "xfs_rmap.h"
-#include "xfs_alloc.h"
 #include "scrub/xfs_scrub.h"
 #include "scrub/scrub.h"
 #include "scrub/common.h"
index 5b3b177c0fc908fa9527b3a35302e91993027c9c..e386c9b0b4ab7de6bc2d42fddcf204539d8d2a28 100644 (file)
@@ -126,6 +126,7 @@ xchk_inode_flags(
 {
        struct xfs_mount        *mp = sc->mp;
 
+       /* di_flags are all taken, last bit cannot be used */
        if (flags & ~XFS_DIFLAG_ANY)
                goto bad;
 
@@ -172,8 +173,9 @@ xchk_inode_flags2(
 {
        struct xfs_mount        *mp = sc->mp;
 
+       /* Unknown di_flags2 could be from a future kernel */
        if (flags2 & ~XFS_DIFLAG2_ANY)
-               goto bad;
+               xchk_ino_set_warning(sc, ino);
 
        /* reflink flag requires reflink feature */
        if ((flags2 & XFS_DIFLAG2_REFLINK) &&
index addbd74ecd8e5185ec70ae91cfe4cebe700bc9bb..6de8d90041ff0e676e85e559c1b2eef65dd74946 100644 (file)
@@ -702,13 +702,9 @@ xfs_bmap_punch_delalloc_range(
        struct xfs_iext_cursor  icur;
        int                     error = 0;
 
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-       if (!(ifp->if_flags & XFS_IFEXTENTS)) {
-               error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
-               if (error)
-                       goto out_unlock;
-       }
+       ASSERT(ifp->if_flags & XFS_IFEXTENTS);
 
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
        if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
                goto out_unlock;
 
@@ -1584,7 +1580,7 @@ xfs_swap_extent_rmap(
                                        tirec.br_blockcount, &irec,
                                        &nimaps, 0);
                        if (error)
-                               goto out_defer;
+                               goto out;
                        ASSERT(nimaps == 1);
                        ASSERT(tirec.br_startoff == irec.br_startoff);
                        trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
@@ -1599,22 +1595,22 @@ xfs_swap_extent_rmap(
                        /* Remove the mapping from the donor file. */
                        error = xfs_bmap_unmap_extent(tp, tip, &uirec);
                        if (error)
-                               goto out_defer;
+                               goto out;
 
                        /* Remove the mapping from the source file. */
                        error = xfs_bmap_unmap_extent(tp, ip, &irec);
                        if (error)
-                               goto out_defer;
+                               goto out;
 
                        /* Map the donor file's blocks into the source file. */
                        error = xfs_bmap_map_extent(tp, ip, &uirec);
                        if (error)
-                               goto out_defer;
+                               goto out;
 
                        /* Map the source file's blocks into the donor file. */
                        error = xfs_bmap_map_extent(tp, tip, &irec);
                        if (error)
-                               goto out_defer;
+                               goto out;
 
                        error = xfs_defer_finish(tpp);
                        tp = *tpp;
@@ -1636,8 +1632,6 @@ xfs_swap_extent_rmap(
        tip->i_d.di_flags2 = tip_flags2;
        return 0;
 
-out_defer:
-       xfs_defer_cancel(tp);
 out:
        trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
        tip->i_d.di_flags2 = tip_flags2;
index 1c9d1398980b6562969ab03a38e5158aeafc6c07..12d8455bfbb29114887744046d52cb75428bc911 100644 (file)
@@ -531,6 +531,49 @@ xfs_buf_item_push(
        return rval;
 }
 
+/*
+ * Drop the buffer log item refcount and take appropriate action. This helper
+ * determines whether the bli must be freed or not, since a decrement to zero
+ * does not necessarily mean the bli is unused.
+ *
+ * Return true if the bli is freed, false otherwise.
+ */
+bool
+xfs_buf_item_put(
+       struct xfs_buf_log_item *bip)
+{
+       struct xfs_log_item     *lip = &bip->bli_item;
+       bool                    aborted;
+       bool                    dirty;
+
+       /* drop the bli ref and return if it wasn't the last one */
+       if (!atomic_dec_and_test(&bip->bli_refcount))
+               return false;
+
+       /*
+        * We dropped the last ref and must free the item if clean or aborted.
+        * If the bli is dirty and non-aborted, the buffer was clean in the
+        * transaction but still awaiting writeback from previous changes. In
+        * that case, the bli is freed on buffer writeback completion.
+        */
+       aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
+                 XFS_FORCED_SHUTDOWN(lip->li_mountp);
+       dirty = bip->bli_flags & XFS_BLI_DIRTY;
+       if (dirty && !aborted)
+               return false;
+
+       /*
+        * The bli is aborted or clean. An aborted item may be in the AIL
+        * regardless of dirty state.  For example, consider an aborted
+        * transaction that invalidated a dirty bli and cleared the dirty
+        * state.
+        */
+       if (aborted)
+               xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
+       xfs_buf_item_relse(bip->bli_buf);
+       return true;
+}
+
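Restated as a simplified caller-side sketch (this is the contract, not additional patch code): only touch the buffer afterwards if the bli survived the unref.

       released = xfs_buf_item_put(bip);  /* drops the transaction's bli ref */
       if (!released)
               xfs_buf_relse(bp);         /* bli still live; just unlock bp */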
 /*
  * Release the buffer associated with the buf log item.  If there is no dirty
  * logged data associated with the buffer recorded in the buf log item, then
@@ -556,76 +599,42 @@ xfs_buf_item_unlock(
 {
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        struct xfs_buf          *bp = bip->bli_buf;
-       bool                    aborted;
-       bool                    hold = !!(bip->bli_flags & XFS_BLI_HOLD);
-       bool                    dirty = !!(bip->bli_flags & XFS_BLI_DIRTY);
+       bool                    released;
+       bool                    hold = bip->bli_flags & XFS_BLI_HOLD;
+       bool                    stale = bip->bli_flags & XFS_BLI_STALE;
 #if defined(DEBUG) || defined(XFS_WARN)
-       bool                    ordered = !!(bip->bli_flags & XFS_BLI_ORDERED);
+       bool                    ordered = bip->bli_flags & XFS_BLI_ORDERED;
+       bool                    dirty = bip->bli_flags & XFS_BLI_DIRTY;
 #endif
 
-       aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags);
-
-       /* Clear the buffer's association with this transaction. */
-       bp->b_transp = NULL;
-
-       /*
-        * The per-transaction state has been copied above so clear it from the
-        * bli.
-        */
-       bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);
-
-       /*
-        * If the buf item is marked stale, then don't do anything.  We'll
-        * unlock the buffer and free the buf item when the buffer is unpinned
-        * for the last time.
-        */
-       if (bip->bli_flags & XFS_BLI_STALE) {
-               trace_xfs_buf_item_unlock_stale(bip);
-               ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
-               if (!aborted) {
-                       atomic_dec(&bip->bli_refcount);
-                       return;
-               }
-       }
-
        trace_xfs_buf_item_unlock(bip);
 
        /*
-        * If the buf item isn't tracking any data, free it, otherwise drop the
-        * reference we hold to it. If we are aborting the transaction, this may
-        * be the only reference to the buf item, so we free it anyway
-        * regardless of whether it is dirty or not. A dirty abort implies a
-        * shutdown, anyway.
-        *
         * The bli dirty state should match whether the blf has logged segments
         * except for ordered buffers, where only the bli should be dirty.
         */
        ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
               (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
+       ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
 
        /*
-        * Clean buffers, by definition, cannot be in the AIL. However, aborted
-        * buffers may be in the AIL regardless of dirty state. An aborted
-        * transaction that invalidates a buffer already in the AIL may have
-        * marked it stale and cleared the dirty state, for example.
-        *
-        * Therefore if we are aborting a buffer and we've just taken the last
-        * reference away, we have to check if it is in the AIL before freeing
-        * it. We need to free it in this case, because an aborted transaction
-        * has already shut the filesystem down and this is the last chance we
-        * will have to do so.
+        * Clear the buffer's association with this transaction and
+        * per-transaction state from the bli, which has been copied above.
         */
-       if (atomic_dec_and_test(&bip->bli_refcount)) {
-               if (aborted) {
-                       ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
-                       xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
-                       xfs_buf_item_relse(bp);
-               } else if (!dirty)
-                       xfs_buf_item_relse(bp);
-       }
+       bp->b_transp = NULL;
+       bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);
 
-       if (!hold)
-               xfs_buf_relse(bp);
+       /*
+        * Unref the item and unlock the buffer unless held or stale. Stale
+        * buffers remain locked until final unpin unless the bli is freed by
+        * the unref call. The latter implies shutdown because buffer
+        * invalidation dirties the bli and transaction.
+        */
+       released = xfs_buf_item_put(bip);
+       if (hold || (stale && !released))
+               return;
+       ASSERT(!stale || test_bit(XFS_LI_ABORTED, &lip->li_flags));
+       xfs_buf_relse(bp);
 }
 
 /*
index 3f7d7b72e7e610aa9d7b5f717858895ccd48ea3d..90f65f891fabd27210e52a2c9085c677d12fde62 100644 (file)
@@ -51,6 +51,7 @@ struct xfs_buf_log_item {
 
 int    xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
 void   xfs_buf_item_relse(struct xfs_buf *);
+bool   xfs_buf_item_put(struct xfs_buf_log_item *);
 void   xfs_buf_item_log(struct xfs_buf_log_item *, uint, uint);
 bool   xfs_buf_item_dirty_format(struct xfs_buf_log_item *);
 void   xfs_buf_attach_iodone(struct xfs_buf *,
index d957a46dc1cb8754f7cc99b35d30a15a7fb45da2..05db9540e4597536211446475d301eac834e972e 100644 (file)
@@ -1563,7 +1563,7 @@ xfs_itruncate_extents_flags(
                error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, flags,
                                    XFS_ITRUNC_MAX_EXTENTS, &done);
                if (error)
-                       goto out_bmap_cancel;
+                       goto out;
 
                /*
                 * Duplicate the transaction that has the permanent
@@ -1599,14 +1599,6 @@ xfs_itruncate_extents_flags(
 out:
        *tpp = tp;
        return error;
-out_bmap_cancel:
-       /*
-        * If the bunmapi call encounters an error, return to the caller where
-        * the transaction can be properly aborted.  We just need to make sure
-        * we're not holding any resources that we were not when we came in.
-        */
-       xfs_defer_cancel(tp);
-       goto out;
 }
 
 int
index c3e74f9128e8af22e64f6142b104389a9e96f10e..f48ffd7a8d3e491d76defe66961194a635276115 100644 (file)
@@ -471,8 +471,18 @@ xfs_vn_get_link_inline(
        struct inode            *inode,
        struct delayed_call     *done)
 {
+       char                    *link;
+
        ASSERT(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE);
-       return XFS_I(inode)->i_df.if_u1.if_data;
+
+       /*
+        * The VFS crashes on a NULL pointer, so return -EFSCORRUPTED if
+        * if_data is junk.
+        */
+       link = XFS_I(inode)->i_df.if_u1.if_data;
+       if (!link)
+               return ERR_PTR(-EFSCORRUPTED);
+       return link;
 }
 
 STATIC int
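Callers of ->get_link() follow the usual ERR_PTR convention from <linux/err.h>, which is what makes the -EFSCORRUPTED return above safe to propagate; a caller-side sketch, assuming dentry, inode and done are in scope:

       const char *link = inode->i_op->get_link(dentry, inode, &done);
       if (IS_ERR(link))
               return PTR_ERR(link);      /* e.g. -EFSCORRUPTED from above */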
index a21dc61ec09eb3f76faaeaf653606e942f6cdd90..1fc9e9042e0ef7dfb1e138f229a4f91e231a91f7 100644 (file)
@@ -1570,16 +1570,6 @@ xlog_find_zeroed(
        if (last_cycle != 0) {          /* log completely written to */
                xlog_put_bp(bp);
                return 0;
-       } else if (first_cycle != 1) {
-               /*
-                * If the cycle of the last block is zero, the cycle of
-                * the first block must be 1. If it's not, maybe we're
-                * not looking at a log... Bail out.
-                */
-               xfs_warn(log->l_mp,
-                       "Log inconsistent or not a log (last==0, first!=1)");
-               error = -EINVAL;
-               goto bp_err;
        }
 
        /* we have a partially zeroed log */
index 38f405415b88a4e796c79071182d419c3208f195..5289e22cb081d4aee3f0a57ef7665b3930393c15 100644 (file)
@@ -352,6 +352,47 @@ xfs_reflink_convert_cow(
        return error;
 }
 
+/*
+ * Find the extent that maps the given range in the COW fork. Even if the extent
+ * is not shared we might have a preallocation for it in the COW fork. If so we
+ * use it that rather than trigger a new allocation.
+ */
+static int
+xfs_find_trim_cow_extent(
+       struct xfs_inode        *ip,
+       struct xfs_bmbt_irec    *imap,
+       bool                    *shared,
+       bool                    *found)
+{
+       xfs_fileoff_t           offset_fsb = imap->br_startoff;
+       xfs_filblks_t           count_fsb = imap->br_blockcount;
+       struct xfs_iext_cursor  icur;
+       struct xfs_bmbt_irec    got;
+       bool                    trimmed;
+
+       *found = false;
+
+       /*
+        * If we don't find an overlapping extent, trim the range we need to
+        * allocate to fit the hole we found.
+        */
+       if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got) ||
+           got.br_startoff > offset_fsb)
+               return xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
+
+       *shared = true;
+       if (isnullstartblock(got.br_startblock)) {
+               xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
+               return 0;
+       }
+
+       /* real extent found - no need to allocate */
+       xfs_trim_extent(&got, offset_fsb, count_fsb);
+       *imap = got;
+       *found = true;
+       return 0;
+}
+
 /* Allocate all CoW reservations covering a range of blocks in a file. */
 int
 xfs_reflink_allocate_cow(
@@ -363,78 +404,64 @@ xfs_reflink_allocate_cow(
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           offset_fsb = imap->br_startoff;
        xfs_filblks_t           count_fsb = imap->br_blockcount;
-       struct xfs_bmbt_irec    got;
-       struct xfs_trans        *tp = NULL;
+       struct xfs_trans        *tp;
        int                     nimaps, error = 0;
-       bool                    trimmed;
+       bool                    found;
        xfs_filblks_t           resaligned;
        xfs_extlen_t            resblks = 0;
-       struct xfs_iext_cursor  icur;
 
-retry:
-       ASSERT(xfs_is_reflink_inode(ip));
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+       ASSERT(xfs_is_reflink_inode(ip));
 
-       /*
-        * Even if the extent is not shared we might have a preallocation for
-        * it in the COW fork.  If so use it.
-        */
-       if (xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got) &&
-           got.br_startoff <= offset_fsb) {
-               *shared = true;
-
-               /* If we have a real allocation in the COW fork we're done. */
-               if (!isnullstartblock(got.br_startblock)) {
-                       xfs_trim_extent(&got, offset_fsb, count_fsb);
-                       *imap = got;
-                       goto convert;
-               }
+       error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
+       if (error || !*shared)
+               return error;
+       if (found)
+               goto convert;
 
-               xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
-       } else {
-               error = xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
-               if (error || !*shared)
-                       goto out;
-       }
+       resaligned = xfs_aligned_fsb_count(imap->br_startoff,
+               imap->br_blockcount, xfs_get_cowextsz_hint(ip));
+       resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
 
-       if (!tp) {
-               resaligned = xfs_aligned_fsb_count(imap->br_startoff,
-                       imap->br_blockcount, xfs_get_cowextsz_hint(ip));
-               resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
+       xfs_iunlock(ip, *lockmode);
+       error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
+       *lockmode = XFS_ILOCK_EXCL;
+       xfs_ilock(ip, *lockmode);
 
-               xfs_iunlock(ip, *lockmode);
-               error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
-               *lockmode = XFS_ILOCK_EXCL;
-               xfs_ilock(ip, *lockmode);
+       if (error)
+               return error;
 
-               if (error)
-                       return error;
+       error = xfs_qm_dqattach_locked(ip, false);
+       if (error)
+               goto out_trans_cancel;
 
-               error = xfs_qm_dqattach_locked(ip, false);
-               if (error)
-                       goto out;
-               goto retry;
+       /*
+        * Check for an overlapping extent again now that we dropped the ilock.
+        */
+       error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
+       if (error || !*shared)
+               goto out_trans_cancel;
+       if (found) {
+               xfs_trans_cancel(tp);
+               goto convert;
        }
 
        error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
                        XFS_QMOPT_RES_REGBLKS);
        if (error)
-               goto out;
+               goto out_trans_cancel;
 
        xfs_trans_ijoin(tp, ip, 0);
 
-       nimaps = 1;
-
        /* Allocate the entire reservation as unwritten blocks. */
+       nimaps = 1;
        error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
                        XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC,
                        resblks, imap, &nimaps);
        if (error)
-               goto out_trans_cancel;
+               goto out_unreserve;
 
        xfs_inode_set_cowblocks_tag(ip);
-
-       /* Finish up. */
        error = xfs_trans_commit(tp);
        if (error)
                return error;
@@ -447,12 +474,12 @@ retry:
                return -ENOSPC;
 convert:
        return xfs_reflink_convert_cow_extent(ip, imap, offset_fsb, count_fsb);
-out_trans_cancel:
+
+out_unreserve:
        xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0,
                        XFS_QMOPT_RES_REGBLKS);
-out:
-       if (tp)
-               xfs_trans_cancel(tp);
+out_trans_cancel:
+       xfs_trans_cancel(tp);
        return error;
 }
 
@@ -666,14 +693,12 @@ xfs_reflink_end_cow(
                if (!del.br_blockcount)
                        goto prev_extent;
 
-               ASSERT(!isnullstartblock(got.br_startblock));
-
                /*
-                * Don't remap unwritten extents; these are
-                * speculatively preallocated CoW extents that have been
-                * allocated but have not yet been involved in a write.
+                * Only remap real extents that contain data.  With AIO,
+                * speculative preallocations can leak into the range we
+                * are called upon, and we need to skip them.
                 */
-               if (got.br_state == XFS_EXT_UNWRITTEN)
+               if (!xfs_bmap_is_real_extent(&got))
                        goto prev_extent;
 
                /* Unmap the old blocks in the data fork. */
index ad315e83bc02cfdaf986390e1aa1a91915afae9e..3043e5ed6495580de11de6932117addca0e85aec 100644 (file)
@@ -473,7 +473,6 @@ DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
 DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin);
 DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale);
 DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock);
-DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale);
 DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
 DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
 DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
index bedc5a5133a56d40ebbf569e9bc02b120f4e3557..912b42f5fe4ac61ed79bbc729f5ccf094de66c32 100644 (file)
@@ -259,6 +259,14 @@ xfs_trans_alloc(
        struct xfs_trans        *tp;
        int                     error;
 
+       /*
+        * Allocate the handle before we do our freeze accounting and set up
+        * the GFP_NOFS allocation context, so that we avoid lockdep false
+        * positives from doing GFP_KERNEL allocations inside sb_start_intwrite().
+        */
+       tp = kmem_zone_zalloc(xfs_trans_zone,
+               (flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
+
        if (!(flags & XFS_TRANS_NO_WRITECOUNT))
                sb_start_intwrite(mp->m_super);
 
@@ -270,8 +278,6 @@ xfs_trans_alloc(
                mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
        atomic_inc(&mp->m_active_trans);
 
-       tp = kmem_zone_zalloc(xfs_trans_zone,
-               (flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
        tp->t_magic = XFS_TRANS_HEADER_MAGIC;
        tp->t_flags = flags;
        tp->t_mountp = mp;
index 15919f67a88f57aeab504f471c9088d066d08493..286a287ac57acc5c5abcbe51a881af09026ecf0e 100644 (file)
@@ -322,49 +322,38 @@ xfs_trans_read_buf_map(
 }
 
 /*
- * Release the buffer bp which was previously acquired with one of the
- * xfs_trans_... buffer allocation routines if the buffer has not
- * been modified within this transaction.  If the buffer is modified
- * within this transaction, do decrement the recursion count but do
- * not release the buffer even if the count goes to 0.  If the buffer is not
- * modified within the transaction, decrement the recursion count and
- * release the buffer if the recursion count goes to 0.
+ * Release a buffer previously joined to the transaction. If the buffer is
+ * modified within this transaction, decrement the recursion count but do not
+ * release the buffer even if the count goes to 0. If the buffer is not modified
+ * within the transaction, decrement the recursion count and release the buffer
+ * if the recursion count goes to 0.
  *
- * If the buffer is to be released and it was not modified before
- * this transaction began, then free the buf_log_item associated with it.
+ * If the buffer is to be released and it was not already dirty before this
+ * transaction began, then also free the buf_log_item associated with it.
  *
- * If the transaction pointer is NULL, make this just a normal
- * brelse() call.
+ * If the transaction pointer is NULL, this is a normal xfs_buf_relse() call.
  */
 void
 xfs_trans_brelse(
-       xfs_trans_t             *tp,
-       xfs_buf_t               *bp)
+       struct xfs_trans        *tp,
+       struct xfs_buf          *bp)
 {
-       struct xfs_buf_log_item *bip;
-       int                     freed;
+       struct xfs_buf_log_item *bip = bp->b_log_item;
 
-       /*
-        * Default to a normal brelse() call if the tp is NULL.
-        */
-       if (tp == NULL) {
-               ASSERT(bp->b_transp == NULL);
+       ASSERT(bp->b_transp == tp);
+
+       if (!tp) {
                xfs_buf_relse(bp);
                return;
        }
 
-       ASSERT(bp->b_transp == tp);
-       bip = bp->b_log_item;
+       trace_xfs_trans_brelse(bip);
        ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
-       ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
-       ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
 
-       trace_xfs_trans_brelse(bip);
-
        /*
-        * If the release is just for a recursive lock,
-        * then decrement the count and return.
+        * If the release is for a recursive lookup, then decrement the count
+        * and return.
         */
        if (bip->bli_recur > 0) {
                bip->bli_recur--;
@@ -372,64 +361,24 @@ xfs_trans_brelse(
        }
 
        /*
-        * If the buffer is dirty within this transaction, we can't
+        * If the buffer is invalidated or dirty in this transaction, we can't
         * release it until we commit.
         */
        if (test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags))
                return;
-
-       /*
-        * If the buffer has been invalidated, then we can't release
-        * it until the transaction commits to disk unless it is re-dirtied
-        * as part of this transaction.  This prevents us from pulling
-        * the item from the AIL before we should.
-        */
        if (bip->bli_flags & XFS_BLI_STALE)
                return;
 
-       ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
-
        /*
-        * Free up the log item descriptor tracking the released item.
+        * Unlink the log item from the transaction and clear the hold flag, if
+        * set. We wouldn't want the next user of the buffer to get confused.
         */
+       ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
        xfs_trans_del_item(&bip->bli_item);
+       bip->bli_flags &= ~XFS_BLI_HOLD;
 
-       /*
-        * Clear the hold flag in the buf log item if it is set.
-        * We wouldn't want the next user of the buffer to
-        * get confused.
-        */
-       if (bip->bli_flags & XFS_BLI_HOLD) {
-               bip->bli_flags &= ~XFS_BLI_HOLD;
-       }
-
-       /*
-        * Drop our reference to the buf log item.
-        */
-       freed = atomic_dec_and_test(&bip->bli_refcount);
-
-       /*
-        * If the buf item is not tracking data in the log, then we must free it
-        * before releasing the buffer back to the free pool.
-        *
-        * If the fs has shutdown and we dropped the last reference, it may fall
-        * on us to release a (possibly dirty) bli if it never made it to the
-        * AIL (e.g., the aborted unpin already happened and didn't release it
-        * due to our reference). Since we're already shutdown and need
-        * ail_lock, just force remove from the AIL and release the bli here.
-        */
-       if (XFS_FORCED_SHUTDOWN(tp->t_mountp) && freed) {
-               xfs_trans_ail_remove(&bip->bli_item, SHUTDOWN_LOG_IO_ERROR);
-               xfs_buf_item_relse(bp);
-       } else if (!(bip->bli_flags & XFS_BLI_DIRTY)) {
-/***
-               ASSERT(bp->b_pincount == 0);
-***/
-               ASSERT(atomic_read(&bip->bli_refcount) == 0);
-               ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));
-               ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
-               xfs_buf_item_relse(bp);
-       }
+       /* drop the reference to the bli */
+       xfs_buf_item_put(bip);
 
        bp->b_transp = NULL;
        xfs_buf_relse(bp);
index 989f8e52864dadc895f799fdf677eed375d5a816..971bb7853776072a2ac9109ddac83c204ab6f67c 100644 (file)
@@ -87,9 +87,10 @@ struct drm_client_dev {
        struct drm_file *file;
 };
 
-int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
-                  const char *name, const struct drm_client_funcs *funcs);
+int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
+                   const char *name, const struct drm_client_funcs *funcs);
 void drm_client_release(struct drm_client_dev *client);
+void drm_client_add(struct drm_client_dev *client);
 
 void drm_client_dev_unregister(struct drm_device *dev);
 void drm_client_dev_hotplug(struct drm_device *dev);
index 582a0ec0aa70448e07b39f5de7f49868e5787953..777814755fa62baff930d0f2cd46e0cec1525500 100644 (file)
@@ -89,7 +89,6 @@ struct drm_panel {
        struct drm_device *drm;
        struct drm_connector *connector;
        struct device *dev;
-       struct device_link *link;
 
        const struct drm_panel_funcs *funcs;
 
index 697161f80eb5528deb8bb32cc4eaf74147fdd9ef..9eb2ec2b2ea99a9e4e8788dad0fdde9784175f44 100644 (file)
 #define VSC8531_LINK_100_1000_ACTIVITY  4
 #define VSC8531_LINK_10_1000_ACTIVITY   5
 #define VSC8531_LINK_10_100_ACTIVITY    6
+#define VSC8584_LINK_100FX_1000X_ACTIVITY      7
 #define VSC8531_DUPLEX_COLLISION        8
 #define VSC8531_COLLISION               9
 #define VSC8531_ACTIVITY                10
+#define VSC8584_100FX_1000X_ACTIVITY   11
 #define VSC8531_AUTONEG_FAULT           12
 #define VSC8531_SERIAL_MODE             13
 #define VSC8531_FORCE_LED_OFF           14
diff --git a/include/dt-bindings/phy/phy-ocelot-serdes.h b/include/dt-bindings/phy/phy-ocelot-serdes.h
new file mode 100644 (file)
index 0000000..bd28f21
--- /dev/null
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/* Copyright (c) 2018 Microsemi Corporation */
+#ifndef __PHY_OCELOT_SERDES_H__
+#define __PHY_OCELOT_SERDES_H__
+
+#define SERDES1G(x)    (x)
+#define SERDES1G_MAX   SERDES1G(5)
+#define SERDES6G(x)    (SERDES1G_MAX + 1 + (x))
+#define SERDES6G_MAX   SERDES6G(2)
+#define SERDES_MAX     SERDES6G_MAX
+
+#endif
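Expanding the macros shows how both SerDes families share one index space:

       SERDES1G(0) == 0, ..., SERDES1G(5) == SERDES1G_MAX == 5
       SERDES6G(0) == 6, ..., SERDES6G(2) == SERDES6G_MAX == SERDES_MAX == 8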
index b41f7bc958ef8fc706315c4143ce3a978b878178..2c9756bd9c4cdc4b07ac9e8f6158480b0e58d88f 100644 (file)
@@ -252,6 +252,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM      0X00400000
 #define VIRTCHNL_VF_OFFLOAD_ADQ                        0X00800000
 
+/* Define below the capability flags that are not offloads */
+#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED         0x00000080
 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
                               VIRTCHNL_VF_OFFLOAD_VLAN | \
                               VIRTCHNL_VF_OFFLOAD_RSS_PF)
@@ -596,10 +598,23 @@ enum virtchnl_event_codes {
 struct virtchnl_pf_event {
        enum virtchnl_event_codes event;
        union {
+               /* If the PF driver does not support the new speed reporting
+                * capabilities, use link_event; otherwise use link_event_adv
+                * to get the speed and link information. The ability to
+                * understand new speeds is indicated by setting the capability
+                * flag VIRTCHNL_VF_CAP_ADV_LINK_SPEED in the vf_cap_flags
+                * field of struct virtchnl_vf_resource, which determines
+                * which link event struct to use below.
+                */
                struct {
                        enum virtchnl_link_speed link_speed;
                        bool link_status;
                } link_event;
+               struct {
+                       /* link_speed provided in Mbps */
+                       u32 link_speed;
+                       u8 link_status;
+               } link_event_adv;
        } event_data;
 
        int severity;
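A sketch of how a VF driver might consume the union, assuming vf_res and pfe point at the negotiated virtchnl_vf_resource and a received virtchnl_pf_event (both names illustrative):

       if (vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
               u32 mbps = pfe->event_data.link_event_adv.link_speed;
               u8 up = pfe->event_data.link_event_adv.link_status;
       } else {
               enum virtchnl_link_speed speed =
                       pfe->event_data.link_event.link_speed;
               bool up = pfe->event_data.link_event.link_status;
       }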
index 6ac3cad9aef109171a5882ee2c5afb30e230c8d4..34a744a1bafcbc84c4e2992435f14b71610a4c88 100644 (file)
 #ifndef _LINUX_DNS_RESOLVER_H
 #define _LINUX_DNS_RESOLVER_H
 
-#ifdef __KERNEL__
+#include <uapi/linux/dns_resolver.h>
 
 extern int dns_query(const char *type, const char *name, size_t namelen,
                     const char *options, char **_result, time64_t *_expiry);
 
-#endif /* KERNEL */
-
 #endif /* _LINUX_DNS_RESOLVER_H */
index 6c0b4a1c22ff5bd84c5c4e29ce07b0366b7bc5b1..897eae8faee1b04f12fe8cbebbaae2b4505a771d 100644 (file)
@@ -1828,8 +1828,10 @@ extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
 extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
                                      struct inode *inode_out, loff_t pos_out,
                                      u64 *len, bool is_dedupe);
+extern int do_clone_file_range(struct file *file_in, loff_t pos_in,
+                              struct file *file_out, loff_t pos_out, u64 len);
 extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
-               struct file *file_out, loff_t pos_out, u64 len);
+                               struct file *file_out, loff_t pos_out, u64 len);
 extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
                                         struct inode *dest, loff_t destoff,
                                         loff_t len, bool *is_same);
@@ -2773,19 +2775,6 @@ static inline void file_end_write(struct file *file)
        __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
 }
 
-static inline int do_clone_file_range(struct file *file_in, loff_t pos_in,
-                                     struct file *file_out, loff_t pos_out,
-                                     u64 len)
-{
-       int ret;
-
-       file_start_write(file_out);
-       ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len);
-       file_end_write(file_out);
-
-       return ret;
-}
-
 /*
  * get_write_access() gets write permission for a file.
  * put_write_access() releases this write permission.
index 6b68e345f0ca64da6590817f719796471d0c5c2d..087fd5f48c9128752cf7ff8a872f30afab057381 100644 (file)
@@ -140,6 +140,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz);
 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+                               unsigned long *start, unsigned long *end);
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
 struct page *follow_huge_pd(struct vm_area_struct *vma,
@@ -170,6 +172,18 @@ static inline unsigned long hugetlb_total_pages(void)
        return 0;
 }
 
+static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
+                                       pte_t *ptep)
+{
+       return 0;
+}
+
+static inline void adjust_range_if_pmd_sharing_possible(
+                               struct vm_area_struct *vma,
+                               unsigned long *start, unsigned long *end)
+{
+}
+
 #define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)        ({ BUG(); 0; })
 #define follow_huge_addr(mm, addr, write)      ERR_PTR(-EINVAL)
 #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h
new file mode 100644 (file)
index 0000000..22443d7
--- /dev/null
@@ -0,0 +1,76 @@
+#ifndef __LINKMODE_H
+#define __LINKMODE_H
+
+#include <linux/bitmap.h>
+#include <linux/ethtool.h>
+#include <uapi/linux/ethtool.h>
+
+static inline void linkmode_zero(unsigned long *dst)
+{
+       bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline void linkmode_copy(unsigned long *dst, const unsigned long *src)
+{
+       bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline void linkmode_and(unsigned long *dst, const unsigned long *a,
+                               const unsigned long *b)
+{
+       bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline void linkmode_or(unsigned long *dst, const unsigned long *a,
+                               const unsigned long *b)
+{
+       bitmap_or(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline bool linkmode_empty(const unsigned long *src)
+{
+       return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline int linkmode_andnot(unsigned long *dst, const unsigned long *src1,
+                                 const unsigned long *src2)
+{
+       return bitmap_andnot(dst, src1, src2,  __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline void linkmode_set_bit(int nr, volatile unsigned long *addr)
+{
+       __set_bit(nr, addr);
+}
+
+static inline void linkmode_set_bit_array(const int *array, int array_size,
+                                         unsigned long *addr)
+{
+       int i;
+
+       for (i = 0; i < array_size; i++)
+               linkmode_set_bit(array[i], addr);
+}
+
+static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr)
+{
+       __clear_bit(nr, addr);
+}
+
+static inline void linkmode_change_bit(int nr, volatile unsigned long *addr)
+{
+       __change_bit(nr, addr);
+}
+
+static inline int linkmode_test_bit(int nr, volatile unsigned long *addr)
+{
+       return test_bit(nr, addr);
+}
+
+static inline int linkmode_equal(const unsigned long *src1,
+                                const unsigned long *src2)
+{
+       return bitmap_equal(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+#endif /* __LINKMODE_H */
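A minimal usage sketch for the new helpers (the mask name is illustrative); each is a thin wrapper over the bitmap API sized to __ETHTOOL_LINK_MODE_MASK_NBITS:

       __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);

       linkmode_zero(advertising);
       linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, advertising);
       linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising);
       if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising))
               ;       /* pause is advertised */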
index a528747f8aedb048b873353d40426d76058cffd1..e8338e5dc10bfd5e6bb415337ff3667c39e008f1 100644 (file)
@@ -78,9 +78,9 @@ enum {
        BD71837_REG_TRANS_COND0        = 0x1F,
        BD71837_REG_TRANS_COND1        = 0x20,
        BD71837_REG_VRFAULTEN          = 0x21,
-       BD71837_REG_MVRFLTMASK0        = 0x22,
-       BD71837_REG_MVRFLTMASK1        = 0x23,
-       BD71837_REG_MVRFLTMASK2        = 0x24,
+       BD718XX_REG_MVRFLTMASK0        = 0x22,
+       BD718XX_REG_MVRFLTMASK1        = 0x23,
+       BD718XX_REG_MVRFLTMASK2        = 0x24,
        BD71837_REG_RCVCFG             = 0x25,
        BD71837_REG_RCVNUM             = 0x26,
        BD71837_REG_PWRONCONFIG0       = 0x27,
@@ -159,6 +159,33 @@ enum {
 #define BUCK8_MASK             0x3F
 #define BUCK8_DEFAULT          0x1E
 
+/* BD718XX Voltage monitoring masks */
+#define BD718XX_BUCK1_VRMON80           0x1
+#define BD718XX_BUCK1_VRMON130          0x2
+#define BD718XX_BUCK2_VRMON80           0x4
+#define BD718XX_BUCK2_VRMON130          0x8
+#define BD718XX_1ST_NODVS_BUCK_VRMON80  0x1
+#define BD718XX_1ST_NODVS_BUCK_VRMON130 0x2
+#define BD718XX_2ND_NODVS_BUCK_VRMON80  0x4
+#define BD718XX_2ND_NODVS_BUCK_VRMON130 0x8
+#define BD718XX_3RD_NODVS_BUCK_VRMON80  0x10
+#define BD718XX_3RD_NODVS_BUCK_VRMON130 0x20
+#define BD718XX_4TH_NODVS_BUCK_VRMON80  0x40
+#define BD718XX_4TH_NODVS_BUCK_VRMON130 0x80
+#define BD718XX_LDO1_VRMON80            0x1
+#define BD718XX_LDO2_VRMON80            0x2
+#define BD718XX_LDO3_VRMON80            0x4
+#define BD718XX_LDO4_VRMON80            0x8
+#define BD718XX_LDO5_VRMON80            0x10
+#define BD718XX_LDO6_VRMON80            0x20
+
+/* BD71837 specific voltage monitoring masks */
+#define BD71837_BUCK3_VRMON80           0x10
+#define BD71837_BUCK3_VRMON130          0x20
+#define BD71837_BUCK4_VRMON80           0x40
+#define BD71837_BUCK4_VRMON130          0x80
+#define BD71837_LDO7_VRMON80            0x40
+
 /* BD71837_REG_IRQ bits */
 #define IRQ_SWRST              0x40
 #define IRQ_PWRON_S            0x20
index 55000ee5c6ad7eb31bc7c3d587b85a0c6d138cc9..2da85b02e1c0655cc7e45e4feb9687bd2127c21b 100644 (file)
@@ -10,6 +10,7 @@
 
 
 #include <linux/if.h>
+#include <linux/linkmode.h>
 #include <uapi/linux/mii.h>
 
 struct ethtool_cmd;
@@ -131,6 +132,34 @@ static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv)
        return result;
 }
 
+/**
+ * linkmode_adv_to_mii_adv_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_ADVERTISE register.
+ */
+static inline u32 linkmode_adv_to_mii_adv_t(unsigned long *advertising)
+{
+       u32 result = 0;
+
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, advertising))
+               result |= ADVERTISE_10HALF;
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, advertising))
+               result |= ADVERTISE_10FULL;
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, advertising))
+               result |= ADVERTISE_100HALF;
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, advertising))
+               result |= ADVERTISE_100FULL;
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising))
+               result |= ADVERTISE_PAUSE_CAP;
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising))
+               result |= ADVERTISE_PAUSE_ASYM;
+
+       return result;
+}
+
 /**
  * mii_adv_to_ethtool_adv_t
  * @adv: value of the MII_ADVERTISE register
@@ -178,6 +207,28 @@ static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv)
        return result;
 }
 
+/**
+ * linkmode_adv_to_mii_ctrl1000_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_CTRL1000 register when in 1000T mode.
+ */
+static inline u32 linkmode_adv_to_mii_ctrl1000_t(unsigned long *advertising)
+{
+       u32 result = 0;
+
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+                             advertising))
+               result |= ADVERTISE_1000HALF;
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+                             advertising))
+               result |= ADVERTISE_1000FULL;
+
+       return result;
+}
+
 /**
  * mii_ctrl1000_to_ethtool_adv_t
  * @adv: value of the MII_CTRL1000 register
@@ -302,6 +353,56 @@ static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
        return result | mii_adv_to_ethtool_adv_x(lpa);
 }
 
+/**
+ * mii_adv_to_linkmode_adv_t
+ * @advertising: pointer to destination link mode.
+ * @adv: value of the MII_ADVERTISE register
+ *
+ * A small helper function that translates MII_ADVERTISE bits
+ * to linkmode advertisement settings.
+ */
+static inline void mii_adv_to_linkmode_adv_t(unsigned long *advertising,
+                                            u32 adv)
+{
+       linkmode_zero(advertising);
+
+       if (adv & ADVERTISE_10HALF)
+               linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+                                advertising);
+       if (adv & ADVERTISE_10FULL)
+               linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+                                advertising);
+       if (adv & ADVERTISE_100HALF)
+               linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+                                advertising);
+       if (adv & ADVERTISE_100FULL)
+               linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+                                advertising);
+       if (adv & ADVERTISE_PAUSE_CAP)
+               linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising);
+       if (adv & ADVERTISE_PAUSE_ASYM)
+               linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising);
+}
+
+/**
+ * ethtool_adv_to_lcl_adv_t
+ * @advertising: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertising to local
+ * pause capabilities.
+ */
+static inline u32 ethtool_adv_to_lcl_adv_t(u32 advertising)
+{
+       u32 lcl_adv = 0;
+
+       if (advertising & ADVERTISED_Pause)
+               lcl_adv |= ADVERTISE_PAUSE_CAP;
+       if (advertising & ADVERTISED_Asym_Pause)
+               lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+       return lcl_adv;
+}
+
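Taken together with mii_adv_to_linkmode_adv_t() above, the helpers round-trip between MII_ADVERTISE bits and a linkmode mask; a sketch, not code from this patch:

       __ETHTOOL_DECLARE_LINK_MODE_MASK(adv);
       u32 mii_adv;

       mii_adv_to_linkmode_adv_t(adv, ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP);
       mii_adv = linkmode_adv_to_mii_adv_t(adv);
       /* mii_adv == ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP again */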
 /**
  * mii_advertise_flowctrl - get flow control advertisement flags
  * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both)
index 11fa4e66afc5d5cc983ebf40522f79595031ea58..e9b502d5bcc10f19abbb1f0c75d53884e79ad595 100644 (file)
@@ -504,6 +504,10 @@ struct health_buffer {
        __be16          ext_synd;
 };
 
+enum mlx5_cmd_addr_l_sz_offset {
+       MLX5_NIC_IFC_OFFSET = 8,
+};
+
 struct mlx5_init_seg {
        __be32                  fw_rev;
        __be32                  cmdif_rev_fw_sub;
index ed73b51f6697bd5b79c4c991114835227ffc572d..26a92462f4ce885e9717cc7da7130f7d1194e619 100644 (file)
@@ -838,6 +838,7 @@ struct mlx5_core_dev {
                u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
                u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
        } caps;
+       u64                     sys_image_guid;
        phys_addr_t             iseg_base;
        struct mlx5_init_seg __iomem *iseg;
        enum mlx5_device_state  state;
index f043d65b9bac2d65b0b8c8053e22a9051d03769f..6e8a882052b1973ed845098179941e4e94a35732 100644 (file)
@@ -896,7 +896,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         log_max_mkey[0x6];
        u8         reserved_at_f0[0x8];
        u8         dump_fill_mkey[0x1];
-       u8         reserved_at_f9[0x3];
+       u8         reserved_at_f9[0x2];
+       u8         fast_teardown[0x1];
        u8         log_max_eq[0x4];
 
        u8         max_indirection[0x8];
@@ -3352,12 +3353,13 @@ struct mlx5_ifc_teardown_hca_out_bits {
 
        u8         reserved_at_40[0x3f];
 
-       u8         force_state[0x1];
+       u8         state[0x1];
 };
 
 enum {
        MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE  = 0x0,
        MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE     = 0x1,
+       MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN = 0x2,
 };
 
 struct mlx5_ifc_teardown_hca_in_bits {
index 83a33a1873a6823be1a033dec3f0743a08732f88..7f5ca2cd3a32f7438f3f1ab39ad47422a701b53b 100644 (file)
@@ -90,6 +90,8 @@ struct mlx5_hairpin {
 
        u32 *rqn;
        u32 *sqn;
+
+       bool peer_gone;
 };
 
 struct mlx5_hairpin *
index 7e7c6dfcfb0900b8f15d691779f1d014e8d9ce21..9c694808c212b9cbd6e127378412f46598083026 100644 (file)
@@ -121,4 +121,6 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status);
 int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
                                       struct mlx5_core_dev *port_mdev);
 int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev);
+
+u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev);
 #endif /* __MLX5_VPORT_H__ */
index a61ebe8ad4ca92e72e23855c17f8e7c9ad059a54..0416a7204be37b331a506efedc5c4c1333633a6a 100644 (file)
@@ -2455,6 +2455,12 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
        return vma;
 }
 
+static inline bool range_in_vma(struct vm_area_struct *vma,
+                               unsigned long start, unsigned long end)
+{
+       return (vma && vma->vm_start <= start && end <= vma->vm_end);
+}
+
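A sketch of the intended use, e.g. checking a candidate huge-PMD sharing range before acting on it (base is illustrative):

       if (range_in_vma(vma, base, base + PUD_SIZE))
               ;       /* the whole PUD-sized range lies inside this VMA */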
 #ifdef CONFIG_MMU
 pgprot_t vm_get_page_prot(unsigned long vm_flags);
 void vma_set_page_prot(struct vm_area_struct *vma);
index 1e22d96734e0099476b18d14ca7c11a991d3a470..3f4c0b167333a37ca1a5b53cc3259904d5fc91ad 100644 (file)
@@ -671,12 +671,6 @@ typedef struct pglist_data {
 #ifdef CONFIG_NUMA_BALANCING
        /* Lock serializing the migrate rate limiting window */
        spinlock_t numabalancing_migrate_lock;
-
-       /* Rate limiting time interval */
-       unsigned long numabalancing_migrate_next_window;
-
-       /* Number of pages migrated during the rate limiting time interval */
-       unsigned long numabalancing_migrate_nr_pages;
 #endif
        /*
         * This is a per-node reserve of pages that are not available
index 8318f79586c28718a18d4c57287d2e6b7a5886f3..76603ee136a85ca7b5d163f6d57c347e76843121 100644 (file)
@@ -1762,6 +1762,8 @@ enum netdev_priv_flags {
  *                     switch driver and used to set the phys state of the
  *                     switch port.
  *
+ *     @wol_enabled:   Wake-on-LAN is enabled
+ *
  *     FIXME: cleanup struct net_device such that network protocol info
  *     moves out.
  */
@@ -2045,6 +2047,7 @@ struct net_device {
        struct lock_class_key   *qdisc_tx_busylock;
        struct lock_class_key   *qdisc_running_key;
        bool                    proto_down;
+       unsigned                wol_enabled:1;
 };
 #define to_net_dev(d) container_of(d, struct net_device, dev)
 
index 07efffd0c759d0b509dec19acb6b718cbab06031..bbe99d2b28b4c62063450b7c4dadc4013c377897 100644 (file)
@@ -215,6 +215,8 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
                break;
        case NFPROTO_ARP:
 #ifdef CONFIG_NETFILTER_FAMILY_ARP
+               if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp)))
+                       break;
                hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
 #endif
                break;
index 03097fa70975434ed686d99b4cda8171749656e5..e142b2b5f1ea6715f0280dd0d9ded623322efee5 100644 (file)
@@ -19,7 +19,4 @@ struct ip_conntrack_stat {
        unsigned int search_restart;
 };
 
-/* call to create an explicit dependency on nf_conntrack. */
-void need_conntrack(void);
-
 #endif /* _NF_CONNTRACK_COMMON_H */
index 71f121b66ca896a455d4ebf310c94d4a0472eaff..72580f1a72a224cf5cf53c4b0111d3bf0a42e8a6 100644 (file)
@@ -176,8 +176,10 @@ struct netlink_callback {
        void                    *data;
        /* the module that dump function belong to */
        struct module           *module;
+       struct netlink_ext_ack  *extack;
        u16                     family;
        u16                     min_dump_alloc;
+       bool                    strict_check;
        unsigned int            prev_seq, seq;
        long                    args[6];
 };
index 192a1fa0c73baf48d0af8919dc491bdcbb79673d..3ea87f774a76c87774cb69d83b5d4272b6336481 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/compiler.h>
 #include <linux/spinlock.h>
 #include <linux/ethtool.h>
+#include <linux/linkmode.h>
 #include <linux/mdio.h>
 #include <linux/mii.h>
 #include <linux/module.h>
 #define PHY_1000BT_FEATURES    (SUPPORTED_1000baseT_Half | \
                                 SUPPORTED_1000baseT_Full)
 
-#define PHY_BASIC_FEATURES     (PHY_10BT_FEATURES | \
-                                PHY_100BT_FEATURES | \
-                                PHY_DEFAULT_FEATURES)
-
-#define PHY_GBIT_FEATURES      (PHY_BASIC_FEATURES | \
-                                PHY_1000BT_FEATURES)
-
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
+
+#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features)
+#define PHY_BASIC_T1_FEATURES ((unsigned long *)&phy_basic_t1_features)
+#define PHY_GBIT_FEATURES ((unsigned long *)&phy_gbit_features)
+#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features)
+#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features)
+#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features)
+#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
 
 /*
  * Set phydev->irq to PHY_POLL if interrupts are not supported,
@@ -509,7 +518,7 @@ struct phy_driver {
        u32 phy_id;
        char *name;
        u32 phy_id_mask;
-       u32 features;
+       const unsigned long * const features;
        u32 flags;
        const void *driver_data;
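With .features now a pointer to a shared linkmode mask, a driver declaration might look like this sketch (IDs and name illustrative):

       static struct phy_driver example_drv = {
               .phy_id         = 0x00112233,
               .phy_id_mask    = 0xfffffff0,
               .name           = "Example Gigabit PHY",
               .features       = PHY_GBIT_FEATURES, /* mask pointer, not u32 */
       };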
 
@@ -967,6 +976,12 @@ static inline void phy_device_reset(struct phy_device *phydev, int value)
 #define phydev_err(_phydev, format, args...)   \
        dev_err(&_phydev->mdio.dev, format, ##args)
 
+#define phydev_info(_phydev, format, args...)  \
+       dev_info(&_phydev->mdio.dev, format, ##args)
+
+#define phydev_warn(_phydev, format, args...)  \
+       dev_warn(&_phydev->mdio.dev, format, ##args)
+
 #define phydev_dbg(_phydev, format, args...)   \
        dev_dbg(&_phydev->mdio.dev, format, ##args)
 
@@ -1039,7 +1054,7 @@ void phy_change_work(struct work_struct *work);
 void phy_mac_interrupt(struct phy_device *phydev);
 void phy_start_machine(struct phy_device *phydev);
 void phy_stop_machine(struct phy_device *phydev);
-void phy_trigger_machine(struct phy_device *phydev, bool sync);
+void phy_trigger_machine(struct phy_device *phydev);
 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
 void phy_ethtool_ksettings_get(struct phy_device *phydev,
                               struct ethtool_link_ksettings *cmd);
index 9713aebdd348b1d9bb73ecbe2b82efba3f929695..03b319f89a345b37c89c7104d6bc48c1a3d66dc8 100644 (file)
@@ -37,9 +37,11 @@ enum phy_mode {
        PHY_MODE_USB_OTG,
        PHY_MODE_SGMII,
        PHY_MODE_2500SGMII,
+       PHY_MODE_QSGMII,
        PHY_MODE_10GKR,
        PHY_MODE_UFS_HS_A,
        PHY_MODE_UFS_HS_B,
+       PHY_MODE_PCIE,
 };
 
 /**
index 8cd34645e892623b71d5ee446dfb6a4423594e93..dee3c9c744f7526bbcb8ffa061ab22e17a9c13a4 100644 (file)
@@ -670,10 +670,11 @@ enum qed_link_mode_bits {
        QED_LM_1000baseT_Half_BIT = BIT(4),
        QED_LM_1000baseT_Full_BIT = BIT(5),
        QED_LM_10000baseKR_Full_BIT = BIT(6),
-       QED_LM_25000baseKR_Full_BIT = BIT(7),
-       QED_LM_40000baseLR4_Full_BIT = BIT(8),
-       QED_LM_50000baseKR2_Full_BIT = BIT(9),
-       QED_LM_100000baseKR4_Full_BIT = BIT(10),
+       QED_LM_20000baseKR2_Full_BIT = BIT(7),
+       QED_LM_25000baseKR_Full_BIT = BIT(8),
+       QED_LM_40000baseLR4_Full_BIT = BIT(9),
+       QED_LM_50000baseKR2_Full_BIT = BIT(10),
+       QED_LM_100000baseKR4_Full_BIT = BIT(11),
        QED_LM_COUNT = 11
 };
 
index 3468703d663af6d94bbf6fd07343d8bd9418b81a..a459a5e973a7294f171e192dd4dacf98d5b27c16 100644 (file)
@@ -48,9 +48,9 @@ struct regulator;
  * DISABLE_IN_SUSPEND  - turn off regulator in suspend states
  * ENABLE_IN_SUSPEND   - keep regulator on in suspend states
  */
-#define DO_NOTHING_IN_SUSPEND  (-1)
-#define DISABLE_IN_SUSPEND     0
-#define ENABLE_IN_SUSPEND      1
+#define DO_NOTHING_IN_SUSPEND  0
+#define DISABLE_IN_SUSPEND     1
+#define ENABLE_IN_SUSPEND      2
 
 /* Regulator active discharge flags */
 enum regulator_active_discharge {
index 87e29710373f475816adc7acce7d46accedbc872..119d092c6b1324111ee81bb7ef0ab4b0fff7749d 100644 (file)
@@ -1082,11 +1082,6 @@ static inline int skb_pad(struct sk_buff *skb, int pad)
 }
 #define dev_kfree_skb(a)       consume_skb(a)
 
-int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
-                           int getfrag(void *from, char *to, int offset,
-                                       int len, int odd, struct sk_buff *skb),
-                           void *from, int length);
-
 int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
                         int offset, size_t size);
 
index b2bd4b4127c46a2e8f2eb103d989b629d7169570..69ee30456864a05ceb76bac1d0dbe8df3a0e3448 100644 (file)
@@ -81,8 +81,10 @@ enum spi_mem_data_dir {
  * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
  * @data.buswidth: number of IO lanes used to send/receive the data
  * @data.dir: direction of the transfer
- * @data.buf.in: input buffer
- * @data.buf.out: output buffer
+ * @data.nbytes: number of data bytes to send/receive. Can be zero if the
+ *              operation does not involve transferring data
+ * @data.buf.in: input buffer (must be DMA-able)
+ * @data.buf.out: output buffer (must be DMA-able)
  */
 struct spi_mem_op {
        struct {
@@ -105,7 +107,6 @@ struct spi_mem_op {
                u8 buswidth;
                enum spi_mem_data_dir dir;
                unsigned int nbytes;
-               /* buf.{in,out} must be DMA-able. */
                union {
                        void *in;
                        const void *out;
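Assuming the SPI_MEM_OP_* helper macros from this same header, a single-lane read honouring the DMA-able constraint might be sketched as (opcode and sizes illustrative):

       void *buf = kmalloc(len, GFP_KERNEL); /* DMA-able, unlike stack memory */
       struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
                                         SPI_MEM_OP_ADDR(3, addr, 1),
                                         SPI_MEM_OP_NO_DUMMY,
                                         SPI_MEM_OP_DATA_IN(len, buf, 1));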
index 409c845d4cd3dce5762c27a2e7e7c1c5c288f134..422b1c01ee0de0d679d7f6cb4276bb7d45e82186 100644 (file)
@@ -172,7 +172,7 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 static __always_inline __must_check
 size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
 {
-       if (unlikely(!check_copy_size(addr, bytes, false)))
+       if (unlikely(!check_copy_size(addr, bytes, true)))
                return 0;
        else
                return _copy_to_iter_mcsafe(addr, bytes, i);
index e2ec3582e54937d3818afed3e253440fc23541a0..d8860f2d0976d1f66594a8ed502deb8135a15878 100644 (file)
@@ -28,7 +28,7 @@ struct usbnet {
        /* housekeeping */
        struct usb_device       *udev;
        struct usb_interface    *intf;
-       struct driver_info      *driver_info;
+       const struct driver_info *driver_info;
        const char              *driver_name;
        void                    *driver_priv;
        wait_queue_head_t       wait;
index 9397628a196714dc2177552465fe91fd18b9627d..cb462f9ab7dd592bc1d613c86aefeb787cdd9321 100644 (file)
@@ -5,6 +5,24 @@
 #include <linux/if_vlan.h>
 #include <uapi/linux/virtio_net.h>
 
+static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
+                                          const struct virtio_net_hdr *hdr)
+{
+       switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+       case VIRTIO_NET_HDR_GSO_TCPV4:
+       case VIRTIO_NET_HDR_GSO_UDP:
+               skb->protocol = cpu_to_be16(ETH_P_IP);
+               break;
+       case VIRTIO_NET_HDR_GSO_TCPV6:
+               skb->protocol = cpu_to_be16(ETH_P_IPV6);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
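A sketch of the intended call site: when the packet itself carries no protocol, derive it from the GSO type before parsing the rest of the header (error handling illustrative):

       if (!skb->protocol && virtio_net_hdr_set_proto(skb, hdr) < 0)
               return -EINVAL; /* unknown GSO type */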
 static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
                                        const struct virtio_net_hdr *hdr,
                                        bool little_endian)
index ea73fef8bdc021b48e68b4b4ce8bb7fe7fc44b57..8586cfb498286ce4399487f4f4627ff2a0b16e9f 100644 (file)
@@ -38,10 +38,13 @@ struct v4l2_ctrl_handler;
  * @prio: priority of the file handler, as defined by &enum v4l2_priority
  *
  * @wait: event's wait queue
+ * @subscribe_lock: serialises changes to the subscribed list; guarantees
+ *                 that the add and del event callbacks are called in order
  * @subscribed: list of subscribed events
  * @available: list of events waiting to be dequeued
  * @navailable: number of available events at @available list
  * @sequence: event sequence number
+ *
  * @m2m_ctx: pointer to &struct v4l2_m2m_ctx
  */
 struct v4l2_fh {
@@ -52,6 +55,7 @@ struct v4l2_fh {
 
        /* Events */
        wait_queue_head_t       wait;
+       struct mutex            subscribe_lock;
        struct list_head        subscribed;
        struct list_head        available;
        unsigned int            navailable;
index 1ddff3360592d7cb4d46c2d06d55bb66fc2f5582..05c7df41d7375ae2c2ac6554b75af7e74afdf781 100644 (file)
@@ -13,7 +13,7 @@
 #include <net/netns/generic.h>
 
 struct tcf_idrinfo {
-       spinlock_t      lock;
+       struct mutex    lock;
        struct idr      action_idr;
 };
 
@@ -117,7 +117,7 @@ int tc_action_net_init(struct tc_action_net *tn,
        if (!tn->idrinfo)
                return -ENOMEM;
        tn->ops = ops;
-       spin_lock_init(&tn->idrinfo->lock);
+       mutex_init(&tn->idrinfo->lock);
        idr_init(&tn->idrinfo->action_idr);
        return err;
 }
index f53edb3754bc4fe5203794dc444bc328dafc9c38..de587948042a4ab6ce9e00dac021ce492a026621 100644 (file)
@@ -13,6 +13,7 @@
 #define _NET_RXRPC_H
 
 #include <linux/rxrpc.h>
+#include <linux/ktime.h>
 
 struct key;
 struct sock;
@@ -77,5 +78,8 @@ int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *,
 int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *,
                            enum rxrpc_call_completion *, u32 *);
 u32 rxrpc_kernel_check_life(struct socket *, struct rxrpc_call *);
+u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
+bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
+                                ktime_t *);
 
 #endif /* _NET_RXRPC_H */
index cdd9f1fe7cfa903ea79f54ac0b17b1d946d81d92..c36dc1e20556aa2eb5924cc6978f4dee4757ac48 100644 (file)
@@ -1517,6 +1517,20 @@ struct hci_cp_le_write_def_data_len {
        __le16  tx_time;
 } __packed;
 
+#define HCI_OP_LE_ADD_TO_RESOLV_LIST   0x2027
+struct hci_cp_le_add_to_resolv_list {
+       __u8     bdaddr_type;
+       bdaddr_t bdaddr;
+       __u8     peer_irk[16];
+       __u8     local_irk[16];
+} __packed;
+
+#define HCI_OP_LE_DEL_FROM_RESOLV_LIST 0x2028
+struct hci_cp_le_del_from_resolv_list {
+       __u8     bdaddr_type;
+       bdaddr_t bdaddr;
+} __packed;
+
 #define HCI_OP_LE_CLEAR_RESOLV_LIST    0x2029
 
 #define HCI_OP_LE_READ_RESOLV_LIST_SIZE        0x202a
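
A hedged sketch of how the host stack might queue the new command; the request helpers are stack-internal, and the params/irk sources here are placeholders:

	/* Sketch: build and queue an LE Add Device To Resolving List
	 * command for one peer (field sources are illustrative). */
	static void example_add_to_resolv_list(struct hci_request *req,
					       struct hci_conn_params *params,
					       struct smp_irk *irk,
					       u8 local_irk[16])
	{
		struct hci_cp_le_add_to_resolv_list cp;

		memset(&cp, 0, sizeof(cp));
		cp.bdaddr_type = params->addr_type;
		bacpy(&cp.bdaddr, &params->addr);
		memcpy(cp.peer_irk, irk->val, 16);
		memcpy(cp.local_irk, local_irk, 16); /* all zeroes if none */

		hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
			    sizeof(cp), &cp);
	}
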
index 0db1b9b428b7d22b87ee50bed3491a672a6a76cb..e5ea633ea36880176ac608e20570f46f455c35d2 100644 (file)
@@ -103,6 +103,14 @@ struct bdaddr_list {
        u8 bdaddr_type;
 };
 
+struct bdaddr_list_with_irk {
+       struct list_head list;
+       bdaddr_t bdaddr;
+       u8 bdaddr_type;
+       u8 peer_irk[16];
+       u8 local_irk[16];
+};
+
 struct bt_uuid {
        struct list_head list;
        u8 uuid[16];
@@ -259,6 +267,8 @@ struct hci_dev {
        __u16           le_max_tx_time;
        __u16           le_max_rx_len;
        __u16           le_max_rx_time;
+       __u8            le_max_key_size;
+       __u8            le_min_key_size;
        __u16           discov_interleaved_timeout;
        __u16           conn_info_min_age;
        __u16           conn_info_max_age;
@@ -1058,8 +1068,15 @@ int hci_inquiry(void __user *arg);
 
 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list,
                                           bdaddr_t *bdaddr, u8 type);
+struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
+                                   struct list_head *list, bdaddr_t *bdaddr,
+                                   u8 type);
 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type);
+int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
+                                       u8 type, u8 *peer_irk, u8 *local_irk);
 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type);
+int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
+                                                               u8 type);
 void hci_bdaddr_list_clear(struct list_head *list);
 
 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
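
A sketch of the intended pairing of the new list helpers, with error handling trimmed; the le_resolv_list member name is assumed from this series:

	/* Sketch: mirror a controller resolving-list entry in the host's
	 * bookkeeping, then look it up later by address and type. */
	static int example_track_entry(struct hci_dev *hdev, bdaddr_t *bdaddr,
				       u8 type, u8 peer_irk[16],
				       u8 local_irk[16])
	{
		struct bdaddr_list_with_irk *entry;
		int err;

		err = hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list,
						   bdaddr, type,
						   peer_irk, local_irk);
		if (err)
			return err;

		entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list,
							bdaddr, type);
		return entry ? 0 : -ENOENT;
	}
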
index 0697fd41308777a4801dcf8728f2ff5aa0210804..3555440e14fc910c6355468cb0468e9e8f8796ce 100644 (file)
@@ -455,9 +455,6 @@ struct l2cap_conn_param_update_rsp {
 #define L2CAP_CONN_PARAM_ACCEPTED      0x0000
 #define L2CAP_CONN_PARAM_REJECTED      0x0001
 
-#define L2CAP_LE_MAX_CREDITS           10
-#define L2CAP_LE_DEFAULT_MPS           230
-
 struct l2cap_le_conn_req {
        __le16     psm;
        __le16     scid;
index a2d058170ea3c38739263570bcf14f2a0935e16f..b46d68acf7011f39d86eb65ad2d9886f650f144e 100644 (file)
@@ -139,12 +139,6 @@ struct bond_parm_tbl {
        int mode;
 };
 
-struct netdev_notify_work {
-       struct delayed_work     work;
-       struct net_device       *dev;
-       struct netdev_bonding_info bonding_info;
-};
-
 struct slave {
        struct net_device *dev; /* first - useful for panic debug */
        struct bonding *bond; /* our master */
@@ -172,6 +166,7 @@ struct slave {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll *np;
 #endif
+       struct delayed_work notify_work;
        struct kobject kobj;
        struct rtnl_link_stats64 slave_stats;
 };
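
The delayed work moves from a standalone, dynamically allocated netdev_notify_work into each slave, so it can be initialised once and cancelled deterministically; a hedged sketch of the lifecycle (function names are illustrative):

	/* Sketch: per-slave work set up at slave creation time. */
	static void example_notify_fn(struct work_struct *work)
	{
		struct slave *slave = container_of(work, struct slave,
						   notify_work.work);

		/* e.g. push state via netdev_bonding_info_change()
		 * for slave->dev */
	}

	static void example_slave_setup(struct slave *slave)
	{
		INIT_DELAYED_WORK(&slave->notify_work, example_notify_fn);
	}

	static void example_slave_teardown(struct slave *slave)
	{
		/* cannot race with a queued notification any more */
		cancel_delayed_work_sync(&slave->notify_work);
	}
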
index 9f3ed79c39d76bbe64c0ee21ca9ebb16ad783477..1ec67536bbab38b8bd3210af7773feb220ddbb08 100644 (file)
@@ -4865,8 +4865,6 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
  *
  * @alpha2: the ISO/IEC 3166 alpha2 wmm rule to be queried.
  * @freq: the frequency (in MHz) to be queried.
- * @ptr: pointer where the regdb wmm data is to be stored (or %NULL if
- *     irrelevant). This can be used later for deduplication.
  * @rule: pointer to store the wmm rule from the regulatory db.
  *
  * Self-managed wireless drivers can use this function to query
index b9b89d6604d402eb7ab83f1273526eba7fec42d8..9a70755ad1c2f61fd62ec064bbedd0001af0ad1f 100644 (file)
@@ -362,6 +362,9 @@ enum devlink_param_generic_id {
        DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
        DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV,
        DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+       DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI,
+       DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+       DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
 
        /* add new param generic ids above here */
        __DEVLINK_PARAM_GENERIC_ID_MAX,
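
With the three new generic IDs in place, a driver can expose them through the DEVLINK_PARAM_GENERIC() helper defined further down in this header; a sketch (the validate callback and registration comment are illustrative, not from this commit):

	static int example_validate(struct devlink *devlink, u32 id,
				    union devlink_param_value val,
				    struct netlink_ext_ack *extack)
	{
		return 0;	/* accept anything in this sketch */
	}

	/* Sketch: a driver-side table exposing two of the new params. */
	static const struct devlink_param example_params[] = {
		DEVLINK_PARAM_GENERIC(IGNORE_ARI,
				      BIT(DEVLINK_PARAM_CMODE_PERMANENT),
				      NULL, NULL, example_validate),
		DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX,
				      BIT(DEVLINK_PARAM_CMODE_PERMANENT),
				      NULL, NULL, example_validate),
	};

	/* registered with devlink_params_register(devlink, example_params,
	 *                                         ARRAY_SIZE(example_params)) */
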
@@ -380,6 +383,15 @@ enum devlink_param_generic_id {
 #define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME "region_snapshot_enable"
 #define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE DEVLINK_PARAM_TYPE_BOOL
 
+#define DEVLINK_PARAM_GENERIC_IGNORE_ARI_NAME "ignore_ari"
+#define DEVLINK_PARAM_GENERIC_IGNORE_ARI_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_NAME "msix_vec_per_pf_max"
+#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_TYPE DEVLINK_PARAM_TYPE_U32
+
+#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_NAME "msix_vec_per_pf_min"
+#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_TYPE DEVLINK_PARAM_TYPE_U32
+
 #define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate)     \
 {                                                                      \
        .id = DEVLINK_PARAM_GENERIC_ID_##_id,                           \
@@ -451,11 +463,14 @@ struct devlink_ops {
                                       u32 *p_cur, u32 *p_max);
 
        int (*eswitch_mode_get)(struct devlink *devlink, u16 *p_mode);
-       int (*eswitch_mode_set)(struct devlink *devlink, u16 mode);
+       int (*eswitch_mode_set)(struct devlink *devlink, u16 mode,
+                               struct netlink_ext_ack *extack);
        int (*eswitch_inline_mode_get)(struct devlink *devlink, u8 *p_inline_mode);
-       int (*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode);
+       int (*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode,
+                                      struct netlink_ext_ack *extack);
        int (*eswitch_encap_mode_get)(struct devlink *devlink, u8 *p_encap_mode);
-       int (*eswitch_encap_mode_set)(struct devlink *devlink, u8 encap_mode);
+       int (*eswitch_encap_mode_set)(struct devlink *devlink, u8 encap_mode,
+                                     struct netlink_ext_ack *extack);
 };
 
 static inline void *devlink_priv(struct devlink *devlink)
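
The extra extack argument lets drivers return a human-readable reason instead of a bare errno; a minimal sketch of an op using it (driver specifics assumed):

	static int example_eswitch_mode_set(struct devlink *devlink, u16 mode,
					    struct netlink_ext_ack *extack)
	{
		if (mode != DEVLINK_ESWITCH_MODE_LEGACY) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only legacy eswitch mode is supported");
			return -EOPNOTSUPP;
		}
		return 0;
	}
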
index e03b93360f332b3e3232873ac1cbd0ee7478fabb..a80fd0ac4563283246f4f53cea1ac0cd17b41dab 100644 (file)
@@ -130,12 +130,6 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
        return sk->sk_bound_dev_if;
 }
 
-static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
-{
-       return rcu_dereference_check(ireq->ireq_opt,
-                                    refcount_read(&ireq->req.rsk_refcnt) > 0);
-}
-
 struct inet_cork {
        unsigned int            flags;
        __be32                  addr;
index e44b1a44f67ad447528f1c59f05915157e016154..72593e171d14c83cc2c005f219bf7277f25b24f5 100644 (file)
@@ -420,8 +420,35 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
        return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
 }
 
-int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, int fc_mx_len,
-                      u32 *metrics);
+struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
+                                       int fc_mx_len);
+static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
+{
+       if (fib_metrics != &dst_default_metrics &&
+           refcount_dec_and_test(&fib_metrics->refcnt))
+               kfree(fib_metrics);
+}
+
+/* ipv4 and ipv6 both use refcounted metrics unless they are the default */
+static inline
+void ip_dst_init_metrics(struct dst_entry *dst, struct dst_metrics *fib_metrics)
+{
+       dst_init_metrics(dst, fib_metrics->metrics, true);
+
+       if (fib_metrics != &dst_default_metrics) {
+               dst->_metrics |= DST_METRICS_REFCOUNTED;
+               refcount_inc(&fib_metrics->refcnt);
+       }
+}
+
+static inline
+void ip_dst_metrics_put(struct dst_entry *dst)
+{
+       struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
+
+       if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
+               kfree(p);
+}
 
 u32 ip_idents_reserve(u32 hash, int segs);
 void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
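
The refcounting contract is visible in the helpers themselves; a sketch of the intended pairing on the route side (fields named as in fib_info/rtable, simplified):

	/* Sketch: a dst borrows the fib_info's metrics; the init helper
	 * takes a reference unless they are the shared defaults, and the
	 * put helper drops it, freeing on the last user. */
	static void example_init(struct rtable *rt, struct fib_info *fi)
	{
		ip_dst_init_metrics(&rt->dst, fi->fib_metrics);
	}

	static void example_destroy(struct rtable *rt)
	{
		ip_dst_metrics_put(&rt->dst);
	}
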
index 7b9c82de11cc9388b070992af610e5fd14b66333..cef186dbd2ce5131b994eb04aeeea541516d7879 100644 (file)
@@ -165,8 +165,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif,
 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu);
 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
                  kuid_t uid);
-void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
-                           u32 mark);
+void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif);
 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
 
 struct netlink_callback;
index f7c109e372987c06f379c673a2e99635e2da477b..9846b79c9ee194e9e4242f5fbc56728c49eccfd5 100644 (file)
@@ -452,4 +452,6 @@ static inline void fib_proc_exit(struct net *net)
 
 u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr);
 
+int ip_valid_fib_dump_req(const struct nlmsghdr *nlh,
+                         struct netlink_ext_ack *extack);
 #endif  /* _NET_FIB_H */
index c84b51682f08c68b7dbddcdb9f8b82b2465cefa6..135ee702c7b0373cb22deca14b87e1e7e303b5a9 100644 (file)
 #ifndef _NF_CONNTRACK_IPV4_H
 #define _NF_CONNTRACK_IPV4_H
 
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp;
 extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
 #ifdef CONFIG_NF_CT_PROTO_DCCP
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp;
 #endif
 #ifdef CONFIG_NF_CT_PROTO_SCTP
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp;
 #endif
 #ifdef CONFIG_NF_CT_PROTO_UDPLITE
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite;
 #endif
 
-int nf_conntrack_ipv4_compat_init(void);
-void nf_conntrack_ipv4_compat_fini(void);
-
 #endif /*_NF_CONNTRACK_IPV4_H*/
index effa8dfba68ce9648001ce518eb4940d3a8d4c37..7b3c873f883966386ce6f1f2b5cd60422090ecab 100644 (file)
@@ -2,20 +2,7 @@
 #ifndef _NF_CONNTRACK_IPV6_H
 #define _NF_CONNTRACK_IPV6_H
 
-extern const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
-
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6;
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
 extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
-#ifdef CONFIG_NF_CT_PROTO_DCCP
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6;
-#endif
-#ifdef CONFIG_NF_CT_PROTO_SCTP
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6;
-#endif
-#ifdef CONFIG_NF_CT_PROTO_UDPLITE
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6;
-#endif
 
 #include <linux/sysctl.h>
 extern struct ctl_table nf_ct_ipv6_sysctl_table[];
index 2a3e0974a6af4029ecba39cf4bb41d2c46d77282..afc9b3620473e96dc2807b0891c8fdce5e46fa25 100644 (file)
@@ -20,8 +20,7 @@
 /* This header is used to share core functionality between the
    standalone connection tracking module, and the compatibility layer's use
    of connection tracking. */
-unsigned int nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
-                            struct sk_buff *skb);
+unsigned int nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state);
 
 int nf_conntrack_init_net(struct net *net);
 void nf_conntrack_cleanup_net(struct net *net);
index 8465263b297d70921678fa4c9bf3281c18aaa4ff..eed04af9b75e56b6c33d0887cdefa4c8f827251e 100644 (file)
@@ -18,9 +18,6 @@
 struct seq_file;
 
 struct nf_conntrack_l4proto {
-       /* L3 Protocol number. */
-       u_int16_t l3proto;
-
        /* L4 Protocol number. */
        u_int8_t l4proto;
 
@@ -43,22 +40,14 @@ struct nf_conntrack_l4proto {
 
        /* Returns verdict for packet, or -1 for invalid. */
        int (*packet)(struct nf_conn *ct,
-                     const struct sk_buff *skb,
+                     struct sk_buff *skb,
                      unsigned int dataoff,
-                     enum ip_conntrack_info ctinfo);
-
-       /* Called when a new connection for this protocol found;
-        * returns TRUE if it's OK.  If so, packet() called next. */
-       bool (*new)(struct nf_conn *ct, const struct sk_buff *skb,
-                   unsigned int dataoff);
+                     enum ip_conntrack_info ctinfo,
+                     const struct nf_hook_state *state);
 
        /* Called when a conntrack entry is destroyed */
        void (*destroy)(struct nf_conn *ct);
 
-       int (*error)(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
-                    unsigned int dataoff,
-                    u_int8_t pf, unsigned int hooknum);
-
        /* called by gc worker if table is full */
        bool (*can_early_drop)(const struct nf_conn *ct);
 
@@ -92,7 +81,7 @@ struct nf_conntrack_l4proto {
 #endif
        unsigned int    *net_id;
        /* Init l4proto pernet data */
-       int (*init_net)(struct net *net, u_int16_t proto);
+       int (*init_net)(struct net *net);
 
        /* Return the per-net protocol part. */
        struct nf_proto_net *(*get_net_proto)(struct net *net);
@@ -101,16 +90,23 @@ struct nf_conntrack_l4proto {
        struct module *me;
 };
 
+int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
+                             struct sk_buff *skb,
+                             unsigned int dataoff,
+                             const struct nf_hook_state *state);
+
+int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
+                             struct sk_buff *skb,
+                             unsigned int dataoff,
+                             const struct nf_hook_state *state);
 /* Existing built-in generic protocol */
 extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
 
-#define MAX_NF_CT_PROTO 256
+#define MAX_NF_CT_PROTO IPPROTO_UDPLITE
 
-const struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u_int16_t l3proto,
-                                                 u_int8_t l4proto);
+const struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u8 l4proto);
 
-const struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u_int16_t l3proto,
-                                                   u_int8_t l4proto);
+const struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u8 l4proto);
 void nf_ct_l4proto_put(const struct nf_conntrack_l4proto *p);
 
 /* Protocol pernet registration. */
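
With ->new() and ->error() folded away and the l3proto dimension gone, a tracker implements everything in ->packet(); a hedged skeleton (the per-protocol decision logic is a placeholder):

	static bool example_new_ok(const struct sk_buff *skb,
				   unsigned int dataoff)
	{
		return true;	/* placeholder for protocol sanity checks */
	}

	/* Skeleton of the consolidated callback: state now carries what
	 * used to be separate net/pf/hooknum arguments. */
	static int example_packet(struct nf_conn *ct,
				  struct sk_buff *skb,
				  unsigned int dataoff,
				  enum ip_conntrack_info ctinfo,
				  const struct nf_hook_state *state)
	{
		if (!nf_ct_is_confirmed(ct)) {
			/* one common pattern: what ->new() used to decide
			 * happens here; -NF_ACCEPT (== -1) means invalid */
			if (!example_new_ok(skb, dataoff))
				return -NF_ACCEPT;
		}
		return NF_ACCEPT;
	}
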
index 0f39ac487012c3075a3154c65b22504a15063dcd..841835a387e17849155ae85f63b3197e747f3469 100644 (file)
@@ -470,6 +470,9 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
                       struct nft_set_binding *binding);
 void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
                          struct nft_set_binding *binding);
+void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set,
+                         struct nft_set_binding *binding);
+void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
 
 /**
  *     enum nft_set_extensions - set extension type IDs
@@ -724,7 +727,9 @@ struct nft_expr_type {
  *     @eval: Expression evaluation function
  *     @size: full expression size, including private data size
  *     @init: initialization function
- *     @destroy: destruction function
+ *     @activate: activate expression in the next generation
+ *     @deactivate: deactivate expression in next generation
+ *     @destroy: destruction function, called after synchronize_rcu
  *     @dump: function to dump parameters
  *     @type: expression type
  *     @validate: validate expression, called during loop detection
@@ -1293,12 +1298,14 @@ static inline void nft_set_elem_clear_busy(struct nft_set_ext *ext)
  *
  *     @list: used internally
  *     @msg_type: message type
+ *     @put_net: ctx->net needs to be put
  *     @ctx: transaction context
  *     @data: internal information related to the transaction
  */
 struct nft_trans {
        struct list_head                list;
        int                             msg_type;
+       bool                            put_net;
        struct nft_ctx                  ctx;
        char                            data[0];
 };
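
Taken together with the new @activate/@deactivate documentation above, the set helpers sketch out a lifecycle for expressions that bind sets; a hedged example modelled on a lookup-style expression (the callback signatures are assumed for this kernel, and the priv layout is illustrative):

	struct example_priv {
		struct nft_set		*set;
		struct nft_set_binding	binding;
	};

	/* deactivate: the transaction removes the rule, drop the binding */
	static void example_deactivate(const struct nft_ctx *ctx,
				       const struct nft_expr *expr)
	{
		struct example_priv *priv = nft_expr_priv(expr);

		nf_tables_unbind_set(ctx, priv->set, &priv->binding);
	}

	/* activate: the transaction aborted, take the binding back */
	static void example_activate(const struct nft_ctx *ctx,
				     const struct nft_expr *expr)
	{
		struct example_priv *priv = nft_expr_priv(expr);

		nf_tables_rebind_set(ctx, priv->set, &priv->binding);
	}

	/* destroy: runs after synchronize_rcu, safe to drop the set */
	static void example_destroy(const struct nft_ctx *ctx,
				    const struct nft_expr *expr)
	{
		struct example_priv *priv = nft_expr_priv(expr);

		nf_tables_destroy_set(ctx, priv->set);
	}
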
index 8da837d2aaf997d5d642b081154c1bf9d2b3b26b..2046d104f323645341e4c8bdca369b2589120811 100644 (file)
@@ -16,6 +16,10 @@ extern struct nft_expr_type nft_meta_type;
 extern struct nft_expr_type nft_rt_type;
 extern struct nft_expr_type nft_exthdr_type;
 
+#ifdef CONFIG_NETWORK_SECMARK
+extern struct nft_object_type nft_secmark_obj_type;
+#endif
+
 int nf_tables_core_module_init(void);
 void nf_tables_core_module_exit(void);
 
index 318b1ded3833392a9b4ca8f6f997a9db8f70a1f1..f1db8e594847a7503f083182ba4a717b1a628e40 100644 (file)
  *   nla_find()                                find attribute in stream of attributes
  *   nla_find_nested()                 find attribute in nested attributes
  *   nla_parse()                       parse and validate stream of attrs
- *   nla_parse_nested()                        parse nested attribuets
+ *   nla_parse_nested()                        parse nested attributes
  *   nla_for_each_attr()               loop over all attributes
  *   nla_for_each_nested()             loop over the nested attributes
  *=========================================================================
@@ -172,7 +172,7 @@ enum {
        NLA_FLAG,
        NLA_MSECS,
        NLA_NESTED,
-       NLA_NESTED_COMPAT,
+       NLA_NESTED_ARRAY,
        NLA_NUL_STRING,
        NLA_BINARY,
        NLA_S8,
@@ -188,9 +188,20 @@ enum {
 
 #define NLA_TYPE_MAX (__NLA_TYPE_MAX - 1)
 
+enum nla_policy_validation {
+       NLA_VALIDATE_NONE,
+       NLA_VALIDATE_RANGE,
+       NLA_VALIDATE_MIN,
+       NLA_VALIDATE_MAX,
+       NLA_VALIDATE_FUNCTION,
+};
+
 /**
  * struct nla_policy - attribute validation policy
  * @type: Type of attribute or NLA_UNSPEC
+ * @validation_type: type of attribute validation done in addition to
+ *     type-specific validation (e.g. range, function call), see
+ *     &enum nla_policy_validation
  * @len: Type specific length of payload
  *
  * Policies are defined as arrays of this struct, the array must be
@@ -201,9 +212,11 @@ enum {
  *    NLA_NUL_STRING       Maximum length of string (excluding NUL)
  *    NLA_FLAG             Unused
  *    NLA_BINARY           Maximum length of attribute payload
- *    NLA_NESTED           Don't use `len' field -- length verification is
- *                         done by checking len of nested header (or empty)
- *    NLA_NESTED_COMPAT    Minimum length of structure payload
+ *    NLA_NESTED,
+ *    NLA_NESTED_ARRAY     Length verification is done by checking len of
+ *                         nested header (or empty); len field is used if
+ *                         validation_data is also used, for the max attr
+ *                         number in the nested policy.
  *    NLA_U8, NLA_U16,
  *    NLA_U32, NLA_U64,
  *    NLA_S8, NLA_S16,
@@ -226,7 +239,43 @@ enum {
  *    NLA_REJECT           This attribute is always rejected and validation data
  *                         may point to a string to report as the error instead
  *                         of the generic one in extended ACK.
- *    All other            Unused
+ *    NLA_NESTED           Points to a nested policy to validate, must also set
+ *                         `len' to the max attribute number.
+ *                         Note that nla_parse() will validate, but of course not
+ *                         parse, the nested sub-policies.
+ *    NLA_NESTED_ARRAY     Points to a nested policy to validate, must also set
+ *                         `len' to the max attribute number. The difference
+ *                         from NLA_NESTED is the structure - NLA_NESTED has the
+ *                         nested attributes directly inside, while an array has
+ *                         the nested attributes at another level down and the
+ *                         attributes directly in the nesting don't matter.
+ *    All other            Unused - but note that it's a union
+ *
+ * Meaning of `min' and `max' fields, use via NLA_POLICY_MIN, NLA_POLICY_MAX
+ * and NLA_POLICY_RANGE:
+ *    NLA_U8,
+ *    NLA_U16,
+ *    NLA_U32,
+ *    NLA_U64,
+ *    NLA_S8,
+ *    NLA_S16,
+ *    NLA_S32,
+ *    NLA_S64              These are used depending on the validation_type
+ *                         field; if that is min/max/range then the minimum,
+ *                         maximum and both are used (respectively) to check
+ *                         the value of the integer attribute.
+ *                         Note that in the interest of code simplicity and
+ *                         struct size both limits are s16, so you cannot
+ *                         enforce a range that doesn't fall within the range
+ *                         of s16 - do that as usual in the code instead.
+ *    All other            Unused - but note that it's a union
+ *
+ * Meaning of `validate' field, use via NLA_POLICY_VALIDATE_FN:
+ *    NLA_BINARY           Validation function called for the attribute,
+ *                         not compatible with use of the validation_data
+ *                         as in NLA_BITFIELD32, NLA_REJECT, NLA_NESTED and
+ *                         NLA_NESTED_ARRAY.
+ *    All other            Unused - but note that it's a union
  *
  * Example:
  * static const struct nla_policy my_policy[ATTR_MAX+1] = {
@@ -237,9 +286,17 @@ enum {
  * };
  */
 struct nla_policy {
-       u16             type;
+       u8              type;
+       u8              validation_type;
        u16             len;
-       void            *validation_data;
+       union {
+               const void *validation_data;
+               struct {
+                       s16 min, max;
+               };
+               int (*validate)(const struct nlattr *attr,
+                               struct netlink_ext_ack *extack);
+       };
 };
 
 #define NLA_POLICY_EXACT_LEN(_len)     { .type = NLA_EXACT_LEN, .len = _len }
@@ -249,6 +306,49 @@ struct nla_policy {
 #define NLA_POLICY_ETH_ADDR            NLA_POLICY_EXACT_LEN(ETH_ALEN)
 #define NLA_POLICY_ETH_ADDR_COMPAT     NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN)
 
+#define NLA_POLICY_NESTED(maxattr, policy) \
+       { .type = NLA_NESTED, .validation_data = policy, .len = maxattr }
+#define NLA_POLICY_NESTED_ARRAY(maxattr, policy) \
+       { .type = NLA_NESTED_ARRAY, .validation_data = policy, .len = maxattr }
+
+#define __NLA_ENSURE(condition) (sizeof(char[1 - 2*!(condition)]) - 1)
+#define NLA_ENSURE_INT_TYPE(tp)                                \
+       (__NLA_ENSURE(tp == NLA_S8 || tp == NLA_U8 ||   \
+                     tp == NLA_S16 || tp == NLA_U16 || \
+                     tp == NLA_S32 || tp == NLA_U32 || \
+                     tp == NLA_S64 || tp == NLA_U64) + tp)
+#define NLA_ENSURE_NO_VALIDATION_PTR(tp)               \
+       (__NLA_ENSURE(tp != NLA_BITFIELD32 &&           \
+                     tp != NLA_REJECT &&               \
+                     tp != NLA_NESTED &&               \
+                     tp != NLA_NESTED_ARRAY) + tp)
+
+#define NLA_POLICY_RANGE(tp, _min, _max) {             \
+       .type = NLA_ENSURE_INT_TYPE(tp),                \
+       .validation_type = NLA_VALIDATE_RANGE,          \
+       .min = _min,                                    \
+       .max = _max                                     \
+}
+
+#define NLA_POLICY_MIN(tp, _min) {                     \
+       .type = NLA_ENSURE_INT_TYPE(tp),                \
+       .validation_type = NLA_VALIDATE_MIN,            \
+       .min = _min,                                    \
+}
+
+#define NLA_POLICY_MAX(tp, _max) {                     \
+       .type = NLA_ENSURE_INT_TYPE(tp),                \
+       .validation_type = NLA_VALIDATE_MAX,            \
+       .max = _max,                                    \
+}
+
+#define NLA_POLICY_VALIDATE_FN(tp, fn, ...) {          \
+       .type = NLA_ENSURE_NO_VALIDATION_PTR(tp),       \
+       .validation_type = NLA_VALIDATE_FUNCTION,       \
+       .validate = fn,                                 \
+       .len = __VA_ARGS__ + 0,                         \
+}
+
 /**
  * struct nl_info - netlink source information
  * @nlh: Netlink message header of original request
@@ -273,6 +373,9 @@ int nla_validate(const struct nlattr *head, int len, int maxtype,
 int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
              int len, const struct nla_policy *policy,
              struct netlink_ext_ack *extack);
+int nla_parse_strict(struct nlattr **tb, int maxtype, const struct nlattr *head,
+                    int len, const struct nla_policy *policy,
+                    struct netlink_ext_ack *extack);
 int nla_policy_len(const struct nla_policy *, int);
 struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype);
 size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize);
@@ -416,13 +519,29 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
                              const struct nla_policy *policy,
                              struct netlink_ext_ack *extack)
 {
-       if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
+       if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) {
+               NL_SET_ERR_MSG(extack, "Invalid header length");
                return -EINVAL;
+       }
 
        return nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
                         nlmsg_attrlen(nlh, hdrlen), policy, extack);
 }
 
+static inline int nlmsg_parse_strict(const struct nlmsghdr *nlh, int hdrlen,
+                                    struct nlattr *tb[], int maxtype,
+                                    const struct nla_policy *policy,
+                                    struct netlink_ext_ack *extack)
+{
+       if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) {
+               NL_SET_ERR_MSG(extack, "Invalid header length");
+               return -EINVAL;
+       }
+
+       return nla_parse_strict(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
+                               nlmsg_attrlen(nlh, hdrlen), policy, extack);
+}
+
 /**
  * nlmsg_find_attr - find a specific attribute in a netlink message
  * @nlh: netlink message header
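
A sketch pulling the new pieces together: range validation, a nested policy, and strict parsing (the attribute names are invented for the example):

	/* Invented attribute space for illustration. */
	enum {
		EX_ATTR_UNSPEC,
		EX_ATTR_PORT,		/* u16, 1..1023 */
		EX_ATTR_CONFIG,		/* nested, validated recursively */
		__EX_ATTR_MAX,
	};
	#define EX_ATTR_MAX (__EX_ATTR_MAX - 1)

	static const struct nla_policy ex_nested_policy[EX_ATTR_MAX + 1] = {
		[EX_ATTR_PORT]   = NLA_POLICY_RANGE(NLA_U16, 1, 1023),
	};

	static const struct nla_policy ex_policy[EX_ATTR_MAX + 1] = {
		[EX_ATTR_PORT]   = NLA_POLICY_RANGE(NLA_U16, 1, 1023),
		[EX_ATTR_CONFIG] = NLA_POLICY_NESTED(EX_ATTR_MAX,
						     ex_nested_policy),
	};

	/* nlmsg_parse_strict(nlh, hdrlen, tb, EX_ATTR_MAX, ex_policy,
	 * extack) then rejects unknown attributes instead of skipping
	 * them, reporting the reason through extack. */
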
index bbfe27f86d5f7b91a7f5e48864123538eafb784d..72ffb3120cedfd596d25dece88ca35ed61da4e86 100644 (file)
@@ -65,11 +65,6 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
        return block->q;
 }
 
-static inline struct net_device *tcf_block_dev(struct tcf_block *block)
-{
-       return tcf_block_q(block)->dev_queue->dev;
-}
-
 void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
 struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
                                         tc_setup_cb_t *cb, void *cb_ident);
@@ -122,11 +117,6 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
        return NULL;
 }
 
-static inline struct net_device *tcf_block_dev(struct tcf_block *block)
-{
-       return NULL;
-}
-
 static inline
 int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
                               void *cb_priv)
index 38cae35f6e16056c862b29dee8ceb46fe16fff7b..751549ac0a849144ab0382203ee5c877374523e2 100644 (file)
@@ -1492,6 +1492,7 @@ static inline void lock_sock(struct sock *sk)
        lock_sock_nested(sk, 0);
 }
 
+void __release_sock(struct sock *sk);
 void release_sock(struct sock *sk);
 
 /* BH context may only use the following locking interface. */
index ff15d8e0d525715b17671e64f6abdead9df0a8f3..0d2929223c703c0aaabef0d485aabf7dc707aae1 100644 (file)
@@ -732,7 +732,7 @@ void tcp_send_window_probe(struct sock *sk);
 
 static inline u64 tcp_clock_ns(void)
 {
-       return ktime_get_tai_ns();
+       return ktime_get_ns();
 }
 
 static inline u64 tcp_clock_us(void)
index 1615fb5ea11443c17be45885257f83ff878ca36f..5e853835597e7f8fee2a61052bc58e16b0f003b6 100644 (file)
@@ -101,13 +101,12 @@ struct tls_rec {
        struct list_head list;
        int tx_ready;
        int tx_flags;
-       struct scatterlist sg_plaintext_data[MAX_SKB_FRAGS];
-       struct scatterlist sg_encrypted_data[MAX_SKB_FRAGS];
+       int inplace_crypto;
 
        /* AAD | sg_plaintext_data | sg_tag */
-       struct scatterlist sg_aead_in[2];
+       struct scatterlist sg_plaintext_data[MAX_SKB_FRAGS + 1];
        /* AAD | sg_encrypted_data (data contains overhead for hdr&iv&tag) */
-       struct scatterlist sg_aead_out[2];
+       struct scatterlist sg_encrypted_data[MAX_SKB_FRAGS + 1];
 
        unsigned int sg_plaintext_size;
        unsigned int sg_encrypted_size;
index 8482a990b0bb8e781883d3e09ea84a6f345863ed..9e82cb391dea2b64fec14f8e6323c22d4c7b5c53 100644 (file)
@@ -443,8 +443,10 @@ int udpv4_offload_init(void);
 
 void udp_init(void);
 
+DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
 void udp_encap_enable(void);
 #if IS_ENABLED(CONFIG_IPV6)
+DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
 void udpv6_encap_enable(void);
 #endif
 
diff --git a/include/soc/mscc/ocelot_hsio.h b/include/soc/mscc/ocelot_hsio.h
new file mode 100644 (file)
index 0000000..43112dd
--- /dev/null
@@ -0,0 +1,859 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_HSIO_H_
+#define _MSCC_OCELOT_HSIO_H_
+
+#define HSIO_PLL5G_CFG0                        0x0000
+#define HSIO_PLL5G_CFG1                        0x0004
+#define HSIO_PLL5G_CFG2                        0x0008
+#define HSIO_PLL5G_CFG3                        0x000c
+#define HSIO_PLL5G_CFG4                        0x0010
+#define HSIO_PLL5G_CFG5                        0x0014
+#define HSIO_PLL5G_CFG6                        0x0018
+#define HSIO_PLL5G_STATUS0             0x001c
+#define HSIO_PLL5G_STATUS1             0x0020
+#define HSIO_PLL5G_BIST_CFG0           0x0024
+#define HSIO_PLL5G_BIST_CFG1           0x0028
+#define HSIO_PLL5G_BIST_CFG2           0x002c
+#define HSIO_PLL5G_BIST_STAT0          0x0030
+#define HSIO_PLL5G_BIST_STAT1          0x0034
+#define HSIO_RCOMP_CFG0                        0x0038
+#define HSIO_RCOMP_STATUS              0x003c
+#define HSIO_SYNC_ETH_CFG              0x0040
+#define HSIO_SYNC_ETH_PLL_CFG          0x0048
+#define HSIO_S1G_DES_CFG               0x004c
+#define HSIO_S1G_IB_CFG                        0x0050
+#define HSIO_S1G_OB_CFG                        0x0054
+#define HSIO_S1G_SER_CFG               0x0058
+#define HSIO_S1G_COMMON_CFG            0x005c
+#define HSIO_S1G_PLL_CFG               0x0060
+#define HSIO_S1G_PLL_STATUS            0x0064
+#define HSIO_S1G_DFT_CFG0              0x0068
+#define HSIO_S1G_DFT_CFG1              0x006c
+#define HSIO_S1G_DFT_CFG2              0x0070
+#define HSIO_S1G_TP_CFG                        0x0074
+#define HSIO_S1G_RC_PLL_BIST_CFG       0x0078
+#define HSIO_S1G_MISC_CFG              0x007c
+#define HSIO_S1G_DFT_STATUS            0x0080
+#define HSIO_S1G_MISC_STATUS           0x0084
+#define HSIO_MCB_S1G_ADDR_CFG          0x0088
+#define HSIO_S6G_DIG_CFG               0x008c
+#define HSIO_S6G_DFT_CFG0              0x0090
+#define HSIO_S6G_DFT_CFG1              0x0094
+#define HSIO_S6G_DFT_CFG2              0x0098
+#define HSIO_S6G_TP_CFG0               0x009c
+#define HSIO_S6G_TP_CFG1               0x00a0
+#define HSIO_S6G_RC_PLL_BIST_CFG       0x00a4
+#define HSIO_S6G_MISC_CFG              0x00a8
+#define HSIO_S6G_OB_ANEG_CFG           0x00ac
+#define HSIO_S6G_DFT_STATUS            0x00b0
+#define HSIO_S6G_ERR_CNT               0x00b4
+#define HSIO_S6G_MISC_STATUS           0x00b8
+#define HSIO_S6G_DES_CFG               0x00bc
+#define HSIO_S6G_IB_CFG                        0x00c0
+#define HSIO_S6G_IB_CFG1               0x00c4
+#define HSIO_S6G_IB_CFG2               0x00c8
+#define HSIO_S6G_IB_CFG3               0x00cc
+#define HSIO_S6G_IB_CFG4               0x00d0
+#define HSIO_S6G_IB_CFG5               0x00d4
+#define HSIO_S6G_OB_CFG                        0x00d8
+#define HSIO_S6G_OB_CFG1               0x00dc
+#define HSIO_S6G_SER_CFG               0x00e0
+#define HSIO_S6G_COMMON_CFG            0x00e4
+#define HSIO_S6G_PLL_CFG               0x00e8
+#define HSIO_S6G_ACJTAG_CFG            0x00ec
+#define HSIO_S6G_GP_CFG                        0x00f0
+#define HSIO_S6G_IB_STATUS0            0x00f4
+#define HSIO_S6G_IB_STATUS1            0x00f8
+#define HSIO_S6G_ACJTAG_STATUS         0x00fc
+#define HSIO_S6G_PLL_STATUS            0x0100
+#define HSIO_S6G_REVID                 0x0104
+#define HSIO_MCB_S6G_ADDR_CFG          0x0108
+#define HSIO_HW_CFG                    0x010c
+#define HSIO_HW_QSGMII_CFG             0x0110
+#define HSIO_HW_QSGMII_STAT            0x0114
+#define HSIO_CLK_CFG                   0x0118
+#define HSIO_TEMP_SENSOR_CTRL          0x011c
+#define HSIO_TEMP_SENSOR_CFG           0x0120
+#define HSIO_TEMP_SENSOR_STAT          0x0124
+
+#define HSIO_PLL5G_CFG0_ENA_ROT                           BIT(31)
+#define HSIO_PLL5G_CFG0_ENA_LANE                          BIT(30)
+#define HSIO_PLL5G_CFG0_ENA_CLKTREE                       BIT(29)
+#define HSIO_PLL5G_CFG0_DIV4                              BIT(28)
+#define HSIO_PLL5G_CFG0_ENA_LOCK_FINE                     BIT(27)
+#define HSIO_PLL5G_CFG0_SELBGV820(x)                      (((x) << 23) & GENMASK(26, 23))
+#define HSIO_PLL5G_CFG0_SELBGV820_M                       GENMASK(26, 23)
+#define HSIO_PLL5G_CFG0_SELBGV820_X(x)                    (((x) & GENMASK(26, 23)) >> 23)
+#define HSIO_PLL5G_CFG0_LOOP_BW_RES(x)                    (((x) << 18) & GENMASK(22, 18))
+#define HSIO_PLL5G_CFG0_LOOP_BW_RES_M                     GENMASK(22, 18)
+#define HSIO_PLL5G_CFG0_LOOP_BW_RES_X(x)                  (((x) & GENMASK(22, 18)) >> 18)
+#define HSIO_PLL5G_CFG0_SELCPI(x)                         (((x) << 16) & GENMASK(17, 16))
+#define HSIO_PLL5G_CFG0_SELCPI_M                          GENMASK(17, 16)
+#define HSIO_PLL5G_CFG0_SELCPI_X(x)                       (((x) & GENMASK(17, 16)) >> 16)
+#define HSIO_PLL5G_CFG0_ENA_VCO_CONTRH                    BIT(15)
+#define HSIO_PLL5G_CFG0_ENA_CP1                           BIT(14)
+#define HSIO_PLL5G_CFG0_ENA_VCO_BUF                       BIT(13)
+#define HSIO_PLL5G_CFG0_ENA_BIAS                          BIT(12)
+#define HSIO_PLL5G_CFG0_CPU_CLK_DIV(x)                    (((x) << 6) & GENMASK(11, 6))
+#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_M                     GENMASK(11, 6)
+#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_X(x)                  (((x) & GENMASK(11, 6)) >> 6)
+#define HSIO_PLL5G_CFG0_CORE_CLK_DIV(x)                   ((x) & GENMASK(5, 0))
+#define HSIO_PLL5G_CFG0_CORE_CLK_DIV_M                    GENMASK(5, 0)
+
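Every multi-bit field in this header comes as a triple: FIELD(x) packs a value, FIELD_M is the field mask, and FIELD_X(x) extracts the value; a sketch of a read-modify-write using the CFG0 bandgap field above (the MMIO base and readl/writel accessors are assumed driver context, the real driver may use regmap):

	/* Sketch: bump SELBGV820 by one, leaving other CFG0 bits alone. */
	static void example_bump_bandgap(void __iomem *regs)
	{
		u32 val = readl(regs + HSIO_PLL5G_CFG0);
		u8 bg = HSIO_PLL5G_CFG0_SELBGV820_X(val);	/* extract */

		val &= ~HSIO_PLL5G_CFG0_SELBGV820_M;		/* clear */
		val |= HSIO_PLL5G_CFG0_SELBGV820(bg + 1);	/* pack, masked */
		writel(val, regs + HSIO_PLL5G_CFG0);
	}
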
+#define HSIO_PLL5G_CFG1_ENA_DIRECT                        BIT(18)
+#define HSIO_PLL5G_CFG1_ROT_SPEED                         BIT(17)
+#define HSIO_PLL5G_CFG1_ROT_DIR                           BIT(16)
+#define HSIO_PLL5G_CFG1_READBACK_DATA_SEL                 BIT(15)
+#define HSIO_PLL5G_CFG1_RC_ENABLE                         BIT(14)
+#define HSIO_PLL5G_CFG1_RC_CTRL_DATA(x)                   (((x) << 6) & GENMASK(13, 6))
+#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_M                    GENMASK(13, 6)
+#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_X(x)                 (((x) & GENMASK(13, 6)) >> 6)
+#define HSIO_PLL5G_CFG1_QUARTER_RATE                      BIT(5)
+#define HSIO_PLL5G_CFG1_PWD_TX                            BIT(4)
+#define HSIO_PLL5G_CFG1_PWD_RX                            BIT(3)
+#define HSIO_PLL5G_CFG1_OUT_OF_RANGE_RECAL_ENA            BIT(2)
+#define HSIO_PLL5G_CFG1_HALF_RATE                         BIT(1)
+#define HSIO_PLL5G_CFG1_FORCE_SET_ENA                     BIT(0)
+
+#define HSIO_PLL5G_CFG2_ENA_TEST_MODE                     BIT(30)
+#define HSIO_PLL5G_CFG2_ENA_PFD_IN_FLIP                   BIT(29)
+#define HSIO_PLL5G_CFG2_ENA_VCO_NREF_TESTOUT              BIT(28)
+#define HSIO_PLL5G_CFG2_ENA_FBTESTOUT                     BIT(27)
+#define HSIO_PLL5G_CFG2_ENA_RCPLL                         BIT(26)
+#define HSIO_PLL5G_CFG2_ENA_CP2                           BIT(25)
+#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS1                   BIT(24)
+#define HSIO_PLL5G_CFG2_AMPC_SEL(x)                       (((x) << 16) & GENMASK(23, 16))
+#define HSIO_PLL5G_CFG2_AMPC_SEL_M                        GENMASK(23, 16)
+#define HSIO_PLL5G_CFG2_AMPC_SEL_X(x)                     (((x) & GENMASK(23, 16)) >> 16)
+#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS                    BIT(15)
+#define HSIO_PLL5G_CFG2_PWD_AMPCTRL_N                     BIT(14)
+#define HSIO_PLL5G_CFG2_ENA_AMPCTRL                       BIT(13)
+#define HSIO_PLL5G_CFG2_ENA_AMP_CTRL_FORCE                BIT(12)
+#define HSIO_PLL5G_CFG2_FRC_FSM_POR                       BIT(11)
+#define HSIO_PLL5G_CFG2_DISABLE_FSM_POR                   BIT(10)
+#define HSIO_PLL5G_CFG2_GAIN_TEST(x)                      (((x) << 5) & GENMASK(9, 5))
+#define HSIO_PLL5G_CFG2_GAIN_TEST_M                       GENMASK(9, 5)
+#define HSIO_PLL5G_CFG2_GAIN_TEST_X(x)                    (((x) & GENMASK(9, 5)) >> 5)
+#define HSIO_PLL5G_CFG2_EN_RESET_OVERRUN                  BIT(4)
+#define HSIO_PLL5G_CFG2_EN_RESET_LIM_DET                  BIT(3)
+#define HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET                  BIT(2)
+#define HSIO_PLL5G_CFG2_DISABLE_FSM                       BIT(1)
+#define HSIO_PLL5G_CFG2_ENA_GAIN_TEST                     BIT(0)
+
+#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL(x)               (((x) << 22) & GENMASK(23, 22))
+#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_M                GENMASK(23, 22)
+#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_X(x)             (((x) & GENMASK(23, 22)) >> 22)
+#define HSIO_PLL5G_CFG3_TESTOUT_SEL(x)                    (((x) << 19) & GENMASK(21, 19))
+#define HSIO_PLL5G_CFG3_TESTOUT_SEL_M                     GENMASK(21, 19)
+#define HSIO_PLL5G_CFG3_TESTOUT_SEL_X(x)                  (((x) & GENMASK(21, 19)) >> 19)
+#define HSIO_PLL5G_CFG3_ENA_ANA_TEST_OUT                  BIT(18)
+#define HSIO_PLL5G_CFG3_ENA_TEST_OUT                      BIT(17)
+#define HSIO_PLL5G_CFG3_SEL_FBDCLK                        BIT(16)
+#define HSIO_PLL5G_CFG3_SEL_CML_CMOS_PFD                  BIT(15)
+#define HSIO_PLL5G_CFG3_RST_FB_N                          BIT(14)
+#define HSIO_PLL5G_CFG3_FORCE_VCO_CONTRH                  BIT(13)
+#define HSIO_PLL5G_CFG3_FORCE_LO                          BIT(12)
+#define HSIO_PLL5G_CFG3_FORCE_HI                          BIT(11)
+#define HSIO_PLL5G_CFG3_FORCE_ENA                         BIT(10)
+#define HSIO_PLL5G_CFG3_FORCE_CP                          BIT(9)
+#define HSIO_PLL5G_CFG3_FBDIVSEL_TST_ENA                  BIT(8)
+#define HSIO_PLL5G_CFG3_FBDIVSEL(x)                       ((x) & GENMASK(7, 0))
+#define HSIO_PLL5G_CFG3_FBDIVSEL_M                        GENMASK(7, 0)
+
+#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL(x)                   (((x) << 16) & GENMASK(23, 16))
+#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_M                    GENMASK(23, 16)
+#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_X(x)                 (((x) & GENMASK(23, 16)) >> 16)
+#define HSIO_PLL5G_CFG4_IB_CTRL(x)                        ((x) & GENMASK(15, 0))
+#define HSIO_PLL5G_CFG4_IB_CTRL_M                         GENMASK(15, 0)
+
+#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL(x)                   (((x) << 16) & GENMASK(23, 16))
+#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_M                    GENMASK(23, 16)
+#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_X(x)                 (((x) & GENMASK(23, 16)) >> 16)
+#define HSIO_PLL5G_CFG5_OB_CTRL(x)                        ((x) & GENMASK(15, 0))
+#define HSIO_PLL5G_CFG5_OB_CTRL_M                         GENMASK(15, 0)
+
+#define HSIO_PLL5G_CFG6_REFCLK_SEL_SRC                    BIT(23)
+#define HSIO_PLL5G_CFG6_REFCLK_SEL(x)                     (((x) << 20) & GENMASK(22, 20))
+#define HSIO_PLL5G_CFG6_REFCLK_SEL_M                      GENMASK(22, 20)
+#define HSIO_PLL5G_CFG6_REFCLK_SEL_X(x)                   (((x) & GENMASK(22, 20)) >> 20)
+#define HSIO_PLL5G_CFG6_REFCLK_SRC                        BIT(19)
+#define HSIO_PLL5G_CFG6_POR_DEL_SEL(x)                    (((x) << 16) & GENMASK(17, 16))
+#define HSIO_PLL5G_CFG6_POR_DEL_SEL_M                     GENMASK(17, 16)
+#define HSIO_PLL5G_CFG6_POR_DEL_SEL_X(x)                  (((x) & GENMASK(17, 16)) >> 16)
+#define HSIO_PLL5G_CFG6_DIV125REF_SEL(x)                  (((x) << 8) & GENMASK(15, 8))
+#define HSIO_PLL5G_CFG6_DIV125REF_SEL_M                   GENMASK(15, 8)
+#define HSIO_PLL5G_CFG6_DIV125REF_SEL_X(x)                (((x) & GENMASK(15, 8)) >> 8)
+#define HSIO_PLL5G_CFG6_ENA_REFCLKC2                      BIT(7)
+#define HSIO_PLL5G_CFG6_ENA_FBCLKC2                       BIT(6)
+#define HSIO_PLL5G_CFG6_DDR_CLK_DIV(x)                    ((x) & GENMASK(5, 0))
+#define HSIO_PLL5G_CFG6_DDR_CLK_DIV_M                     GENMASK(5, 0)
+
+#define HSIO_PLL5G_STATUS0_RANGE_LIM                      BIT(12)
+#define HSIO_PLL5G_STATUS0_OUT_OF_RANGE_ERR               BIT(11)
+#define HSIO_PLL5G_STATUS0_CALIBRATION_ERR                BIT(10)
+#define HSIO_PLL5G_STATUS0_CALIBRATION_DONE               BIT(9)
+#define HSIO_PLL5G_STATUS0_READBACK_DATA(x)               (((x) << 1) & GENMASK(8, 1))
+#define HSIO_PLL5G_STATUS0_READBACK_DATA_M                GENMASK(8, 1)
+#define HSIO_PLL5G_STATUS0_READBACK_DATA_X(x)             (((x) & GENMASK(8, 1)) >> 1)
+#define HSIO_PLL5G_STATUS0_LOCK_STATUS                    BIT(0)
+
+#define HSIO_PLL5G_STATUS1_SIG_DEL(x)                     (((x) << 21) & GENMASK(28, 21))
+#define HSIO_PLL5G_STATUS1_SIG_DEL_M                      GENMASK(28, 21)
+#define HSIO_PLL5G_STATUS1_SIG_DEL_X(x)                   (((x) & GENMASK(28, 21)) >> 21)
+#define HSIO_PLL5G_STATUS1_GAIN_STAT(x)                   (((x) << 16) & GENMASK(20, 16))
+#define HSIO_PLL5G_STATUS1_GAIN_STAT_M                    GENMASK(20, 16)
+#define HSIO_PLL5G_STATUS1_GAIN_STAT_X(x)                 (((x) & GENMASK(20, 16)) >> 16)
+#define HSIO_PLL5G_STATUS1_FBCNT_DIF(x)                   (((x) << 4) & GENMASK(13, 4))
+#define HSIO_PLL5G_STATUS1_FBCNT_DIF_M                    GENMASK(13, 4)
+#define HSIO_PLL5G_STATUS1_FBCNT_DIF_X(x)                 (((x) & GENMASK(13, 4)) >> 4)
+#define HSIO_PLL5G_STATUS1_FSM_STAT(x)                    (((x) << 1) & GENMASK(3, 1))
+#define HSIO_PLL5G_STATUS1_FSM_STAT_M                     GENMASK(3, 1)
+#define HSIO_PLL5G_STATUS1_FSM_STAT_X(x)                  (((x) & GENMASK(3, 1)) >> 1)
+#define HSIO_PLL5G_STATUS1_FSM_LOCK                       BIT(0)
+
+#define HSIO_PLL5G_BIST_CFG0_PLLB_START_BIST              BIT(31)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_MEAS_MODE               BIT(30)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT(x)          (((x) << 20) & GENMASK(23, 20))
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_M           GENMASK(23, 20)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_X(x)        (((x) & GENMASK(23, 20)) >> 20)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT(x)          (((x) << 16) & GENMASK(19, 16))
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_M           GENMASK(19, 16)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_X(x)        (((x) & GENMASK(19, 16)) >> 16)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE(x)       ((x) & GENMASK(15, 0))
+#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE_M        GENMASK(15, 0)
+
+#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT(x)            (((x) << 4) & GENMASK(7, 4))
+#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_M             GENMASK(7, 4)
+#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_X(x)          (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_PLL5G_BIST_STAT0_PLLB_BUSY                   BIT(2)
+#define HSIO_PLL5G_BIST_STAT0_PLLB_DONE_N                 BIT(1)
+#define HSIO_PLL5G_BIST_STAT0_PLLB_FAIL                   BIT(0)
+
+#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT(x)             (((x) << 16) & GENMASK(31, 16))
+#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_M              GENMASK(31, 16)
+#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_X(x)           (((x) & GENMASK(31, 16)) >> 16)
+#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF(x)        ((x) & GENMASK(15, 0))
+#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF_M         GENMASK(15, 0)
+
+#define HSIO_RCOMP_CFG0_PWD_ENA                           BIT(13)
+#define HSIO_RCOMP_CFG0_RUN_CAL                           BIT(12)
+#define HSIO_RCOMP_CFG0_SPEED_SEL(x)                      (((x) << 10) & GENMASK(11, 10))
+#define HSIO_RCOMP_CFG0_SPEED_SEL_M                       GENMASK(11, 10)
+#define HSIO_RCOMP_CFG0_SPEED_SEL_X(x)                    (((x) & GENMASK(11, 10)) >> 10)
+#define HSIO_RCOMP_CFG0_MODE_SEL(x)                       (((x) << 8) & GENMASK(9, 8))
+#define HSIO_RCOMP_CFG0_MODE_SEL_M                        GENMASK(9, 8)
+#define HSIO_RCOMP_CFG0_MODE_SEL_X(x)                     (((x) & GENMASK(9, 8)) >> 8)
+#define HSIO_RCOMP_CFG0_FORCE_ENA                         BIT(4)
+#define HSIO_RCOMP_CFG0_RCOMP_VAL(x)                      ((x) & GENMASK(3, 0))
+#define HSIO_RCOMP_CFG0_RCOMP_VAL_M                       GENMASK(3, 0)
+
+#define HSIO_RCOMP_STATUS_BUSY                            BIT(12)
+#define HSIO_RCOMP_STATUS_DELTA_ALERT                     BIT(7)
+#define HSIO_RCOMP_STATUS_RCOMP(x)                        ((x) & GENMASK(3, 0))
+#define HSIO_RCOMP_STATUS_RCOMP_M                         GENMASK(3, 0)
+
+#define HSIO_SYNC_ETH_CFG_RSZ                             0x4
+
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC(x)             (((x) << 4) & GENMASK(7, 4))
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_M              GENMASK(7, 4)
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_X(x)           (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV(x)             (((x) << 1) & GENMASK(3, 1))
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_M              GENMASK(3, 1)
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_X(x)           (((x) & GENMASK(3, 1)) >> 1)
+#define HSIO_SYNC_ETH_CFG_RECO_CLK_ENA                    BIT(0)
+
+#define HSIO_SYNC_ETH_PLL_CFG_PLL_AUTO_SQUELCH_ENA        BIT(0)
+
+#define HSIO_S1G_DES_CFG_DES_PHS_CTRL(x)                  (((x) << 13) & GENMASK(16, 13))
+#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_M                   GENMASK(16, 13)
+#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_X(x)                (((x) & GENMASK(16, 13)) >> 13)
+#define HSIO_S1G_DES_CFG_DES_CPMD_SEL(x)                  (((x) << 11) & GENMASK(12, 11))
+#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_M                   GENMASK(12, 11)
+#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_X(x)                (((x) & GENMASK(12, 11)) >> 11)
+#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL(x)                 (((x) << 8) & GENMASK(10, 8))
+#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_M                  GENMASK(10, 8)
+#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_X(x)               (((x) & GENMASK(10, 8)) >> 8)
+#define HSIO_S1G_DES_CFG_DES_BW_ANA(x)                    (((x) << 5) & GENMASK(7, 5))
+#define HSIO_S1G_DES_CFG_DES_BW_ANA_M                     GENMASK(7, 5)
+#define HSIO_S1G_DES_CFG_DES_BW_ANA_X(x)                  (((x) & GENMASK(7, 5)) >> 5)
+#define HSIO_S1G_DES_CFG_DES_SWAP_ANA                     BIT(4)
+#define HSIO_S1G_DES_CFG_DES_BW_HYST(x)                   (((x) << 1) & GENMASK(3, 1))
+#define HSIO_S1G_DES_CFG_DES_BW_HYST_M                    GENMASK(3, 1)
+#define HSIO_S1G_DES_CFG_DES_BW_HYST_X(x)                 (((x) & GENMASK(3, 1)) >> 1)
+#define HSIO_S1G_DES_CFG_DES_SWAP_HYST                    BIT(0)
+
+#define HSIO_S1G_IB_CFG_IB_FX100_ENA                      BIT(27)
+#define HSIO_S1G_IB_CFG_ACJTAG_HYST(x)                    (((x) << 24) & GENMASK(26, 24))
+#define HSIO_S1G_IB_CFG_ACJTAG_HYST_M                     GENMASK(26, 24)
+#define HSIO_S1G_IB_CFG_ACJTAG_HYST_X(x)                  (((x) & GENMASK(26, 24)) >> 24)
+#define HSIO_S1G_IB_CFG_IB_DET_LEV(x)                     (((x) << 19) & GENMASK(21, 19))
+#define HSIO_S1G_IB_CFG_IB_DET_LEV_M                      GENMASK(21, 19)
+#define HSIO_S1G_IB_CFG_IB_DET_LEV_X(x)                   (((x) & GENMASK(21, 19)) >> 19)
+#define HSIO_S1G_IB_CFG_IB_HYST_LEV                       BIT(14)
+#define HSIO_S1G_IB_CFG_IB_ENA_CMV_TERM                   BIT(13)
+#define HSIO_S1G_IB_CFG_IB_ENA_DC_COUPLING                BIT(12)
+#define HSIO_S1G_IB_CFG_IB_ENA_DETLEV                     BIT(11)
+#define HSIO_S1G_IB_CFG_IB_ENA_HYST                       BIT(10)
+#define HSIO_S1G_IB_CFG_IB_ENA_OFFSET_COMP                BIT(9)
+#define HSIO_S1G_IB_CFG_IB_EQ_GAIN(x)                     (((x) << 6) & GENMASK(8, 6))
+#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_M                      GENMASK(8, 6)
+#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_X(x)                   (((x) & GENMASK(8, 6)) >> 6)
+#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ(x)             (((x) << 4) & GENMASK(5, 4))
+#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_M              GENMASK(5, 4)
+#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_X(x)           (((x) & GENMASK(5, 4)) >> 4)
+#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL(x)               ((x) & GENMASK(3, 0))
+#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL_M                GENMASK(3, 0)
+
+#define HSIO_S1G_OB_CFG_OB_SLP(x)                         (((x) << 17) & GENMASK(18, 17))
+#define HSIO_S1G_OB_CFG_OB_SLP_M                          GENMASK(18, 17)
+#define HSIO_S1G_OB_CFG_OB_SLP_X(x)                       (((x) & GENMASK(18, 17)) >> 17)
+#define HSIO_S1G_OB_CFG_OB_AMP_CTRL(x)                    (((x) << 13) & GENMASK(16, 13))
+#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_M                     GENMASK(16, 13)
+#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_X(x)                  (((x) & GENMASK(16, 13)) >> 13)
+#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL(x)               (((x) << 10) & GENMASK(12, 10))
+#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_M                GENMASK(12, 10)
+#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_X(x)             (((x) & GENMASK(12, 10)) >> 10)
+#define HSIO_S1G_OB_CFG_OB_DIS_VCM_CTRL                   BIT(9)
+#define HSIO_S1G_OB_CFG_OB_EN_MEAS_VREG                   BIT(8)
+#define HSIO_S1G_OB_CFG_OB_VCM_CTRL(x)                    (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_M                     GENMASK(7, 4)
+#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_X(x)                  (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL(x)               ((x) & GENMASK(3, 0))
+#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL_M                GENMASK(3, 0)
+
+#define HSIO_S1G_SER_CFG_SER_IDLE                         BIT(9)
+#define HSIO_S1G_SER_CFG_SER_DEEMPH                       BIT(8)
+#define HSIO_S1G_SER_CFG_SER_CPMD_SEL                     BIT(7)
+#define HSIO_S1G_SER_CFG_SER_SWAP_CPMD                    BIT(6)
+#define HSIO_S1G_SER_CFG_SER_ALISEL(x)                    (((x) << 4) & GENMASK(5, 4))
+#define HSIO_S1G_SER_CFG_SER_ALISEL_M                     GENMASK(5, 4)
+#define HSIO_S1G_SER_CFG_SER_ALISEL_X(x)                  (((x) & GENMASK(5, 4)) >> 4)
+#define HSIO_S1G_SER_CFG_SER_ENHYS                        BIT(3)
+#define HSIO_S1G_SER_CFG_SER_BIG_WIN                      BIT(2)
+#define HSIO_S1G_SER_CFG_SER_EN_WIN                       BIT(1)
+#define HSIO_S1G_SER_CFG_SER_ENALI                        BIT(0)
+
+#define HSIO_S1G_COMMON_CFG_SYS_RST                       BIT(31)
+#define HSIO_S1G_COMMON_CFG_SE_AUTO_SQUELCH_ENA           BIT(21)
+#define HSIO_S1G_COMMON_CFG_ENA_LANE                      BIT(18)
+#define HSIO_S1G_COMMON_CFG_PWD_RX                        BIT(17)
+#define HSIO_S1G_COMMON_CFG_PWD_TX                        BIT(16)
+#define HSIO_S1G_COMMON_CFG_LANE_CTRL(x)                  (((x) << 13) & GENMASK(15, 13))
+#define HSIO_S1G_COMMON_CFG_LANE_CTRL_M                   GENMASK(15, 13)
+#define HSIO_S1G_COMMON_CFG_LANE_CTRL_X(x)                (((x) & GENMASK(15, 13)) >> 13)
+#define HSIO_S1G_COMMON_CFG_ENA_DIRECT                    BIT(12)
+#define HSIO_S1G_COMMON_CFG_ENA_ELOOP                     BIT(11)
+#define HSIO_S1G_COMMON_CFG_ENA_FLOOP                     BIT(10)
+#define HSIO_S1G_COMMON_CFG_ENA_ILOOP                     BIT(9)
+#define HSIO_S1G_COMMON_CFG_ENA_PLOOP                     BIT(8)
+#define HSIO_S1G_COMMON_CFG_HRATE                         BIT(7)
+#define HSIO_S1G_COMMON_CFG_IF_MODE                       BIT(0)
+
+#define HSIO_S1G_PLL_CFG_PLL_ENA_FB_DIV2                  BIT(22)
+#define HSIO_S1G_PLL_CFG_PLL_ENA_RC_DIV2                  BIT(21)
+#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA(x)             (((x) << 8) & GENMASK(15, 8))
+#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_M              GENMASK(15, 8)
+#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x)           (((x) & GENMASK(15, 8)) >> 8)
+#define HSIO_S1G_PLL_CFG_PLL_FSM_ENA                      BIT(7)
+#define HSIO_S1G_PLL_CFG_PLL_FSM_FORCE_SET_ENA            BIT(6)
+#define HSIO_S1G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA            BIT(5)
+#define HSIO_S1G_PLL_CFG_PLL_RB_DATA_SEL                  BIT(3)
+
+#define HSIO_S1G_PLL_STATUS_PLL_CAL_NOT_DONE              BIT(12)
+#define HSIO_S1G_PLL_STATUS_PLL_CAL_ERR                   BIT(11)
+#define HSIO_S1G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR          BIT(10)
+#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA(x)                ((x) & GENMASK(7, 0))
+#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA_M                 GENMASK(7, 0)
+
+#define HSIO_S1G_DFT_CFG0_LAZYBIT                         BIT(31)
+#define HSIO_S1G_DFT_CFG0_INV_DIS                         BIT(23)
+#define HSIO_S1G_DFT_CFG0_PRBS_SEL(x)                     (((x) << 20) & GENMASK(21, 20))
+#define HSIO_S1G_DFT_CFG0_PRBS_SEL_M                      GENMASK(21, 20)
+#define HSIO_S1G_DFT_CFG0_PRBS_SEL_X(x)                   (((x) & GENMASK(21, 20)) >> 20)
+#define HSIO_S1G_DFT_CFG0_TEST_MODE(x)                    (((x) << 16) & GENMASK(18, 16))
+#define HSIO_S1G_DFT_CFG0_TEST_MODE_M                     GENMASK(18, 16)
+#define HSIO_S1G_DFT_CFG0_TEST_MODE_X(x)                  (((x) & GENMASK(18, 16)) >> 16)
+#define HSIO_S1G_DFT_CFG0_RX_PHS_CORR_DIS                 BIT(4)
+#define HSIO_S1G_DFT_CFG0_RX_PDSENS_ENA                   BIT(3)
+#define HSIO_S1G_DFT_CFG0_RX_DFT_ENA                      BIT(2)
+#define HSIO_S1G_DFT_CFG0_TX_DFT_ENA                      BIT(0)
+
+#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL(x)               (((x) << 8) & GENMASK(17, 8))
+#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_M                GENMASK(17, 8)
+#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_X(x)             (((x) & GENMASK(17, 8)) >> 8)
+#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ(x)                 (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_M                  GENMASK(7, 4)
+#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_X(x)               (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S1G_DFT_CFG1_TX_JI_ENA                       BIT(3)
+#define HSIO_S1G_DFT_CFG1_TX_WAVEFORM_SEL                 BIT(2)
+#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_DIR                  BIT(1)
+#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_ENA                  BIT(0)
+
+#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL(x)               (((x) << 8) & GENMASK(17, 8))
+#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_M                GENMASK(17, 8)
+#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_X(x)             (((x) & GENMASK(17, 8)) >> 8)
+#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ(x)                 (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_M                  GENMASK(7, 4)
+#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_X(x)               (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S1G_DFT_CFG2_RX_JI_ENA                       BIT(3)
+#define HSIO_S1G_DFT_CFG2_RX_WAVEFORM_SEL                 BIT(2)
+#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_DIR                  BIT(1)
+#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_ENA                  BIT(0)
+
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_ENA             BIT(20)
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x)     (((x) << 16) & GENMASK(17, 16))
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M      GENMASK(17, 16)
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x)   (((x) & GENMASK(17, 16)) >> 16)
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x)         (((x) << 8) & GENMASK(15, 8))
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M          GENMASK(15, 8)
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x)       (((x) & GENMASK(15, 8)) >> 8)
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x)          ((x) & GENMASK(7, 0))
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M           GENMASK(7, 0)
+
+#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE(x)          (((x) << 11) & GENMASK(12, 11))
+#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_M           GENMASK(12, 11)
+#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_X(x)        (((x) & GENMASK(12, 11)) >> 11)
+#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_SWAP             BIT(10)
+#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_MODE             BIT(9)
+#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_ENA              BIT(8)
+#define HSIO_S1G_MISC_CFG_RX_LPI_MODE_ENA                 BIT(5)
+#define HSIO_S1G_MISC_CFG_TX_LPI_MODE_ENA                 BIT(4)
+#define HSIO_S1G_MISC_CFG_RX_DATA_INV_ENA                 BIT(3)
+#define HSIO_S1G_MISC_CFG_TX_DATA_INV_ENA                 BIT(2)
+#define HSIO_S1G_MISC_CFG_LANE_RST                        BIT(0)
+
+#define HSIO_S1G_DFT_STATUS_PLL_BIST_NOT_DONE             BIT(7)
+#define HSIO_S1G_DFT_STATUS_PLL_BIST_FAILED               BIT(6)
+#define HSIO_S1G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR          BIT(5)
+#define HSIO_S1G_DFT_STATUS_BIST_ACTIVE                   BIT(3)
+#define HSIO_S1G_DFT_STATUS_BIST_NOSYNC                   BIT(2)
+#define HSIO_S1G_DFT_STATUS_BIST_COMPLETE_N               BIT(1)
+#define HSIO_S1G_DFT_STATUS_BIST_ERROR                    BIT(0)
+
+#define HSIO_S1G_MISC_STATUS_DES_100FX_PHASE_SEL          BIT(0)
+
+#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_WR_ONE_SHOT        BIT(31)
+#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_RD_ONE_SHOT        BIT(30)
+#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR(x)            ((x) & GENMASK(8, 0))
+#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR_M             GENMASK(8, 0)
+
+#define HSIO_S6G_DIG_CFG_GP(x)                            (((x) << 16) & GENMASK(18, 16))
+#define HSIO_S6G_DIG_CFG_GP_M                             GENMASK(18, 16)
+#define HSIO_S6G_DIG_CFG_GP_X(x)                          (((x) & GENMASK(18, 16)) >> 16)
+#define HSIO_S6G_DIG_CFG_TX_BIT_DOUBLING_MODE_ENA         BIT(7)
+#define HSIO_S6G_DIG_CFG_SIGDET_TESTMODE                  BIT(6)
+#define HSIO_S6G_DIG_CFG_SIGDET_AST(x)                    (((x) << 3) & GENMASK(5, 3))
+#define HSIO_S6G_DIG_CFG_SIGDET_AST_M                     GENMASK(5, 3)
+#define HSIO_S6G_DIG_CFG_SIGDET_AST_X(x)                  (((x) & GENMASK(5, 3)) >> 3)
+#define HSIO_S6G_DIG_CFG_SIGDET_DST(x)                    ((x) & GENMASK(2, 0))
+#define HSIO_S6G_DIG_CFG_SIGDET_DST_M                     GENMASK(2, 0)
+
+#define HSIO_S6G_DFT_CFG0_LAZYBIT                         BIT(31)
+#define HSIO_S6G_DFT_CFG0_INV_DIS                         BIT(23)
+#define HSIO_S6G_DFT_CFG0_PRBS_SEL(x)                     (((x) << 20) & GENMASK(21, 20))
+#define HSIO_S6G_DFT_CFG0_PRBS_SEL_M                      GENMASK(21, 20)
+#define HSIO_S6G_DFT_CFG0_PRBS_SEL_X(x)                   (((x) & GENMASK(21, 20)) >> 20)
+#define HSIO_S6G_DFT_CFG0_TEST_MODE(x)                    (((x) << 16) & GENMASK(18, 16))
+#define HSIO_S6G_DFT_CFG0_TEST_MODE_M                     GENMASK(18, 16)
+#define HSIO_S6G_DFT_CFG0_TEST_MODE_X(x)                  (((x) & GENMASK(18, 16)) >> 16)
+#define HSIO_S6G_DFT_CFG0_RX_PHS_CORR_DIS                 BIT(4)
+#define HSIO_S6G_DFT_CFG0_RX_PDSENS_ENA                   BIT(3)
+#define HSIO_S6G_DFT_CFG0_RX_DFT_ENA                      BIT(2)
+#define HSIO_S6G_DFT_CFG0_TX_DFT_ENA                      BIT(0)
+
+#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL(x)               (((x) << 8) & GENMASK(17, 8))
+#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_M                GENMASK(17, 8)
+#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_X(x)             (((x) & GENMASK(17, 8)) >> 8)
+#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ(x)                 (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_M                  GENMASK(7, 4)
+#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_X(x)               (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S6G_DFT_CFG1_TX_JI_ENA                       BIT(3)
+#define HSIO_S6G_DFT_CFG1_TX_WAVEFORM_SEL                 BIT(2)
+#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_DIR                  BIT(1)
+#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_ENA                  BIT(0)
+
+#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL(x)               (((x) << 8) & GENMASK(17, 8))
+#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_M                GENMASK(17, 8)
+#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_X(x)             (((x) & GENMASK(17, 8)) >> 8)
+#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ(x)                 (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_M                  GENMASK(7, 4)
+#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_X(x)               (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S6G_DFT_CFG2_RX_JI_ENA                       BIT(3)
+#define HSIO_S6G_DFT_CFG2_RX_WAVEFORM_SEL                 BIT(2)
+#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_DIR                  BIT(1)
+#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_ENA                  BIT(0)
+
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_ENA             BIT(20)
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x)     (((x) << 16) & GENMASK(19, 16))
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M      GENMASK(19, 16)
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x)   (((x) & GENMASK(19, 16)) >> 16)
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x)         (((x) << 8) & GENMASK(15, 8))
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M          GENMASK(15, 8)
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x)       (((x) & GENMASK(15, 8)) >> 8)
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x)          ((x) & GENMASK(7, 0))
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M           GENMASK(7, 0)
+
+#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK(x)                 (((x) << 13) & GENMASK(14, 13))
+#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_M                  GENMASK(14, 13)
+#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_X(x)               (((x) & GENMASK(14, 13)) >> 13)
+#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE(x)          (((x) << 11) & GENMASK(12, 11))
+#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_M           GENMASK(12, 11)
+#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_X(x)        (((x) & GENMASK(12, 11)) >> 11)
+#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_SWAP             BIT(10)
+#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_MODE             BIT(9)
+#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_ENA              BIT(8)
+#define HSIO_S6G_MISC_CFG_RX_BUS_FLIP_ENA                 BIT(7)
+#define HSIO_S6G_MISC_CFG_TX_BUS_FLIP_ENA                 BIT(6)
+#define HSIO_S6G_MISC_CFG_RX_LPI_MODE_ENA                 BIT(5)
+#define HSIO_S6G_MISC_CFG_TX_LPI_MODE_ENA                 BIT(4)
+#define HSIO_S6G_MISC_CFG_RX_DATA_INV_ENA                 BIT(3)
+#define HSIO_S6G_MISC_CFG_TX_DATA_INV_ENA                 BIT(2)
+#define HSIO_S6G_MISC_CFG_LANE_RST                        BIT(0)
+
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0(x)               (((x) << 23) & GENMASK(28, 23))
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_M                GENMASK(28, 23)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_X(x)             (((x) & GENMASK(28, 23)) >> 23)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1(x)               (((x) << 18) & GENMASK(22, 18))
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_M                GENMASK(22, 18)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_X(x)             (((x) & GENMASK(22, 18)) >> 18)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC(x)                (((x) << 13) & GENMASK(17, 13))
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_M                 GENMASK(17, 13)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_X(x)              (((x) & GENMASK(17, 13)) >> 13)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS(x)             (((x) << 6) & GENMASK(8, 6))
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_M              GENMASK(8, 6)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_X(x)           (((x) & GENMASK(8, 6)) >> 6)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV(x)                 ((x) & GENMASK(5, 0))
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV_M                  GENMASK(5, 0)
+
+#define HSIO_S6G_DFT_STATUS_PRBS_SYNC_STAT                BIT(8)
+#define HSIO_S6G_DFT_STATUS_PLL_BIST_NOT_DONE             BIT(7)
+#define HSIO_S6G_DFT_STATUS_PLL_BIST_FAILED               BIT(6)
+#define HSIO_S6G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR          BIT(5)
+#define HSIO_S6G_DFT_STATUS_BIST_ACTIVE                   BIT(3)
+#define HSIO_S6G_DFT_STATUS_BIST_NOSYNC                   BIT(2)
+#define HSIO_S6G_DFT_STATUS_BIST_COMPLETE_N               BIT(1)
+#define HSIO_S6G_DFT_STATUS_BIST_ERROR                    BIT(0)
+
+#define HSIO_S6G_MISC_STATUS_DES_100FX_PHASE_SEL          BIT(0)
+
+#define HSIO_S6G_DES_CFG_DES_PHS_CTRL(x)                  (((x) << 13) & GENMASK(16, 13))
+#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_M                   GENMASK(16, 13)
+#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_X(x)                (((x) & GENMASK(16, 13)) >> 13)
+#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL(x)                 (((x) << 10) & GENMASK(12, 10))
+#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_M                  GENMASK(12, 10)
+#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_X(x)               (((x) & GENMASK(12, 10)) >> 10)
+#define HSIO_S6G_DES_CFG_DES_CPMD_SEL(x)                  (((x) << 8) & GENMASK(9, 8))
+#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_M                   GENMASK(9, 8)
+#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_X(x)                (((x) & GENMASK(9, 8)) >> 8)
+#define HSIO_S6G_DES_CFG_DES_BW_HYST(x)                   (((x) << 5) & GENMASK(7, 5))
+#define HSIO_S6G_DES_CFG_DES_BW_HYST_M                    GENMASK(7, 5)
+#define HSIO_S6G_DES_CFG_DES_BW_HYST_X(x)                 (((x) & GENMASK(7, 5)) >> 5)
+#define HSIO_S6G_DES_CFG_DES_SWAP_HYST                    BIT(4)
+#define HSIO_S6G_DES_CFG_DES_BW_ANA(x)                    (((x) << 1) & GENMASK(3, 1))
+#define HSIO_S6G_DES_CFG_DES_BW_ANA_M                     GENMASK(3, 1)
+#define HSIO_S6G_DES_CFG_DES_BW_ANA_X(x)                  (((x) & GENMASK(3, 1)) >> 1)
+#define HSIO_S6G_DES_CFG_DES_SWAP_ANA                     BIT(0)
+
+#define HSIO_S6G_IB_CFG_IB_SOFSI(x)                       (((x) << 29) & GENMASK(30, 29))
+#define HSIO_S6G_IB_CFG_IB_SOFSI_M                        GENMASK(30, 29)
+#define HSIO_S6G_IB_CFG_IB_SOFSI_X(x)                     (((x) & GENMASK(30, 29)) >> 29)
+#define HSIO_S6G_IB_CFG_IB_VBULK_SEL                      BIT(28)
+#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ(x)                    (((x) << 24) & GENMASK(27, 24))
+#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_M                     GENMASK(27, 24)
+#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_X(x)                  (((x) & GENMASK(27, 24)) >> 24)
+#define HSIO_S6G_IB_CFG_IB_ICML_ADJ(x)                    (((x) << 20) & GENMASK(23, 20))
+#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_M                     GENMASK(23, 20)
+#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_X(x)                  (((x) & GENMASK(23, 20)) >> 20)
+#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL(x)               (((x) << 18) & GENMASK(19, 18))
+#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_M                GENMASK(19, 18)
+#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_X(x)             (((x) & GENMASK(19, 18)) >> 18)
+#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL(x)             (((x) << 15) & GENMASK(17, 15))
+#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_M              GENMASK(17, 15)
+#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_X(x)           (((x) & GENMASK(17, 15)) >> 15)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP(x)              (((x) << 13) & GENMASK(14, 13))
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_M               GENMASK(14, 13)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_X(x)            (((x) & GENMASK(14, 13)) >> 13)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID(x)             (((x) << 11) & GENMASK(12, 11))
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_M              GENMASK(12, 11)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_X(x)           (((x) & GENMASK(12, 11)) >> 11)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP(x)              (((x) << 9) & GENMASK(10, 9))
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_M               GENMASK(10, 9)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_X(x)            (((x) & GENMASK(10, 9)) >> 9)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET(x)          (((x) << 7) & GENMASK(8, 7))
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_M           GENMASK(8, 7)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_X(x)        (((x) & GENMASK(8, 7)) >> 7)
+#define HSIO_S6G_IB_CFG_IB_ANA_TEST_ENA                   BIT(6)
+#define HSIO_S6G_IB_CFG_IB_SIG_DET_ENA                    BIT(5)
+#define HSIO_S6G_IB_CFG_IB_CONCUR                         BIT(4)
+#define HSIO_S6G_IB_CFG_IB_CAL_ENA                        BIT(3)
+#define HSIO_S6G_IB_CFG_IB_SAM_ENA                        BIT(2)
+#define HSIO_S6G_IB_CFG_IB_EQZ_ENA                        BIT(1)
+#define HSIO_S6G_IB_CFG_IB_REG_ENA                        BIT(0)
+
+#define HSIO_S6G_IB_CFG1_IB_TJTAG(x)                      (((x) << 17) & GENMASK(21, 17))
+#define HSIO_S6G_IB_CFG1_IB_TJTAG_M                       GENMASK(21, 17)
+#define HSIO_S6G_IB_CFG1_IB_TJTAG_X(x)                    (((x) & GENMASK(21, 17)) >> 17)
+#define HSIO_S6G_IB_CFG1_IB_TSDET(x)                      (((x) << 12) & GENMASK(16, 12))
+#define HSIO_S6G_IB_CFG1_IB_TSDET_M                       GENMASK(16, 12)
+#define HSIO_S6G_IB_CFG1_IB_TSDET_X(x)                    (((x) & GENMASK(16, 12)) >> 12)
+#define HSIO_S6G_IB_CFG1_IB_SCALY(x)                      (((x) << 8) & GENMASK(11, 8))
+#define HSIO_S6G_IB_CFG1_IB_SCALY_M                       GENMASK(11, 8)
+#define HSIO_S6G_IB_CFG1_IB_SCALY_X(x)                    (((x) & GENMASK(11, 8)) >> 8)
+#define HSIO_S6G_IB_CFG1_IB_FILT_HP                       BIT(7)
+#define HSIO_S6G_IB_CFG1_IB_FILT_MID                      BIT(6)
+#define HSIO_S6G_IB_CFG1_IB_FILT_LP                       BIT(5)
+#define HSIO_S6G_IB_CFG1_IB_FILT_OFFSET                   BIT(4)
+#define HSIO_S6G_IB_CFG1_IB_FRC_HP                        BIT(3)
+#define HSIO_S6G_IB_CFG1_IB_FRC_MID                       BIT(2)
+#define HSIO_S6G_IB_CFG1_IB_FRC_LP                        BIT(1)
+#define HSIO_S6G_IB_CFG1_IB_FRC_OFFSET                    BIT(0)
+
+#define HSIO_S6G_IB_CFG2_IB_TINFV(x)                      (((x) << 27) & GENMASK(29, 27))
+#define HSIO_S6G_IB_CFG2_IB_TINFV_M                       GENMASK(29, 27)
+#define HSIO_S6G_IB_CFG2_IB_TINFV_X(x)                    (((x) & GENMASK(29, 27)) >> 27)
+#define HSIO_S6G_IB_CFG2_IB_OINFI(x)                      (((x) << 22) & GENMASK(26, 22))
+#define HSIO_S6G_IB_CFG2_IB_OINFI_M                       GENMASK(26, 22)
+#define HSIO_S6G_IB_CFG2_IB_OINFI_X(x)                    (((x) & GENMASK(26, 22)) >> 22)
+#define HSIO_S6G_IB_CFG2_IB_TAUX(x)                       (((x) << 19) & GENMASK(21, 19))
+#define HSIO_S6G_IB_CFG2_IB_TAUX_M                        GENMASK(21, 19)
+#define HSIO_S6G_IB_CFG2_IB_TAUX_X(x)                     (((x) & GENMASK(21, 19)) >> 19)
+#define HSIO_S6G_IB_CFG2_IB_OINFS(x)                      (((x) << 16) & GENMASK(18, 16))
+#define HSIO_S6G_IB_CFG2_IB_OINFS_M                       GENMASK(18, 16)
+#define HSIO_S6G_IB_CFG2_IB_OINFS_X(x)                    (((x) & GENMASK(18, 16)) >> 16)
+#define HSIO_S6G_IB_CFG2_IB_OCALS(x)                      (((x) << 10) & GENMASK(15, 10))
+#define HSIO_S6G_IB_CFG2_IB_OCALS_M                       GENMASK(15, 10)
+#define HSIO_S6G_IB_CFG2_IB_OCALS_X(x)                    (((x) & GENMASK(15, 10)) >> 10)
+#define HSIO_S6G_IB_CFG2_IB_TCALV(x)                      (((x) << 5) & GENMASK(9, 5))
+#define HSIO_S6G_IB_CFG2_IB_TCALV_M                       GENMASK(9, 5)
+#define HSIO_S6G_IB_CFG2_IB_TCALV_X(x)                    (((x) & GENMASK(9, 5)) >> 5)
+#define HSIO_S6G_IB_CFG2_IB_UMAX(x)                       (((x) << 3) & GENMASK(4, 3))
+#define HSIO_S6G_IB_CFG2_IB_UMAX_M                        GENMASK(4, 3)
+#define HSIO_S6G_IB_CFG2_IB_UMAX_X(x)                     (((x) & GENMASK(4, 3)) >> 3)
+#define HSIO_S6G_IB_CFG2_IB_UREG(x)                       ((x) & GENMASK(2, 0))
+#define HSIO_S6G_IB_CFG2_IB_UREG_M                        GENMASK(2, 0)
+
+#define HSIO_S6G_IB_CFG3_IB_INI_HP(x)                     (((x) << 18) & GENMASK(23, 18))
+#define HSIO_S6G_IB_CFG3_IB_INI_HP_M                      GENMASK(23, 18)
+#define HSIO_S6G_IB_CFG3_IB_INI_HP_X(x)                   (((x) & GENMASK(23, 18)) >> 18)
+#define HSIO_S6G_IB_CFG3_IB_INI_MID(x)                    (((x) << 12) & GENMASK(17, 12))
+#define HSIO_S6G_IB_CFG3_IB_INI_MID_M                     GENMASK(17, 12)
+#define HSIO_S6G_IB_CFG3_IB_INI_MID_X(x)                  (((x) & GENMASK(17, 12)) >> 12)
+#define HSIO_S6G_IB_CFG3_IB_INI_LP(x)                     (((x) << 6) & GENMASK(11, 6))
+#define HSIO_S6G_IB_CFG3_IB_INI_LP_M                      GENMASK(11, 6)
+#define HSIO_S6G_IB_CFG3_IB_INI_LP_X(x)                   (((x) & GENMASK(11, 6)) >> 6)
+#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET(x)                 ((x) & GENMASK(5, 0))
+#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET_M                  GENMASK(5, 0)
+
+#define HSIO_S6G_IB_CFG4_IB_MAX_HP(x)                     (((x) << 18) & GENMASK(23, 18))
+#define HSIO_S6G_IB_CFG4_IB_MAX_HP_M                      GENMASK(23, 18)
+#define HSIO_S6G_IB_CFG4_IB_MAX_HP_X(x)                   (((x) & GENMASK(23, 18)) >> 18)
+#define HSIO_S6G_IB_CFG4_IB_MAX_MID(x)                    (((x) << 12) & GENMASK(17, 12))
+#define HSIO_S6G_IB_CFG4_IB_MAX_MID_M                     GENMASK(17, 12)
+#define HSIO_S6G_IB_CFG4_IB_MAX_MID_X(x)                  (((x) & GENMASK(17, 12)) >> 12)
+#define HSIO_S6G_IB_CFG4_IB_MAX_LP(x)                     (((x) << 6) & GENMASK(11, 6))
+#define HSIO_S6G_IB_CFG4_IB_MAX_LP_M                      GENMASK(11, 6)
+#define HSIO_S6G_IB_CFG4_IB_MAX_LP_X(x)                   (((x) & GENMASK(11, 6)) >> 6)
+#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET(x)                 ((x) & GENMASK(5, 0))
+#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET_M                  GENMASK(5, 0)
+
+#define HSIO_S6G_IB_CFG5_IB_MIN_HP(x)                     (((x) << 18) & GENMASK(23, 18))
+#define HSIO_S6G_IB_CFG5_IB_MIN_HP_M                      GENMASK(23, 18)
+#define HSIO_S6G_IB_CFG5_IB_MIN_HP_X(x)                   (((x) & GENMASK(23, 18)) >> 18)
+#define HSIO_S6G_IB_CFG5_IB_MIN_MID(x)                    (((x) << 12) & GENMASK(17, 12))
+#define HSIO_S6G_IB_CFG5_IB_MIN_MID_M                     GENMASK(17, 12)
+#define HSIO_S6G_IB_CFG5_IB_MIN_MID_X(x)                  (((x) & GENMASK(17, 12)) >> 12)
+#define HSIO_S6G_IB_CFG5_IB_MIN_LP(x)                     (((x) << 6) & GENMASK(11, 6))
+#define HSIO_S6G_IB_CFG5_IB_MIN_LP_M                      GENMASK(11, 6)
+#define HSIO_S6G_IB_CFG5_IB_MIN_LP_X(x)                   (((x) & GENMASK(11, 6)) >> 6)
+#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET(x)                 ((x) & GENMASK(5, 0))
+#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET_M                  GENMASK(5, 0)
+
+#define HSIO_S6G_OB_CFG_OB_IDLE                           BIT(31)
+#define HSIO_S6G_OB_CFG_OB_ENA1V_MODE                     BIT(30)
+#define HSIO_S6G_OB_CFG_OB_POL                            BIT(29)
+#define HSIO_S6G_OB_CFG_OB_POST0(x)                       (((x) << 23) & GENMASK(28, 23))
+#define HSIO_S6G_OB_CFG_OB_POST0_M                        GENMASK(28, 23)
+#define HSIO_S6G_OB_CFG_OB_POST0_X(x)                     (((x) & GENMASK(28, 23)) >> 23)
+#define HSIO_S6G_OB_CFG_OB_PREC(x)                        (((x) << 18) & GENMASK(22, 18))
+#define HSIO_S6G_OB_CFG_OB_PREC_M                         GENMASK(22, 18)
+#define HSIO_S6G_OB_CFG_OB_PREC_X(x)                      (((x) & GENMASK(22, 18)) >> 18)
+#define HSIO_S6G_OB_CFG_OB_R_ADJ_MUX                      BIT(17)
+#define HSIO_S6G_OB_CFG_OB_R_ADJ_PDR                      BIT(16)
+#define HSIO_S6G_OB_CFG_OB_POST1(x)                       (((x) << 11) & GENMASK(15, 11))
+#define HSIO_S6G_OB_CFG_OB_POST1_M                        GENMASK(15, 11)
+#define HSIO_S6G_OB_CFG_OB_POST1_X(x)                     (((x) & GENMASK(15, 11)) >> 11)
+#define HSIO_S6G_OB_CFG_OB_R_COR                          BIT(10)
+#define HSIO_S6G_OB_CFG_OB_SEL_RCTRL                      BIT(9)
+#define HSIO_S6G_OB_CFG_OB_SR_H                           BIT(8)
+#define HSIO_S6G_OB_CFG_OB_SR(x)                          (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S6G_OB_CFG_OB_SR_M                           GENMASK(7, 4)
+#define HSIO_S6G_OB_CFG_OB_SR_X(x)                        (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL(x)               ((x) & GENMASK(3, 0))
+#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL_M                GENMASK(3, 0)
+
+#define HSIO_S6G_OB_CFG1_OB_ENA_CAS(x)                    (((x) << 6) & GENMASK(8, 6))
+#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_M                     GENMASK(8, 6)
+#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_X(x)                  (((x) & GENMASK(8, 6)) >> 6)
+#define HSIO_S6G_OB_CFG1_OB_LEV(x)                        ((x) & GENMASK(5, 0))
+#define HSIO_S6G_OB_CFG1_OB_LEV_M                         GENMASK(5, 0)
+
+#define HSIO_S6G_SER_CFG_SER_4TAP_ENA                     BIT(8)
+#define HSIO_S6G_SER_CFG_SER_CPMD_SEL                     BIT(7)
+#define HSIO_S6G_SER_CFG_SER_SWAP_CPMD                    BIT(6)
+#define HSIO_S6G_SER_CFG_SER_ALISEL(x)                    (((x) << 4) & GENMASK(5, 4))
+#define HSIO_S6G_SER_CFG_SER_ALISEL_M                     GENMASK(5, 4)
+#define HSIO_S6G_SER_CFG_SER_ALISEL_X(x)                  (((x) & GENMASK(5, 4)) >> 4)
+#define HSIO_S6G_SER_CFG_SER_ENHYS                        BIT(3)
+#define HSIO_S6G_SER_CFG_SER_BIG_WIN                      BIT(2)
+#define HSIO_S6G_SER_CFG_SER_EN_WIN                       BIT(1)
+#define HSIO_S6G_SER_CFG_SER_ENALI                        BIT(0)
+
+#define HSIO_S6G_COMMON_CFG_SYS_RST                       BIT(17)
+#define HSIO_S6G_COMMON_CFG_SE_DIV2_ENA                   BIT(16)
+#define HSIO_S6G_COMMON_CFG_SE_AUTO_SQUELCH_ENA           BIT(15)
+#define HSIO_S6G_COMMON_CFG_ENA_LANE                      BIT(14)
+#define HSIO_S6G_COMMON_CFG_PWD_RX                        BIT(13)
+#define HSIO_S6G_COMMON_CFG_PWD_TX                        BIT(12)
+#define HSIO_S6G_COMMON_CFG_LANE_CTRL(x)                  (((x) << 9) & GENMASK(11, 9))
+#define HSIO_S6G_COMMON_CFG_LANE_CTRL_M                   GENMASK(11, 9)
+#define HSIO_S6G_COMMON_CFG_LANE_CTRL_X(x)                (((x) & GENMASK(11, 9)) >> 9)
+#define HSIO_S6G_COMMON_CFG_ENA_DIRECT                    BIT(8)
+#define HSIO_S6G_COMMON_CFG_ENA_ELOOP                     BIT(7)
+#define HSIO_S6G_COMMON_CFG_ENA_FLOOP                     BIT(6)
+#define HSIO_S6G_COMMON_CFG_ENA_ILOOP                     BIT(5)
+#define HSIO_S6G_COMMON_CFG_ENA_PLOOP                     BIT(4)
+#define HSIO_S6G_COMMON_CFG_HRATE                         BIT(3)
+#define HSIO_S6G_COMMON_CFG_QRATE                         BIT(2)
+#define HSIO_S6G_COMMON_CFG_IF_MODE(x)                    ((x) & GENMASK(1, 0))
+#define HSIO_S6G_COMMON_CFG_IF_MODE_M                     GENMASK(1, 0)
+
+#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS(x)                  (((x) << 16) & GENMASK(17, 16))
+#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_M                   GENMASK(17, 16)
+#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_X(x)                (((x) & GENMASK(17, 16)) >> 16)
+#define HSIO_S6G_PLL_CFG_PLL_DIV4                         BIT(15)
+#define HSIO_S6G_PLL_CFG_PLL_ENA_ROT                      BIT(14)
+#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA(x)             (((x) << 6) & GENMASK(13, 6))
+#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_M              GENMASK(13, 6)
+#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x)           (((x) & GENMASK(13, 6)) >> 6)
+#define HSIO_S6G_PLL_CFG_PLL_FSM_ENA                      BIT(5)
+#define HSIO_S6G_PLL_CFG_PLL_FSM_FORCE_SET_ENA            BIT(4)
+#define HSIO_S6G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA            BIT(3)
+#define HSIO_S6G_PLL_CFG_PLL_RB_DATA_SEL                  BIT(2)
+#define HSIO_S6G_PLL_CFG_PLL_ROT_DIR                      BIT(1)
+#define HSIO_S6G_PLL_CFG_PLL_ROT_FRQ                      BIT(0)
+
+#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_N            BIT(5)
+#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_P            BIT(4)
+#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_CLK               BIT(3)
+#define HSIO_S6G_ACJTAG_CFG_OB_DIRECT                     BIT(2)
+#define HSIO_S6G_ACJTAG_CFG_ACJTAG_ENA                    BIT(1)
+#define HSIO_S6G_ACJTAG_CFG_JTAG_CTRL_ENA                 BIT(0)
+
+#define HSIO_S6G_GP_CFG_GP_MSB(x)                         (((x) << 16) & GENMASK(31, 16))
+#define HSIO_S6G_GP_CFG_GP_MSB_M                          GENMASK(31, 16)
+#define HSIO_S6G_GP_CFG_GP_MSB_X(x)                       (((x) & GENMASK(31, 16)) >> 16)
+#define HSIO_S6G_GP_CFG_GP_LSB(x)                         ((x) & GENMASK(15, 0))
+#define HSIO_S6G_GP_CFG_GP_LSB_M                          GENMASK(15, 0)
+
+#define HSIO_S6G_IB_STATUS0_IB_CAL_DONE                   BIT(8)
+#define HSIO_S6G_IB_STATUS0_IB_HP_GAIN_ACT                BIT(7)
+#define HSIO_S6G_IB_STATUS0_IB_MID_GAIN_ACT               BIT(6)
+#define HSIO_S6G_IB_STATUS0_IB_LP_GAIN_ACT                BIT(5)
+#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ACT                 BIT(4)
+#define HSIO_S6G_IB_STATUS0_IB_OFFSET_VLD                 BIT(3)
+#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ERR                 BIT(2)
+#define HSIO_S6G_IB_STATUS0_IB_OFFSDIR                    BIT(1)
+#define HSIO_S6G_IB_STATUS0_IB_SIG_DET                    BIT(0)
+
+#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT(x)            (((x) << 18) & GENMASK(23, 18))
+#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_M             GENMASK(23, 18)
+#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_X(x)          (((x) & GENMASK(23, 18)) >> 18)
+#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT(x)           (((x) << 12) & GENMASK(17, 12))
+#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_M            GENMASK(17, 12)
+#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_X(x)         (((x) & GENMASK(17, 12)) >> 12)
+#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT(x)            (((x) << 6) & GENMASK(11, 6))
+#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_M             GENMASK(11, 6)
+#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_X(x)          (((x) & GENMASK(11, 6)) >> 6)
+#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT(x)             ((x) & GENMASK(5, 0))
+#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT_M              GENMASK(5, 0)
+
+#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_N         BIT(2)
+#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_P         BIT(1)
+#define HSIO_S6G_ACJTAG_STATUS_IB_DIRECT                  BIT(0)
+
+#define HSIO_S6G_PLL_STATUS_PLL_CAL_NOT_DONE              BIT(10)
+#define HSIO_S6G_PLL_STATUS_PLL_CAL_ERR                   BIT(9)
+#define HSIO_S6G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR          BIT(8)
+#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA(x)                ((x) & GENMASK(7, 0))
+#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA_M                 GENMASK(7, 0)
+
+#define HSIO_S6G_REVID_SERDES_REV(x)                      (((x) << 26) & GENMASK(31, 26))
+#define HSIO_S6G_REVID_SERDES_REV_M                       GENMASK(31, 26)
+#define HSIO_S6G_REVID_SERDES_REV_X(x)                    (((x) & GENMASK(31, 26)) >> 26)
+#define HSIO_S6G_REVID_RCPLL_REV(x)                       (((x) << 21) & GENMASK(25, 21))
+#define HSIO_S6G_REVID_RCPLL_REV_M                        GENMASK(25, 21)
+#define HSIO_S6G_REVID_RCPLL_REV_X(x)                     (((x) & GENMASK(25, 21)) >> 21)
+#define HSIO_S6G_REVID_SER_REV(x)                         (((x) << 16) & GENMASK(20, 16))
+#define HSIO_S6G_REVID_SER_REV_M                          GENMASK(20, 16)
+#define HSIO_S6G_REVID_SER_REV_X(x)                       (((x) & GENMASK(20, 16)) >> 16)
+#define HSIO_S6G_REVID_DES_REV(x)                         (((x) << 10) & GENMASK(15, 10))
+#define HSIO_S6G_REVID_DES_REV_M                          GENMASK(15, 10)
+#define HSIO_S6G_REVID_DES_REV_X(x)                       (((x) & GENMASK(15, 10)) >> 10)
+#define HSIO_S6G_REVID_OB_REV(x)                          (((x) << 5) & GENMASK(9, 5))
+#define HSIO_S6G_REVID_OB_REV_M                           GENMASK(9, 5)
+#define HSIO_S6G_REVID_OB_REV_X(x)                        (((x) & GENMASK(9, 5)) >> 5)
+#define HSIO_S6G_REVID_IB_REV(x)                          ((x) & GENMASK(4, 0))
+#define HSIO_S6G_REVID_IB_REV_M                           GENMASK(4, 0)
+
+#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_WR_ONE_SHOT        BIT(31)
+#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_RD_ONE_SHOT        BIT(30)
+#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR(x)            ((x) & GENMASK(24, 0))
+#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR_M             GENMASK(24, 0)
+
+#define HSIO_HW_CFG_DEV2G5_10_MODE                        BIT(6)
+#define HSIO_HW_CFG_DEV1G_9_MODE                          BIT(5)
+#define HSIO_HW_CFG_DEV1G_6_MODE                          BIT(4)
+#define HSIO_HW_CFG_DEV1G_5_MODE                          BIT(3)
+#define HSIO_HW_CFG_DEV1G_4_MODE                          BIT(2)
+#define HSIO_HW_CFG_PCIE_ENA                              BIT(1)
+#define HSIO_HW_CFG_QSGMII_ENA                            BIT(0)
+
+#define HSIO_HW_QSGMII_CFG_SHYST_DIS                      BIT(3)
+#define HSIO_HW_QSGMII_CFG_E_DET_ENA                      BIT(2)
+#define HSIO_HW_QSGMII_CFG_USE_I1_ENA                     BIT(1)
+#define HSIO_HW_QSGMII_CFG_FLIP_LANES                     BIT(0)
+
+#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS(x)           (((x) << 1) & GENMASK(6, 1))
+#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_M            GENMASK(6, 1)
+#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_X(x)         (((x) & GENMASK(6, 1)) >> 1)
+#define HSIO_HW_QSGMII_STAT_SYNC                          BIT(0)
+
+#define HSIO_CLK_CFG_CLKDIV_PHY(x)                        (((x) << 1) & GENMASK(8, 1))
+#define HSIO_CLK_CFG_CLKDIV_PHY_M                         GENMASK(8, 1)
+#define HSIO_CLK_CFG_CLKDIV_PHY_X(x)                      (((x) & GENMASK(8, 1)) >> 1)
+#define HSIO_CLK_CFG_CLKDIV_PHY_DIS                       BIT(0)
+
+#define HSIO_TEMP_SENSOR_CTRL_FORCE_TEMP_RD               BIT(5)
+#define HSIO_TEMP_SENSOR_CTRL_FORCE_RUN                   BIT(4)
+#define HSIO_TEMP_SENSOR_CTRL_FORCE_NO_RST                BIT(3)
+#define HSIO_TEMP_SENSOR_CTRL_FORCE_POWER_UP              BIT(2)
+#define HSIO_TEMP_SENSOR_CTRL_FORCE_CLK                   BIT(1)
+#define HSIO_TEMP_SENSOR_CTRL_SAMPLE_ENA                  BIT(0)
+
+#define HSIO_TEMP_SENSOR_CFG_RUN_WID(x)                   (((x) << 8) & GENMASK(15, 8))
+#define HSIO_TEMP_SENSOR_CFG_RUN_WID_M                    GENMASK(15, 8)
+#define HSIO_TEMP_SENSOR_CFG_RUN_WID_X(x)                 (((x) & GENMASK(15, 8)) >> 8)
+#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER(x)                ((x) & GENMASK(7, 0))
+#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER_M                 GENMASK(7, 0)
+
+#define HSIO_TEMP_SENSOR_STAT_TEMP_VALID                  BIT(8)
+#define HSIO_TEMP_SENSOR_STAT_TEMP(x)                     ((x) & GENMASK(7, 0))
+#define HSIO_TEMP_SENSOR_STAT_TEMP_M                      GENMASK(7, 0)
+
+#endif
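
These field macros follow one consistent triple per field: FIELD(x) shifts a value into position and masks it, FIELD_M is the bare mask, and FIELD_X(x) extracts the field back out of a register word. Because the pack macro masks after shifting, out-of-range values are silently truncated to the field width. A minimal read-modify-write sketch (the "regs" base, the ioread32()/iowrite32() accessors and the S6G_PLL_CFG offset name are illustrative assumptions, not taken from this header):

    /* Sketch only: the MMIO base and register offset are assumed. */
    u32 val = ioread32(regs + S6G_PLL_CFG);               /* hypothetical offset */

    val &= ~HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_M;         /* clear the field */
    val |= HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA(120);       /* pack a new value */
    iowrite32(val, regs + S6G_PLL_CFG);

    u32 cur = HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_X(val);  /* extract: cur == 120 */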
index 7113728459451d50f08e5db83680ddf21aca05f9..705b33d1e395e86cd8889d9dff3b2f93bc28a608 100644
@@ -70,33 +70,6 @@ TRACE_EVENT(mm_migrate_pages,
                __print_symbolic(__entry->mode, MIGRATE_MODE),
                __print_symbolic(__entry->reason, MIGRATE_REASON))
 );
-
-TRACE_EVENT(mm_numa_migrate_ratelimit,
-
-       TP_PROTO(struct task_struct *p, int dst_nid, unsigned long nr_pages),
-
-       TP_ARGS(p, dst_nid, nr_pages),
-
-       TP_STRUCT__entry(
-               __array(        char,           comm,   TASK_COMM_LEN)
-               __field(        pid_t,          pid)
-               __field(        int,            dst_nid)
-               __field(        unsigned long,  nr_pages)
-       ),
-
-       TP_fast_assign(
-               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-               __entry->pid            = p->pid;
-               __entry->dst_nid        = dst_nid;
-               __entry->nr_pages       = nr_pages;
-       ),
-
-       TP_printk("comm=%s pid=%d dst_nid=%d nr_pages=%lu",
-               __entry->comm,
-               __entry->pid,
-               __entry->dst_nid,
-               __entry->nr_pages)
-);
 #endif /* _TRACE_MIGRATE_H */
 
 /* This part must be outside protection */
index 196587b8f204de13da0529c3cce46b68df75b4ac..837393fa897bb764264741ec2051f163841f0a4d 100644
@@ -56,7 +56,6 @@ enum rxrpc_peer_trace {
        rxrpc_peer_new,
        rxrpc_peer_processing,
        rxrpc_peer_put,
-       rxrpc_peer_queued_error,
 };
 
 enum rxrpc_conn_trace {
@@ -257,8 +256,7 @@ enum rxrpc_tx_point {
        EM(rxrpc_peer_got,                      "GOT") \
        EM(rxrpc_peer_new,                      "NEW") \
        EM(rxrpc_peer_processing,               "PRO") \
-       EM(rxrpc_peer_put,                      "PUT") \
-       E_(rxrpc_peer_queued_error,             "QER")
+       E_(rxrpc_peer_put,                      "PUT")
 
 #define rxrpc_conn_traces \
        EM(rxrpc_conn_got,                      "GOT") \
index e4732d3c2998264857772faac64409c69ff8e43d..b0f8e87235bdf4b599b52895637d9bd6329887fa 100644
@@ -26,7 +26,9 @@
 #define HUGETLB_FLAG_ENCODE_2MB                (21 << HUGETLB_FLAG_ENCODE_SHIFT)
 #define HUGETLB_FLAG_ENCODE_8MB                (23 << HUGETLB_FLAG_ENCODE_SHIFT)
 #define HUGETLB_FLAG_ENCODE_16MB       (24 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_32MB       (25 << HUGETLB_FLAG_ENCODE_SHIFT)
 #define HUGETLB_FLAG_ENCODE_256MB      (28 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_512MB      (29 << HUGETLB_FLAG_ENCODE_SHIFT)
 #define HUGETLB_FLAG_ENCODE_1GB                (30 << HUGETLB_FLAG_ENCODE_SHIFT)
 #define HUGETLB_FLAG_ENCODE_2GB                (31 << HUGETLB_FLAG_ENCODE_SHIFT)
 #define HUGETLB_FLAG_ENCODE_16GB       (34 << HUGETLB_FLAG_ENCODE_SHIFT)
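
The encoded constant is just log2 of the huge page size, which is why the new entries use 25 and 29: 2^25 bytes = 32MB and 2^29 bytes = 512MB. A compile-time check of that relationship (an illustrative aside, not part of the patch):

    _Static_assert((1UL << 25) == 32UL << 20,  "2^25 bytes == 32MB");
    _Static_assert((1UL << 29) == 512UL << 20, "2^29 bytes == 512MB");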
diff --git a/include/uapi/linux/dns_resolver.h b/include/uapi/linux/dns_resolver.h
new file mode 100644
index 0000000..129745f
--- /dev/null
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/* DNS resolver interface definitions.
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_LINUX_DNS_RESOLVER_H
+#define _UAPI_LINUX_DNS_RESOLVER_H
+
+#include <linux/types.h>
+
+/*
+ * Type of payload.
+ */
+enum dns_payload_content_type {
+       DNS_PAYLOAD_IS_SERVER_LIST      = 0, /* List of servers, requested by srv=1 */
+};
+
+/*
+ * Type of address that might be found in an address record.
+ */
+enum dns_payload_address_type {
+       DNS_ADDRESS_IS_IPV4             = 0, /* 4-byte AF_INET address */
+       DNS_ADDRESS_IS_IPV6             = 1, /* 16-byte AF_INET6 address */
+};
+
+/*
+ * Type of protocol used to access a server.
+ */
+enum dns_payload_protocol_type {
+       DNS_SERVER_PROTOCOL_UNSPECIFIED = 0,
+       DNS_SERVER_PROTOCOL_UDP         = 1, /* Use UDP to talk to the server */
+       DNS_SERVER_PROTOCOL_TCP         = 2, /* Use TCP to talk to the server */
+};
+
+/*
+ * Source of record included in DNS resolver payload.
+ */
+enum dns_record_source {
+       DNS_RECORD_UNAVAILABLE          = 0, /* No source available (empty record) */
+       DNS_RECORD_FROM_CONFIG          = 1, /* From local configuration data */
+       DNS_RECORD_FROM_DNS_A           = 2, /* From DNS A or AAAA record */
+       DNS_RECORD_FROM_DNS_AFSDB       = 3, /* From DNS AFSDB record */
+       DNS_RECORD_FROM_DNS_SRV         = 4, /* From DNS SRV record */
+       DNS_RECORD_FROM_NSS             = 5, /* From NSS */
+       NR__dns_record_source
+};
+
+/*
+ * Status of record included in DNS resolver payload.
+ */
+enum dns_lookup_status {
+       DNS_LOOKUP_NOT_DONE             = 0, /* No lookup has been made */
+       DNS_LOOKUP_GOOD                 = 1, /* Good records obtained */
+       DNS_LOOKUP_GOOD_WITH_BAD        = 2, /* Good records, some decoding errors */
+       DNS_LOOKUP_BAD                  = 3, /* Couldn't decode results */
+       DNS_LOOKUP_GOT_NOT_FOUND        = 4, /* Got a "Not Found" result */
+       DNS_LOOKUP_GOT_LOCAL_FAILURE    = 5, /* Local failure during lookup */
+       DNS_LOOKUP_GOT_TEMP_FAILURE     = 6, /* Temporary failure during lookup */
+       DNS_LOOKUP_GOT_NS_FAILURE       = 7, /* Name server failure */
+       NR__dns_lookup_status
+};
+
+/*
+ * Header at the beginning of a binary-format payload.
+ */
+struct dns_payload_header {
+       __u8            zero;           /* Zero byte: marks this as not being text */
+       __u8            content;        /* enum dns_payload_content_type */
+       __u8            version;        /* Encoding version */
+} __packed;
+
+/*
+ * Header at the beginning of a V1 server list.  This is followed directly by
+ * the server records.  Each server record begins with a struct of type
+ * dns_server_list_v1_server.
+ */
+struct dns_server_list_v1_header {
+       struct dns_payload_header hdr;
+       __u8            source;         /* enum dns_record_source */
+       __u8            status;         /* enum dns_lookup_status */
+       __u8            nr_servers;     /* Number of server records following this */
+} __packed;
+
+/*
+ * Header at the beginning of each V1 server record.  This is followed by the
+ * characters of the name with no NUL-terminator, followed by the address
+ * records for that server.  Each address record begins with a struct of type
+ * dns_server_list_v1_address.
+ */
+struct dns_server_list_v1_server {
+       __u16           name_len;       /* Length of name (LE) */
+       __u16           priority;       /* Priority (as SRV record) (LE) */
+       __u16           weight;         /* Weight (as SRV record) (LE) */
+       __u16           port;           /* UDP/TCP port number (LE) */
+       __u8            source;         /* enum dns_record_source */
+       __u8            status;         /* enum dns_lookup_status */
+       __u8            protocol;       /* enum dns_payload_protocol_type */
+       __u8            nr_addrs;
+} __packed;
+
+/*
+ * Header at the beginning of each V1 address record.  This is followed by the
+ * bytes of the address, 4 for IPV4 and 16 for IPV6.
+ */
+struct dns_server_list_v1_address {
+       __u8            address_type;   /* enum dns_payload_address_type */
+} __packed;
+
+#endif /* _UAPI_LINUX_DNS_RESOLVER_H */
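
Taken together, the structs above describe a packed binary payload: a v1 header (which embeds the generic payload header), then nr_servers server records, each immediately followed by its unterminated name and then its nr_addrs address records. A hedged userspace walk of such a buffer (bounds checks omitted; assumes the structs are visible with their packed layout and the payload is complete and valid):

    #include <stdint.h>
    #include <endian.h>
    #include <linux/dns_resolver.h>

    static const uint8_t *walk_v1(const uint8_t *p)
    {
            const struct dns_server_list_v1_header *h = (const void *)p;

            p += sizeof(*h);
            for (unsigned int i = 0; i < h->nr_servers; i++) {
                    const struct dns_server_list_v1_server *s = (const void *)p;

                    /* The name follows the record, with no NUL terminator. */
                    p = (const uint8_t *)(s + 1) + le16toh(s->name_len);

                    for (unsigned int a = 0; a < s->nr_addrs; a++) {
                            const struct dns_server_list_v1_address *ad =
                                    (const void *)p;

                            p += sizeof(*ad) +
                                 (ad->address_type == DNS_ADDRESS_IS_IPV4 ? 4 : 16);
                    }
            }
            return p;
    }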
index 015a4c0bbb47d6e9cabc6aac2c254e8f6c7ee960..7a8a26751c2317ee30bf5f84e2be9fc977c9d2f9 100644
@@ -25,7 +25,9 @@
 #define MFD_HUGE_2MB   HUGETLB_FLAG_ENCODE_2MB
 #define MFD_HUGE_8MB   HUGETLB_FLAG_ENCODE_8MB
 #define MFD_HUGE_16MB  HUGETLB_FLAG_ENCODE_16MB
+#define MFD_HUGE_32MB  HUGETLB_FLAG_ENCODE_32MB
 #define MFD_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define MFD_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB
 #define MFD_HUGE_1GB   HUGETLB_FLAG_ENCODE_1GB
 #define MFD_HUGE_2GB   HUGETLB_FLAG_ENCODE_2GB
 #define MFD_HUGE_16GB  HUGETLB_FLAG_ENCODE_16GB
index bfd5938fede6c1ba3b71d096cd36127da2837042..d0f515d53299ea5784ffdb61dd1b829b04fd045c 100644
@@ -28,7 +28,9 @@
 #define MAP_HUGE_2MB   HUGETLB_FLAG_ENCODE_2MB
 #define MAP_HUGE_8MB   HUGETLB_FLAG_ENCODE_8MB
 #define MAP_HUGE_16MB  HUGETLB_FLAG_ENCODE_16MB
+#define MAP_HUGE_32MB  HUGETLB_FLAG_ENCODE_32MB
 #define MAP_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define MAP_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB
 #define MAP_HUGE_1GB   HUGETLB_FLAG_ENCODE_1GB
 #define MAP_HUGE_2GB   HUGETLB_FLAG_ENCODE_2GB
 #define MAP_HUGE_16GB  HUGETLB_FLAG_ENCODE_16GB
index e23290ffdc77d7d44395e22614264988a5381f69..5444e76870bbc97696c55f8caed791b6e56d1f8c 100644
@@ -826,12 +826,14 @@ enum nft_meta_keys {
  * @NFT_RT_NEXTHOP4: routing nexthop for IPv4
  * @NFT_RT_NEXTHOP6: routing nexthop for IPv6
  * @NFT_RT_TCPMSS: fetch current path tcp mss
+ * @NFT_RT_XFRM: boolean, skb->dst->xfrm != NULL
  */
 enum nft_rt_keys {
        NFT_RT_CLASSID,
        NFT_RT_NEXTHOP4,
        NFT_RT_NEXTHOP6,
        NFT_RT_TCPMSS,
+       NFT_RT_XFRM,
        __NFT_RT_MAX
 };
 #define NFT_RT_MAX             (__NFT_RT_MAX - 1)
@@ -1174,6 +1176,21 @@ enum nft_quota_attributes {
 };
 #define NFTA_QUOTA_MAX         (__NFTA_QUOTA_MAX - 1)
 
+/**
+ * enum nft_secmark_attributes - nf_tables secmark object netlink attributes
+ *
+ * @NFTA_SECMARK_CTX: security context (NLA_STRING)
+ */
+enum nft_secmark_attributes {
+       NFTA_SECMARK_UNSPEC,
+       NFTA_SECMARK_CTX,
+       __NFTA_SECMARK_MAX,
+};
+#define NFTA_SECMARK_MAX       (__NFTA_SECMARK_MAX - 1)
+
+/* Max security context length */
+#define NFT_SECMARK_CTX_MAXLEN         256
+
 /**
  * enum nft_reject_types - nf_tables reject expression reject types
  *
@@ -1430,7 +1447,8 @@ enum nft_ct_timeout_timeout_attributes {
 #define NFT_OBJECT_CONNLIMIT   5
 #define NFT_OBJECT_TUNNEL      6
 #define NFT_OBJECT_CT_TIMEOUT  7
-#define __NFT_OBJECT_MAX       8
+#define NFT_OBJECT_SECMARK     8
+#define __NFT_OBJECT_MAX       9
 #define NFT_OBJECT_MAX         (__NFT_OBJECT_MAX - 1)
 
 /**
@@ -1512,6 +1530,35 @@ enum nft_devices_attributes {
 };
 #define NFTA_DEVICE_MAX                (__NFTA_DEVICE_MAX - 1)
 
+/**
+ * enum nft_xfrm_attributes - nf_tables xfrm expr netlink attributes
+ *
+ * @NFTA_XFRM_DREG: destination register (NLA_U32)
+ * @NFTA_XFRM_KEY: enum nft_xfrm_keys (NLA_U32)
+ * @NFTA_XFRM_DIR: direction (NLA_U8)
+ * @NFTA_XFRM_SPNUM: index in secpath array (NLA_U32)
+ */
+enum nft_xfrm_attributes {
+       NFTA_XFRM_UNSPEC,
+       NFTA_XFRM_DREG,
+       NFTA_XFRM_KEY,
+       NFTA_XFRM_DIR,
+       NFTA_XFRM_SPNUM,
+       __NFTA_XFRM_MAX
+};
+#define NFTA_XFRM_MAX (__NFTA_XFRM_MAX - 1)
+
+enum nft_xfrm_keys {
+       NFT_XFRM_KEY_UNSPEC,
+       NFT_XFRM_KEY_DADDR_IP4,
+       NFT_XFRM_KEY_DADDR_IP6,
+       NFT_XFRM_KEY_SADDR_IP4,
+       NFT_XFRM_KEY_SADDR_IP6,
+       NFT_XFRM_KEY_REQID,
+       NFT_XFRM_KEY_SPI,
+       __NFT_XFRM_KEY_MAX,
+};
+#define NFT_XFRM_KEY_MAX (__NFT_XFRM_KEY_MAX - 1)
 
 /**
  * enum nft_trace_attributes - nf_tables trace netlink attributes
index e96dfa1b34f7ff8a118105b936b2e1324d7ebe94..b74e370d613346b1669082313ef6de4b6ed3ed4b 100644
@@ -22,4 +22,20 @@ struct xt_cgroup_info_v1 {
        void            *priv __attribute__((aligned(8)));
 };
 
+#define XT_CGROUP_PATH_MAX     512
+
+struct xt_cgroup_info_v2 {
+       __u8            has_path;
+       __u8            has_classid;
+       __u8            invert_path;
+       __u8            invert_classid;
+       union {
+               char    path[XT_CGROUP_PATH_MAX];
+               __u32   classid;
+       };
+
+       /* kernel internal data */
+       void            *priv __attribute__((aligned(8)));
+};
+
 #endif /* _UAPI_XT_CGROUP_H */
index f3ba5d9e58b6da4120db1c87877937315e68bbbb..d72fd52adbba62ab280c5bb79741681666ac3d11 100644
@@ -15,9 +15,11 @@ struct xt_quota_info {
        __u32 flags;
        __u32 pad;
        __aligned_u64 quota;
-
-       /* Used internally by the kernel */
-       struct xt_quota_priv    *master;
+#ifdef __KERNEL__
+       atomic64_t counter;
+#else
+       __aligned_u64 remain;
+#endif
 };
 
 #endif /* _XT_QUOTA_H */
index 776bc92e91180725e75f0291b1635234d6b6875f..486ed1f0c0bc17f48dca895ebf9581aa7d69278d 100644
@@ -155,6 +155,7 @@ enum nlmsgerr_attrs {
 #define NETLINK_LIST_MEMBERSHIPS       9
 #define NETLINK_CAP_ACK                        10
 #define NETLINK_EXT_ACK                        11
+#define NETLINK_DUMP_STRICT_CHK                12
 
 struct nl_pktinfo {
        __u32   group;
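
NETLINK_DUMP_STRICT_CHK is a per-socket opt-in: once set, the kernel rejects dump requests carrying malformed headers or unknown attributes instead of silently ignoring them. A usage sketch (fd is assumed to be an already-open netlink socket):

    #include <sys/socket.h>
    #include <linux/netlink.h>

    int one = 1;

    /* Opt in to strict validation of our dump requests. */
    if (setsockopt(fd, SOL_NETLINK, NETLINK_DUMP_STRICT_CHK,
                   &one, sizeof(one)) < 0)
            perror("setsockopt(NETLINK_DUMP_STRICT_CHK)");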
index e9b7244ac3819b7cf56c912a2337aa0e1bde820c..89ee47c2f17d86fba9a37733b5593680ceefcf00 100644
@@ -1084,4 +1084,50 @@ enum {
        CAKE_ATM_MAX
 };
 
+
+/* TAPRIO */
+enum {
+       TC_TAPRIO_CMD_SET_GATES = 0x00,
+       TC_TAPRIO_CMD_SET_AND_HOLD = 0x01,
+       TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02,
+};
+
+enum {
+       TCA_TAPRIO_SCHED_ENTRY_UNSPEC,
+       TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */
+       TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */
+       TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */
+       TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */
+       __TCA_TAPRIO_SCHED_ENTRY_MAX,
+};
+#define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1)
+
+/* The format for the schedule entry list is:
+ * [TCA_TAPRIO_SCHED_ENTRY_LIST]
+ *   [TCA_TAPRIO_SCHED_ENTRY]
+ *     [TCA_TAPRIO_SCHED_ENTRY_CMD]
+ *     [TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]
+ *     [TCA_TAPRIO_SCHED_ENTRY_INTERVAL]
+ */
+enum {
+       TCA_TAPRIO_SCHED_UNSPEC,
+       TCA_TAPRIO_SCHED_ENTRY,
+       __TCA_TAPRIO_SCHED_MAX,
+};
+
+#define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1)
+
+enum {
+       TCA_TAPRIO_ATTR_UNSPEC,
+       TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */
+       TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */
+       TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */
+       TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */
+       TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */
+       TCA_TAPRIO_PAD,
+       __TCA_TAPRIO_ATTR_MAX,
+};
+
+#define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1)
+
 #endif
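
The nesting in the comment above maps directly onto nested netlink attributes. A sketch of building one gate entry with libmnl (the mnl_attr_* helpers are real libmnl calls; the already-prepared nlh qdisc message and the example gate mask/interval values are assumptions):

    #include <libmnl/libmnl.h>
    #include <linux/pkt_sched.h>

    struct nlattr *list, *entry;

    list  = mnl_attr_nest_start(nlh, TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
    entry = mnl_attr_nest_start(nlh, TCA_TAPRIO_SCHED_ENTRY);
    mnl_attr_put_u8(nlh,  TCA_TAPRIO_SCHED_ENTRY_CMD, TC_TAPRIO_CMD_SET_GATES);
    mnl_attr_put_u32(nlh, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, 0x1);    /* queue 0 open */
    mnl_attr_put_u32(nlh, TCA_TAPRIO_SCHED_ENTRY_INTERVAL, 300000);  /* ns */
    mnl_attr_nest_end(nlh, entry);
    mnl_attr_nest_end(nlh, list);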
index dde1344f047cf04c541a21f4d983c9eb3c606ef5..6507ad0afc81d93713abee6f5790451dd4499d29 100644
@@ -65,7 +65,9 @@ struct shmid_ds {
 #define SHM_HUGE_2MB   HUGETLB_FLAG_ENCODE_2MB
 #define SHM_HUGE_8MB   HUGETLB_FLAG_ENCODE_8MB
 #define SHM_HUGE_16MB  HUGETLB_FLAG_ENCODE_16MB
+#define SHM_HUGE_32MB  HUGETLB_FLAG_ENCODE_32MB
 #define SHM_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define SHM_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB
 #define SHM_HUGE_1GB   HUGETLB_FLAG_ENCODE_1GB
 #define SHM_HUGE_2GB   HUGETLB_FLAG_ENCODE_2GB
 #define SHM_HUGE_16GB  HUGETLB_FLAG_ENCODE_16GB
index 4cd402e4cfeb603e2417a3796c3d9b22f3022f89..1c65fb357395eace45f25a7c6a37d91ebe30a722 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -206,7 +206,7 @@ err:
         * Callers of shm_lock() must validate the status of the returned ipc
         * object pointer and error out as appropriate.
         */
-       return (void *)ipcp;
+       return ERR_CAST(ipcp);
 }
 
 static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
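
ERR_CAST() (from include/linux/err.h) re-types an error pointer while preserving the encoded errno; the old bare (void *) cast did the same thing but hid the intent from readers and static checkers. A sketch of the usual caller-side idiom for such returns (not a verbatim quote of ipc/shm.c):

    struct shmid_kernel *shp = shm_lock(ns, id);

    if (IS_ERR(shp))                 /* the error pointer survives the cast */
            return PTR_ERR(shp);     /* recover the encoded errno */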
index 944eb297465fd1c648fc41069e8ea6bb3ff6c976..c97a8f968638c6da0c2ec32c591753f69af59e15 100644
@@ -130,7 +130,7 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
        struct bpf_cgroup_storage *storage;
        struct bpf_storage_buffer *new;
 
-       if (flags & BPF_NOEXIST)
+       if (flags != BPF_ANY && flags != BPF_EXIST)
                return -EINVAL;
 
        storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
@@ -261,6 +261,9 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
        if (attr->key_size != sizeof(struct bpf_cgroup_storage_key))
                return ERR_PTR(-EINVAL);
 
+       if (attr->value_size == 0)
+               return ERR_PTR(-EINVAL);
+
        if (attr->value_size > PAGE_SIZE)
                return ERR_PTR(-E2BIG);
 
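
After this change, cgroup storage updates accept only BPF_ANY or BPF_EXIST: the storage element always exists once the map is attached, so BPF_NOEXIST can never succeed and is now rejected up front. A userspace sketch using libbpf's bpf_map_update_elem() (map_fd, cg_id, type and buf are assumed to be set up elsewhere):

    #include <bpf/bpf.h>
    #include <linux/bpf.h>

    struct bpf_cgroup_storage_key key = {
            .cgroup_inode_id = cg_id,
            .attach_type     = type,
    };

    /* BPF_NOEXIST now fails with -EINVAL; BPF_ANY and BPF_EXIST succeed. */
    int err = bpf_map_update_elem(map_fd, &key, buf, BPF_ANY);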
index 73cc136915fe282b425306b378f747b0468bd838..3f93a548a6426fc763f79d543f2c1f464ac08e81 100644
@@ -3144,6 +3144,15 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
        u64 umin_val, umax_val;
        u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
 
+       if (insn_bitness == 32) {
+               /* Relevant for 32-bit RSH: Information can propagate towards
+                * LSB, so it isn't sufficient to only truncate the output to
+                * 32 bits.
+                */
+               coerce_reg_to_size(dst_reg, 4);
+               coerce_reg_to_size(&src_reg, 4);
+       }
+
        smin_val = src_reg.smin_value;
        smax_val = src_reg.smax_value;
        umin_val = src_reg.umin_value;
@@ -3379,7 +3388,6 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
        if (BPF_CLASS(insn->code) != BPF_ALU64) {
                /* 32-bit ALU ops are (32,32)->32 */
                coerce_reg_to_size(dst_reg, 4);
-               coerce_reg_to_size(&src_reg, 4);
        }
 
        __reg_deduce_bounds(dst_reg);
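
The reason the inputs must be coerced, and not just the output: for a right shift, stale high bits flow toward the LSB, so truncating after the operation is too late. A small userspace illustration of the difference the verifier now models:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t x = 0x100000000ULL;          /* only bit 32 set */

            /* Truncate after shifting: high bits leak into the result. */
            uint32_t late  = (uint32_t)(x >> 1);  /* 0x80000000 */
            /* Coerce inputs first, as 32-bit BPF ALU semantics require. */
            uint32_t early = (uint32_t)x >> 1;    /* 0x0 */

            printf("%#x %#x\n", late, early);
            return 0;
    }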
index 9bd54304446f8b477a61a799a2d75136672548bd..1b1d63b3634b580cf6b29384e162f76b28e05102 100644
@@ -23,6 +23,9 @@ config ARCH_HAS_SYNC_DMA_FOR_CPU
        bool
        select NEED_DMA_MAP_STATE
 
+config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
+       bool
+
 config DMA_DIRECT_OPS
        bool
        depends on HAS_DMA
index c80549bf82c6628fea20d230edc8b824e9f36f4d..5a97f34bc14c8e2e31a452cb6fdcd38774a3b9a1 100644
@@ -3935,6 +3935,12 @@ int perf_event_read_local(struct perf_event *event, u64 *value,
                goto out;
        }
 
+       /* If this is a pinned event it must be running on this CPU */
+       if (event->attr.pinned && event->oncpu != smp_processor_id()) {
+               ret = -EBUSY;
+               goto out;
+       }
+
        /*
 	 * If the event is currently on this CPU, it's either a per-task event,
 	 * or local to this CPU. Furthermore it means it's ACTIVE (otherwise
@@ -8308,6 +8314,8 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
                        goto unlock;
 
                list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
+                       if (event->cpu != smp_processor_id())
+                               continue;
                        if (event->attr.type != PERF_TYPE_TRACEPOINT)
                                continue;
                        if (event->attr.config != entry->type)
@@ -9425,9 +9433,7 @@ static void free_pmu_context(struct pmu *pmu)
        if (pmu->task_ctx_nr > perf_invalid_context)
                return;
 
-       mutex_lock(&pmus_lock);
        free_percpu(pmu->pmu_cpu_context);
-       mutex_unlock(&pmus_lock);
 }
 
 /*
@@ -9683,12 +9689,8 @@ EXPORT_SYMBOL_GPL(perf_pmu_register);
 
 void perf_pmu_unregister(struct pmu *pmu)
 {
-       int remove_device;
-
        mutex_lock(&pmus_lock);
-       remove_device = pmu_bus_running;
        list_del_rcu(&pmu->entry);
-       mutex_unlock(&pmus_lock);
 
        /*
         * We dereference the pmu list under both SRCU and regular RCU, so
@@ -9700,13 +9702,14 @@ void perf_pmu_unregister(struct pmu *pmu)
        free_percpu(pmu->pmu_disable_count);
        if (pmu->type >= PERF_TYPE_MAX)
                idr_remove(&pmu_idr, pmu->type);
-       if (remove_device) {
+       if (pmu_bus_running) {
                if (pmu->nr_addr_filters)
                        device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
                device_del(pmu->dev);
                put_device(pmu->dev);
        }
        free_pmu_context(pmu);
+       mutex_unlock(&pmus_lock);
 }
 EXPORT_SYMBOL_GPL(perf_pmu_unregister);
 
index 0be047dbd8971dcd4ee1282936d3b3056f6ebfea..65a3b7e55b9fcd2b289e09d194a179f1cc8accc5 100644
@@ -260,7 +260,7 @@ static void test_cycle_work(struct work_struct *work)
 {
        struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
        struct ww_acquire_ctx ctx;
-       int err;
+       int err, erra = 0;
 
        ww_acquire_init(&ctx, &ww_class);
        ww_mutex_lock(&cycle->a_mutex, &ctx);
@@ -270,17 +270,19 @@ static void test_cycle_work(struct work_struct *work)
 
        err = ww_mutex_lock(cycle->b_mutex, &ctx);
        if (err == -EDEADLK) {
+               err = 0;
                ww_mutex_unlock(&cycle->a_mutex);
                ww_mutex_lock_slow(cycle->b_mutex, &ctx);
-               err = ww_mutex_lock(&cycle->a_mutex, &ctx);
+               erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
        }
 
        if (!err)
                ww_mutex_unlock(cycle->b_mutex);
-       ww_mutex_unlock(&cycle->a_mutex);
+       if (!erra)
+               ww_mutex_unlock(&cycle->a_mutex);
        ww_acquire_fini(&ctx);
 
-       cycle->result = err;
+       cycle->result = err ?: erra;
 }
 
 static int __test_cycle(unsigned int nthreads)
index 625bc9897f628bec7abacd797a05bff4b4f2cd85..ad97f3ba5ec51c4a9379228b60416c72e6dc5b60 100644
@@ -1167,7 +1167,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 
        if (task_cpu(p) != new_cpu) {
                if (p->sched_class->migrate_task_rq)
-                       p->sched_class->migrate_task_rq(p);
+                       p->sched_class->migrate_task_rq(p, new_cpu);
                p->se.nr_migrations++;
                rseq_migrate(p);
                perf_event_task_migrate(p);
index 997ea7b839fa048fece2738fa2a7a38472ca4f01..91e4202b0634569514659aa905fddf5b0ec6a6e7 100644
@@ -1607,7 +1607,7 @@ out:
        return cpu;
 }
 
-static void migrate_task_rq_dl(struct task_struct *p)
+static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
 {
        struct rq *rq;
 
index f808ddf2a868e7dbfbac8d7bcf5809b876dc51c9..7fc4a371bdd248ee94ffb7f5088bb86b5fa42262 100644
@@ -1392,6 +1392,17 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
        int last_cpupid, this_cpupid;
 
        this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
+       last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
+
+       /*
+        * Allow first faults or private faults to migrate immediately early in
+        * the lifetime of a task. The magic number 4 is based on waiting for
+        * two full passes of the "multi-stage node selection" test that is
+        * executed below.
+        */
+       if ((p->numa_preferred_nid == -1 || p->numa_scan_seq <= 4) &&
+           (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
+               return true;
 
        /*
         * Multi-stage node selection is used in conjunction with a periodic
@@ -1410,7 +1421,6 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
 	 * This quadratic squishes small probabilities, making it less likely we
         * act on an unlikely task<->page relation.
         */
-       last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
        if (!cpupid_pid_unset(last_cpupid) &&
                                cpupid_to_nid(last_cpupid) != dst_nid)
                return false;
@@ -1514,6 +1524,21 @@ struct task_numa_env {
 static void task_numa_assign(struct task_numa_env *env,
                             struct task_struct *p, long imp)
 {
+       struct rq *rq = cpu_rq(env->dst_cpu);
+
+	/* Bail out if the run queue is part of an active NUMA balance. */
+       if (xchg(&rq->numa_migrate_on, 1))
+               return;
+
+       /*
+	 * Clear the previous best_cpu/rq numa-migrate flag, since the task has
+	 * now found a better CPU to move to or swap with.
+        */
+       if (env->best_cpu != -1) {
+               rq = cpu_rq(env->best_cpu);
+               WRITE_ONCE(rq->numa_migrate_on, 0);
+       }
+
        if (env->best_task)
                put_task_struct(env->best_task);
        if (p)
@@ -1552,6 +1577,13 @@ static bool load_too_imbalanced(long src_load, long dst_load,
        return (imb > old_imb);
 }
 
+/*
+ * Maximum NUMA importance can be 1998 (2*999);
+ * SMALLIMP @ 30 would be close to 1998/64.
+ * Used to deter task migration.
+ */
+#define SMALLIMP       30
+
 /*
  * This checks if the overall compute and NUMA accesses of the system would
  * be improved if the source task was migrated to the target dst_cpu taking
@@ -1569,6 +1601,9 @@ static void task_numa_compare(struct task_numa_env *env,
        long moveimp = imp;
        int dist = env->dist;
 
+       if (READ_ONCE(dst_rq->numa_migrate_on))
+               return;
+
        rcu_read_lock();
        cur = task_rcu_dereference(&dst_rq->curr);
        if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
@@ -1582,7 +1617,7 @@ static void task_numa_compare(struct task_numa_env *env,
                goto unlock;
 
        if (!cur) {
-               if (maymove || imp > env->best_imp)
+               if (maymove && moveimp >= env->best_imp)
                        goto assign;
                else
                        goto unlock;
@@ -1625,15 +1660,21 @@ static void task_numa_compare(struct task_numa_env *env,
                               task_weight(cur, env->dst_nid, dist);
        }
 
-       if (imp <= env->best_imp)
-               goto unlock;
-
        if (maymove && moveimp > imp && moveimp > env->best_imp) {
-               imp = moveimp - 1;
+               imp = moveimp;
                cur = NULL;
                goto assign;
        }
 
+       /*
+        * If the NUMA importance is less than SMALLIMP,
+	 * task migration might only result in ping-ponging
+        * of tasks and also hurt performance due to cache
+        * misses.
+        */
+       if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2)
+               goto unlock;
+
        /*
         * In the overloaded case, try and keep the load balanced.
         */
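
The SMALLIMP test above can be read as a standalone predicate. A minimal
sketch (the helper name is assumed; the patch open-codes this check in
task_numa_compare()):

	#define SMALLIMP 30

	/* Reject a candidate whose NUMA importance is too small in absolute
	 * terms, or not clearly better than the current best candidate. */
	static bool imp_worth_taking(long imp, long best_imp)
	{
		return !(imp < SMALLIMP || imp <= best_imp + SMALLIMP / 2);
	}

For example, with best_imp = 40 a candidate at imp = 50 clears the absolute
threshold (50 >= 30) but not the relative one (50 <= 40 + 15), so it is
rejected rather than ping-ponging tasks for a marginal gain.
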
@@ -1710,6 +1751,7 @@ static int task_numa_migrate(struct task_struct *p)
                .best_cpu = -1,
        };
        struct sched_domain *sd;
+       struct rq *best_rq;
        unsigned long taskweight, groupweight;
        int nid, ret, dist;
        long taskimp, groupimp;
@@ -1805,20 +1847,17 @@ static int task_numa_migrate(struct task_struct *p)
        if (env.best_cpu == -1)
                return -EAGAIN;
 
-       /*
-        * Reset the scan period if the task is being rescheduled on an
-        * alternative node to recheck if the tasks is now properly placed.
-        */
-       p->numa_scan_period = task_scan_start(p);
-
+       best_rq = cpu_rq(env.best_cpu);
        if (env.best_task == NULL) {
                ret = migrate_task_to(p, env.best_cpu);
+               WRITE_ONCE(best_rq->numa_migrate_on, 0);
                if (ret != 0)
                        trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
                return ret;
        }
 
        ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
+       WRITE_ONCE(best_rq->numa_migrate_on, 0);
 
        if (ret != 0)
                trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
@@ -2596,6 +2635,39 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr)
        }
 }
 
+static void update_scan_period(struct task_struct *p, int new_cpu)
+{
+       int src_nid = cpu_to_node(task_cpu(p));
+       int dst_nid = cpu_to_node(new_cpu);
+
+       if (!static_branch_likely(&sched_numa_balancing))
+               return;
+
+       if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING))
+               return;
+
+       if (src_nid == dst_nid)
+               return;
+
+       /*
+        * Allow resets if faults have been trapped before one scan
+        * has completed. This is most likely due to a new task that
+        * is pulled cross-node due to wakeups or load balancing.
+        */
+       if (p->numa_scan_seq) {
+               /*
+                * Avoid scan adjustments if moving to the preferred
+                * node or if the task was not previously running on
+                * the preferred node.
+                */
+               if (dst_nid == p->numa_preferred_nid ||
+                   (p->numa_preferred_nid != -1 && src_nid != p->numa_preferred_nid))
+                       return;
+       }
+
+       p->numa_scan_period = task_scan_start(p);
+}
+
 #else
 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
 {
@@ -2609,6 +2681,10 @@ static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
 {
 }
 
+static inline void update_scan_period(struct task_struct *p, int new_cpu)
+{
+}
+
 #endif /* CONFIG_NUMA_BALANCING */
 
 static void
@@ -6275,7 +6351,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
  * cfs_rq_of(p) references at time of call are still valid and identify the
  * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
  */
-static void migrate_task_rq_fair(struct task_struct *p)
+static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
 {
        /*
         * As blocked tasks retain absolute vruntime the migration needs to
@@ -6328,6 +6404,8 @@ static void migrate_task_rq_fair(struct task_struct *p)
 
        /* We have migrated, no longer consider this task hot */
        p->se.exec_start = 0;
+
+       update_scan_period(p, new_cpu);
 }
 
 static void task_dead_fair(struct task_struct *p)
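
Taken together, the numa_migrate_on changes implement a per-runqueue
try-lock: xchg() claims the destination runqueue in task_numa_assign() and
WRITE_ONCE() releases it in task_numa_migrate() once the migration or swap
has been issued. A userspace model of the pattern, with C11 atomics
standing in for the kernel primitives (all names here are assumed):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int numa_migrate_on;	/* one per runqueue in the kernel */

	/* mirrors: if (xchg(&rq->numa_migrate_on, 1)) return; */
	static bool claim_dst_rq(void)
	{
		return atomic_exchange(&numa_migrate_on, 1) == 0;
	}

	/* mirrors: WRITE_ONCE(best_rq->numa_migrate_on, 0); */
	static void release_dst_rq(void)
	{
		atomic_store(&numa_migrate_on, 0);
	}

Only the winner of the exchange may assign the runqueue as a migration
target; the plain READ_ONCE() check at the top of task_numa_compare() is an
early bail-out, while the xchg() is what actually serializes concurrent
balancers.
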
index 4a2e8cae63c41111672a898d3955008cd345c69b..455fa330de0462db774f827a726478aa66abad3b 100644 (file)
@@ -783,6 +783,7 @@ struct rq {
 #ifdef CONFIG_NUMA_BALANCING
        unsigned int            nr_numa_running;
        unsigned int            nr_preferred_running;
+       unsigned int            numa_migrate_on;
 #endif
        #define CPU_LOAD_IDX_MAX 5
        unsigned long           cpu_load[CPU_LOAD_IDX_MAX];
@@ -1523,7 +1524,7 @@ struct sched_class {
 
 #ifdef CONFIG_SMP
        int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
-       void (*migrate_task_rq)(struct task_struct *p);
+       void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
 
        void (*task_woken)(struct rq *this_rq, struct task_struct *task);
 
index bb6fe5ed4ecf5682716d04181afc9a7c6a7f9b30..d26de6156b97db73803cb617bcd81fd7415ac7b1 100644 (file)
@@ -45,12 +45,11 @@ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
 };
 
 static int validate_nla_bitfield32(const struct nlattr *nla,
-                                  u32 *valid_flags_allowed)
+                                  const u32 *valid_flags_mask)
 {
        const struct nla_bitfield32 *bf = nla_data(nla);
-       u32 *valid_flags_mask = valid_flags_allowed;
 
-       if (!valid_flags_allowed)
+       if (!valid_flags_mask)
                return -EINVAL;
 
        /* disallow invalid bit selector */
@@ -68,12 +67,99 @@ static int validate_nla_bitfield32(const struct nlattr *nla,
        return 0;
 }
 
+static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
+                             const struct nla_policy *policy,
+                             struct netlink_ext_ack *extack)
+{
+       const struct nlattr *entry;
+       int rem;
+
+       nla_for_each_attr(entry, head, len, rem) {
+               int ret;
+
+               if (nla_len(entry) == 0)
+                       continue;
+
+               if (nla_len(entry) < NLA_HDRLEN) {
+                       NL_SET_ERR_MSG_ATTR(extack, entry,
+                                           "Array element too short");
+                       return -ERANGE;
+               }
+
+               ret = nla_validate(nla_data(entry), nla_len(entry),
+                                  maxtype, policy, extack);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int nla_validate_int_range(const struct nla_policy *pt,
+                                 const struct nlattr *nla,
+                                 struct netlink_ext_ack *extack)
+{
+       bool validate_min, validate_max;
+       s64 value;
+
+       validate_min = pt->validation_type == NLA_VALIDATE_RANGE ||
+                      pt->validation_type == NLA_VALIDATE_MIN;
+       validate_max = pt->validation_type == NLA_VALIDATE_RANGE ||
+                      pt->validation_type == NLA_VALIDATE_MAX;
+
+       switch (pt->type) {
+       case NLA_U8:
+               value = nla_get_u8(nla);
+               break;
+       case NLA_U16:
+               value = nla_get_u16(nla);
+               break;
+       case NLA_U32:
+               value = nla_get_u32(nla);
+               break;
+       case NLA_S8:
+               value = nla_get_s8(nla);
+               break;
+       case NLA_S16:
+               value = nla_get_s16(nla);
+               break;
+       case NLA_S32:
+               value = nla_get_s32(nla);
+               break;
+       case NLA_S64:
+               value = nla_get_s64(nla);
+               break;
+       case NLA_U64:
+               /* treat this one specially, since it may not fit into s64 */
+               if ((validate_min && nla_get_u64(nla) < pt->min) ||
+                   (validate_max && nla_get_u64(nla) > pt->max)) {
+                       NL_SET_ERR_MSG_ATTR(extack, nla,
+                                           "integer out of range");
+                       return -ERANGE;
+               }
+               return 0;
+       default:
+               WARN_ON(1);
+               return -EINVAL;
+       }
+
+       if ((validate_min && value < pt->min) ||
+           (validate_max && value > pt->max)) {
+               NL_SET_ERR_MSG_ATTR(extack, nla,
+                                   "integer out of range");
+               return -ERANGE;
+       }
+
+       return 0;
+}
+
 static int validate_nla(const struct nlattr *nla, int maxtype,
                        const struct nla_policy *policy,
-                       const char **error_msg)
+                       struct netlink_ext_ack *extack)
 {
        const struct nla_policy *pt;
        int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla);
+       int err = -ERANGE;
 
        if (type <= 0 || type > maxtype)
                return 0;
@@ -91,24 +177,31 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
        switch (pt->type) {
        case NLA_EXACT_LEN:
                if (attrlen != pt->len)
-                       return -ERANGE;
+                       goto out_err;
                break;
 
        case NLA_REJECT:
-               if (pt->validation_data && error_msg)
-                       *error_msg = pt->validation_data;
-               return -EINVAL;
+               if (extack && pt->validation_data) {
+                       NL_SET_BAD_ATTR(extack, nla);
+                       extack->_msg = pt->validation_data;
+                       return -EINVAL;
+               }
+               err = -EINVAL;
+               goto out_err;
 
        case NLA_FLAG:
                if (attrlen > 0)
-                       return -ERANGE;
+                       goto out_err;
                break;
 
        case NLA_BITFIELD32:
                if (attrlen != sizeof(struct nla_bitfield32))
-                       return -ERANGE;
+                       goto out_err;
 
-               return validate_nla_bitfield32(nla, pt->validation_data);
+               err = validate_nla_bitfield32(nla, pt->validation_data);
+               if (err)
+                       goto out_err;
+               break;
 
        case NLA_NUL_STRING:
                if (pt->len)
@@ -116,13 +209,15 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
                else
                        minlen = attrlen;
 
-               if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL)
-                       return -EINVAL;
+               if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL) {
+                       err = -EINVAL;
+                       goto out_err;
+               }
                /* fall through */
 
        case NLA_STRING:
                if (attrlen < 1)
-                       return -ERANGE;
+                       goto out_err;
 
                if (pt->len) {
                        char *buf = nla_data(nla);
@@ -131,32 +226,58 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
                                attrlen--;
 
                        if (attrlen > pt->len)
-                               return -ERANGE;
+                               goto out_err;
                }
                break;
 
        case NLA_BINARY:
                if (pt->len && attrlen > pt->len)
-                       return -ERANGE;
+                       goto out_err;
                break;
 
-       case NLA_NESTED_COMPAT:
-               if (attrlen < pt->len)
-                       return -ERANGE;
-               if (attrlen < NLA_ALIGN(pt->len))
-                       break;
-               if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN)
-                       return -ERANGE;
-               nla = nla_data(nla) + NLA_ALIGN(pt->len);
-               if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN + nla_len(nla))
-                       return -ERANGE;
-               break;
        case NLA_NESTED:
                /* a nested attribute is allowed to be empty; if it's not,
                 * it must have a size of at least NLA_HDRLEN.
                 */
                if (attrlen == 0)
                        break;
+               if (attrlen < NLA_HDRLEN)
+                       goto out_err;
+               if (pt->validation_data) {
+                       err = nla_validate(nla_data(nla), nla_len(nla), pt->len,
+                                          pt->validation_data, extack);
+                       if (err < 0) {
+                               /*
+                                * return directly to preserve the inner
+                                * error message/attribute pointer
+                                */
+                               return err;
+                       }
+               }
+               break;
+       case NLA_NESTED_ARRAY:
+               /* a nested array attribute is allowed to be empty; if it's not,
+                * it must have a size of at least NLA_HDRLEN.
+                */
+               if (attrlen == 0)
+                       break;
+               if (attrlen < NLA_HDRLEN)
+                       goto out_err;
+               if (pt->validation_data) {
+                       int err;
+
+                       err = nla_validate_array(nla_data(nla), nla_len(nla),
+                                                pt->len, pt->validation_data,
+                                                extack);
+                       if (err < 0) {
+                               /*
+                                * return directly to preserve the inner
+                                * error message/attribute pointer
+                                */
+                               return err;
+                       }
+               }
+               break;
        default:
                if (pt->len)
                        minlen = pt->len;
@@ -164,10 +285,34 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
                        minlen = nla_attr_minlen[pt->type];
 
                if (attrlen < minlen)
-                       return -ERANGE;
+                       goto out_err;
+       }
+
+       /* further validation */
+       switch (pt->validation_type) {
+       case NLA_VALIDATE_NONE:
+               /* nothing to do */
+               break;
+       case NLA_VALIDATE_RANGE:
+       case NLA_VALIDATE_MIN:
+       case NLA_VALIDATE_MAX:
+               err = nla_validate_int_range(pt, nla, extack);
+               if (err)
+                       return err;
+               break;
+       case NLA_VALIDATE_FUNCTION:
+               if (pt->validate) {
+                       err = pt->validate(nla, extack);
+                       if (err)
+                               return err;
+               }
+               break;
        }
 
        return 0;
+out_err:
+       NL_SET_ERR_MSG_ATTR(extack, nla, "Attribute failed policy validation");
+       return err;
 }
 
 /**
@@ -192,12 +337,10 @@ int nla_validate(const struct nlattr *head, int len, int maxtype,
        int rem;
 
        nla_for_each_attr(nla, head, len, rem) {
-               int err = validate_nla(nla, maxtype, policy, NULL);
+               int err = validate_nla(nla, maxtype, policy, extack);
 
-               if (err < 0) {
-                       NL_SET_BAD_ATTR(extack, nla);
+               if (err < 0)
                        return err;
-               }
        }
 
        return 0;
@@ -248,46 +391,63 @@ EXPORT_SYMBOL(nla_policy_len);
  *
  * Returns 0 on success or a negative error code.
  */
-int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
-             int len, const struct nla_policy *policy,
-             struct netlink_ext_ack *extack)
+static int __nla_parse(struct nlattr **tb, int maxtype,
+                      const struct nlattr *head, int len,
+                      bool strict, const struct nla_policy *policy,
+                      struct netlink_ext_ack *extack)
 {
        const struct nlattr *nla;
-       int rem, err;
+       int rem;
 
        memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
 
        nla_for_each_attr(nla, head, len, rem) {
                u16 type = nla_type(nla);
 
-               if (type > 0 && type <= maxtype) {
-                       static const char _msg[] = "Attribute failed policy validation";
-                       const char *msg = _msg;
-
-                       if (policy) {
-                               err = validate_nla(nla, maxtype, policy, &msg);
-                               if (err < 0) {
-                                       NL_SET_BAD_ATTR(extack, nla);
-                                       if (extack)
-                                               extack->_msg = msg;
-                                       goto errout;
-                               }
+               if (type == 0 || type > maxtype) {
+                       if (strict) {
+                               NL_SET_ERR_MSG(extack, "Unknown attribute type");
+                               return -EINVAL;
                        }
+                       continue;
+               }
+               if (policy) {
+                       int err = validate_nla(nla, maxtype, policy, extack);
 
-                       tb[type] = (struct nlattr *)nla;
+                       if (err < 0)
+                               return err;
                }
+
+               tb[type] = (struct nlattr *)nla;
        }
 
-       if (unlikely(rem > 0))
+       if (unlikely(rem > 0)) {
                pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
                                    rem, current->comm);
+               NL_SET_ERR_MSG(extack, "bytes leftover after parsing attributes");
+               if (strict)
+                       return -EINVAL;
+       }
 
-       err = 0;
-errout:
-       return err;
+       return 0;
+}
+
+int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
+             int len, const struct nla_policy *policy,
+             struct netlink_ext_ack *extack)
+{
+       return __nla_parse(tb, maxtype, head, len, false, policy, extack);
 }
 EXPORT_SYMBOL(nla_parse);
 
+int nla_parse_strict(struct nlattr **tb, int maxtype, const struct nlattr *head,
+                    int len, const struct nla_policy *policy,
+                    struct netlink_ext_ack *extack)
+{
+       return __nla_parse(tb, maxtype, head, len, true, policy, extack);
+}
+EXPORT_SYMBOL(nla_parse_strict);
+
 /**
  * nla_find - Find a specific attribute in a stream of attributes
  * @head: head of attribute stream
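
With the validation hooks above, a netlink policy can carry its own range
checks and callers can opt into strict parsing. A usage sketch (the MY_*
attribute names are hypothetical; tb, head, len and extack are assumed to
be set up as usual):

	enum {
		MY_ATTR_UNSPEC,
		MY_ATTR_PORT,
		__MY_ATTR_MAX,
	};
	#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

	static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
		[MY_ATTR_PORT] = { .type = NLA_U16,
				   .validation_type = NLA_VALIDATE_RANGE,
				   .min = 1, .max = 1024 },
	};

	/* strict parsing: unknown attribute types and leftover bytes are
	 * now hard errors instead of being silently skipped */
	err = nla_parse_strict(tb, MY_ATTR_MAX, head, len, my_policy, extack);

An out-of-range MY_ATTR_PORT now fails inside validate_nla() with -ERANGE
and an "integer out of range" extack message, instead of every caller
having to re-check the value after parsing.
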
index 6a473709e9b6b953393ea12215a38c9e47c9f849..7405c9d89d65134c003a2d3984e1a993fd9c9907 100644 (file)
@@ -19,7 +19,8 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
                struct gup_benchmark *gup)
 {
        ktime_t start_time, end_time;
-       unsigned long i, nr, nr_pages, addr, next;
+       unsigned long i, nr_pages, addr, next;
+       int nr;
        struct page **pages;
 
        nr_pages = gup->size / PAGE_SIZE;
index 533f9b00147d267644bcbf98da717329fb07c38f..00704060b7f79242d324af81592b0afc6384ae08 100644 (file)
@@ -2931,7 +2931,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
        else
                page_add_file_rmap(new, true);
        set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
-       if (vma->vm_flags & VM_LOCKED)
+       if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
                mlock_vma_page(new);
        update_mmu_cache_pmd(vma, address, pvmw->pmd);
 }
index 3c21775f196b2f38a25ed05687791a4c79a3d3d4..5c390f5a5207b5c0b4d1d55524d2f655c7edcae1 100644 (file)
@@ -3326,8 +3326,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
        struct page *page;
        struct hstate *h = hstate_vma(vma);
        unsigned long sz = huge_page_size(h);
-       const unsigned long mmun_start = start; /* For mmu_notifiers */
-       const unsigned long mmun_end   = end;   /* For mmu_notifiers */
+       unsigned long mmun_start = start;       /* For mmu_notifiers */
+       unsigned long mmun_end   = end;         /* For mmu_notifiers */
 
        WARN_ON(!is_vm_hugetlb_page(vma));
        BUG_ON(start & ~huge_page_mask(h));
@@ -3339,6 +3339,11 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
         */
        tlb_remove_check_page_size_change(tlb, sz);
        tlb_start_vma(tlb, vma);
+
+       /*
+        * If sharing possible, alert mmu notifiers of worst case.
+        */
+       adjust_range_if_pmd_sharing_possible(vma, &mmun_start, &mmun_end);
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        address = start;
        for (; address < end; address += sz) {
@@ -3349,6 +3354,10 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                ptl = huge_pte_lock(h, mm, ptep);
                if (huge_pmd_unshare(mm, &address, ptep)) {
                        spin_unlock(ptl);
+                       /*
+                        * We just unmapped a page of PMDs by clearing a PUD.
+                        * The caller's TLB flush range should cover this area.
+                        */
                        continue;
                }
 
@@ -3431,12 +3440,23 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 {
        struct mm_struct *mm;
        struct mmu_gather tlb;
+       unsigned long tlb_start = start;
+       unsigned long tlb_end = end;
+
+       /*
+        * If shared PMDs were possibly used within this vma range, adjust
+        * start/end for worst-case tlb flushing.
+        * Note that we cannot be sure if PMDs are shared until we try to
+        * unmap pages.  However, we want to make sure TLB flushing covers
+        * the largest possible range.
+        */
+       adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
 
        mm = vma->vm_mm;
 
-       tlb_gather_mmu(&tlb, mm, start, end);
+       tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
        __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
-       tlb_finish_mmu(&tlb, start, end);
+       tlb_finish_mmu(&tlb, tlb_start, tlb_end);
 }
 
 /*
@@ -4298,11 +4318,21 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
        pte_t pte;
        struct hstate *h = hstate_vma(vma);
        unsigned long pages = 0;
+       unsigned long f_start = start;
+       unsigned long f_end = end;
+       bool shared_pmd = false;
+
+       /*
+        * In the case of shared PMDs, the area to flush could be beyond
+        * start/end.  Set f_start/f_end to cover the maximum possible
+        * range if PMD sharing is possible.
+        */
+       adjust_range_if_pmd_sharing_possible(vma, &f_start, &f_end);
 
        BUG_ON(address >= end);
-       flush_cache_range(vma, address, end);
+       flush_cache_range(vma, f_start, f_end);
 
-       mmu_notifier_invalidate_range_start(mm, start, end);
+       mmu_notifier_invalidate_range_start(mm, f_start, f_end);
        i_mmap_lock_write(vma->vm_file->f_mapping);
        for (; address < end; address += huge_page_size(h)) {
                spinlock_t *ptl;
@@ -4313,6 +4343,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                if (huge_pmd_unshare(mm, &address, ptep)) {
                        pages++;
                        spin_unlock(ptl);
+                       shared_pmd = true;
                        continue;
                }
                pte = huge_ptep_get(ptep);
@@ -4348,9 +4379,13 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
         * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
         * may have cleared our pud entry and done put_page on the page table:
         * once we release i_mmap_rwsem, another task can do the final put_page
-        * and that page table be reused and filled with junk.
+        * and that page table be reused and filled with junk.  If we actually
+        * did unshare a page of pmds, flush the range corresponding to the pud.
         */
-       flush_hugetlb_tlb_range(vma, start, end);
+       if (shared_pmd)
+               flush_hugetlb_tlb_range(vma, f_start, f_end);
+       else
+               flush_hugetlb_tlb_range(vma, start, end);
        /*
         * No need to call mmu_notifier_invalidate_range() we are downgrading
         * page table protection not changing it to point to a new page.
@@ -4358,7 +4393,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
         * See Documentation/vm/mmu_notifier.rst
         */
        i_mmap_unlock_write(vma->vm_file->f_mapping);
-       mmu_notifier_invalidate_range_end(mm, start, end);
+       mmu_notifier_invalidate_range_end(mm, f_start, f_end);
 
        return pages << h->order;
 }
@@ -4545,12 +4580,40 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
        /*
         * check on proper vm_flags and page table alignment
         */
-       if (vma->vm_flags & VM_MAYSHARE &&
-           vma->vm_start <= base && end <= vma->vm_end)
+       if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
                return true;
        return false;
 }
 
+/*
+ * Determine if start,end range within vma could be mapped by shared pmd.
+ * If yes, adjust start and end to cover range associated with possible
+ * shared pmd mappings.
+ */
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+                               unsigned long *start, unsigned long *end)
+{
+       unsigned long check_addr = *start;
+
+       if (!(vma->vm_flags & VM_MAYSHARE))
+               return;
+
+       for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
+               unsigned long a_start = check_addr & PUD_MASK;
+               unsigned long a_end = a_start + PUD_SIZE;
+
+               /*
+                * If sharing is possible, adjust start/end if necessary.
+                */
+               if (range_in_vma(vma, a_start, a_end)) {
+                       if (a_start < *start)
+                               *start = a_start;
+                       if (a_end > *end)
+                               *end = a_end;
+               }
+       }
+}
+
 /*
  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
  * and returns the corresponding pte. While this is not necessary for the
@@ -4648,6 +4711,11 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 {
        return 0;
 }
+
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+                               unsigned long *start, unsigned long *end)
+{
+}
 #define want_pmd_share()       (0)
 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
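
The adjustment amounts to rounding the range outward to PUD boundaries.
With 1 GiB PUDs (x86-64) and a VMA large enough that range_in_vma() always
succeeds, the arithmetic looks like this (values are illustrative only):

	#define PUD_SIZE (1UL << 30)
	#define PUD_MASK (~(PUD_SIZE - 1))

	unsigned long start = 0x40100000UL, end = 0x40300000UL;

	start &= PUD_MASK;			/* 0x40000000 */
	end = (end + PUD_SIZE - 1) & PUD_MASK;	/* 0x80000000 */

so a 2 MB unmap may need a full 1 GiB of TLB flush coverage when the PMD
page backing it could have been shared with another mapping of the file.
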
 
index 972a9eaa898b6ad889a4647ed207ad82bd5d0f4b..71d21df2a3f362cc370c8ff158fb03dfa9d2d03c 100644 (file)
@@ -96,7 +96,7 @@ static long madvise_behavior(struct vm_area_struct *vma,
                new_flags |= VM_DONTDUMP;
                break;
        case MADV_DODUMP:
-               if (new_flags & VM_SPECIAL) {
+               if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
                        error = -EINVAL;
                        goto out;
                }
index d6a2e89b086a43d77f155f6b525fc15326d9c035..84381b55b2bd5c535bd181b7670a69f37bb084a4 100644 (file)
@@ -275,6 +275,9 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
                if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
                        mlock_vma_page(new);
 
+               if (PageTransHuge(page) && PageMlocked(page))
+                       clear_page_mlock(page);
+
                /* No need to invalidate - it was non-present before */
                update_mmu_cache(vma, pvmw.address, pvmw.pte);
        }
@@ -1411,7 +1414,7 @@ retry:
                                 * we encounter them after the rest of the list
                                 * is processed.
                                 */
-                               if (PageTransHuge(page)) {
+                               if (PageTransHuge(page) && !PageHuge(page)) {
                                        lock_page(page);
                                        rc = split_huge_page_to_list(page, from);
                                        unlock_page(page);
@@ -1855,46 +1858,6 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
        return newpage;
 }
 
-/*
- * page migration rate limiting control.
- * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs
- * window of time. Default here says do not migrate more than 1280M per second.
- */
-static unsigned int migrate_interval_millisecs __read_mostly = 100;
-static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
-
-/* Returns true if the node is migrate rate-limited after the update */
-static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
-                                       unsigned long nr_pages)
-{
-       /*
-        * Rate-limit the amount of data that is being migrated to a node.
-        * Optimal placement is no good if the memory bus is saturated and
-        * all the time is being spent migrating!
-        */
-       if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
-               spin_lock(&pgdat->numabalancing_migrate_lock);
-               pgdat->numabalancing_migrate_nr_pages = 0;
-               pgdat->numabalancing_migrate_next_window = jiffies +
-                       msecs_to_jiffies(migrate_interval_millisecs);
-               spin_unlock(&pgdat->numabalancing_migrate_lock);
-       }
-       if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
-               trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
-                                                               nr_pages);
-               return true;
-       }
-
-       /*
-        * This is an unlocked non-atomic update so errors are possible.
-        * The consequences are failing to migrate when we potentiall should
-        * have which is not severe enough to warrant locking. If it is ever
-        * a problem, it can be converted to a per-cpu counter.
-        */
-       pgdat->numabalancing_migrate_nr_pages += nr_pages;
-       return false;
-}
-
 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 {
        int page_lru;
@@ -1967,14 +1930,6 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
        if (page_is_file_cache(page) && PageDirty(page))
                goto out;
 
-       /*
-        * Rate-limit the amount of data that is being migrated to a node.
-        * Optimal placement is no good if the memory bus is saturated and
-        * all the time is being spent migrating!
-        */
-       if (numamigrate_update_ratelimit(pgdat, 1))
-               goto out;
-
        isolated = numamigrate_isolate_page(pgdat, page);
        if (!isolated)
                goto out;
@@ -2021,14 +1976,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        unsigned long mmun_start = address & HPAGE_PMD_MASK;
        unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
 
-       /*
-        * Rate-limit the amount of data that is being migrated to a node.
-        * Optimal placement is no good if the memory bus is saturated and
-        * all the time is being spent migrating!
-        */
-       if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
-               goto out_dropref;
-
        new_page = alloc_pages_node(node,
                (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
                HPAGE_PMD_ORDER);
@@ -2125,7 +2072,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 
 out_fail:
        count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
-out_dropref:
        ptl = pmd_lock(mm, pmd);
        if (pmd_same(*pmd, entry)) {
                entry = pmd_modify(entry, vma->vm_page_prot);
index 89d2a2ab3fe68c3ae46104074c519c7500dd86cb..706a738c0aeed26a904e43a7a1d5d1a45453f721 100644 (file)
@@ -6197,8 +6197,6 @@ static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
 static void pgdat_init_numabalancing(struct pglist_data *pgdat)
 {
        spin_lock_init(&pgdat->numabalancing_migrate_lock);
-       pgdat->numabalancing_migrate_nr_pages = 0;
-       pgdat->numabalancing_migrate_next_window = jiffies;
 }
 #else
 static void pgdat_init_numabalancing(struct pglist_data *pgdat) {}
index eb477809a5c0a534e2977f6fd6c1df74a05bc170..1e79fac3186b63208cbe37a8c05597c44d2234c9 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1362,11 +1362,21 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        }
 
        /*
-        * We have to assume the worse case ie pmd for invalidation. Note that
-        * the page can not be free in this function as call of try_to_unmap()
-        * must hold a reference on the page.
+        * For THP, we have to assume the worst case, i.e. pmd, for invalidation.
+        * For hugetlb, it could be much worse if we need to do pud
+        * invalidation in the case of pmd sharing.
+        *
+        * Note that the page cannot be freed in this function, as the
+        * caller of try_to_unmap() must hold a reference on the page.
         */
        end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
+       if (PageHuge(page)) {
+               /*
+                * If sharing is possible, start and end will be adjusted
+                * accordingly.
+                */
+               adjust_range_if_pmd_sharing_possible(vma, &start, &end);
+       }
        mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
 
        while (page_vma_mapped_walk(&pvmw)) {
@@ -1409,6 +1419,32 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
                address = pvmw.address;
 
+               if (PageHuge(page)) {
+                       if (huge_pmd_unshare(mm, &address, pvmw.pte)) {
+                               /*
+                                * huge_pmd_unshare unmapped an entire PMD
+                                * page.  There is no way of knowing exactly
+                                * which PMDs may be cached for this mm, so
+                                * we must flush them all.  start/end were
+                                * already adjusted above to cover this range.
+                                */
+                               flush_cache_range(vma, start, end);
+                               flush_tlb_range(vma, start, end);
+                               mmu_notifier_invalidate_range(mm, start, end);
+
+                               /*
+                                * The ref count of the PMD page was dropped
+                                * which is part of how map counting
+                                * is done for shared PMDs.  Return 'true'
+                                * here.  When there is no other sharing,
+                                * huge_pmd_unshare returns false and we will
+                                * unmap the actual page and drop map count
+                                * to zero.
+                                */
+                               page_vma_mapped_walk_done(&pvmw);
+                               break;
+                       }
+               }
 
                if (IS_ENABLED(CONFIG_MIGRATION) &&
                    (flags & TTU_MIGRATION) &&
index c7ce2c1612259c45896bae9739a288966a28c970..c5ef7240cbcbba05b4ef759b3c05d1ffdb163369 100644 (file)
@@ -580,8 +580,8 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
                        struct mem_cgroup *memcg, int priority)
 {
        struct memcg_shrinker_map *map;
-       unsigned long freed = 0;
-       int ret, i;
+       unsigned long ret, freed = 0;
+       int i;
 
        if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
                return 0;
@@ -677,9 +677,8 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
                                 struct mem_cgroup *memcg,
                                 int priority)
 {
+       unsigned long ret, freed = 0;
        struct shrinker *shrinker;
-       unsigned long freed = 0;
-       int ret;
 
        if (!mem_cgroup_is_root(memcg))
                return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
index 8ba0870ecddd0fd592d16ee674b060db512b5b37..7878da76abf2d21992b322ba2a3e65386a215467 100644 (file)
@@ -1275,6 +1275,9 @@ const char * const vmstat_text[] = {
 #ifdef CONFIG_SMP
        "nr_tlb_remote_flush",
        "nr_tlb_remote_flush_received",
+#else
+       "", /* nr_tlb_remote_flush */
+       "", /* nr_tlb_remote_flush_received */
 #endif /* CONFIG_SMP */
        "nr_tlb_local_flush_all",
        "nr_tlb_local_flush_one",
@@ -1283,7 +1286,6 @@ const char * const vmstat_text[] = {
 #ifdef CONFIG_DEBUG_VM_VMACACHE
        "vmacache_find_calls",
        "vmacache_find_hits",
-       "vmacache_full_flushes",
 #endif
 #ifdef CONFIG_SWAP
        "swap_ra",
index 7b3965861013c4ed85f818efaff54336a3dc84fe..43c284158f63ee6e73ec52db1601b40b65b32c54 100644 (file)
@@ -489,9 +489,6 @@ static int bnep_session(void *arg)
 
        add_wait_queue(sk_sleep(sk), &wait);
        while (1) {
-               /* Ensure session->terminate is updated */
-               smp_mb__before_atomic();
-
                if (atomic_read(&s->terminate))
                        break;
                /* RX */
@@ -512,6 +509,10 @@ static int bnep_session(void *arg)
                                break;
                netif_wake_queue(dev);
 
+               /*
+                * wait_woken() performs the necessary memory barriers
+                * for us; see the header comment for this primitive.
+                */
                wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
        }
        remove_wait_queue(sk_sleep(sk), &wait);
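
The smp_mb__before_atomic() calls can go because wait_woken() and
woken_wake_function() provide the required ordering between the flag write
and the sleep/wake decision. The canonical shape of the loop, as a sketch
rather than the full session thread:

	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (!atomic_read(&s->terminate)) {
		/* ... RX/TX work ... */
		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(sk_sleep(sk), &wait);
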
index 7f26a5a19ff6d8fb87ef5404e32eb5e2ce0b5b53..07cfa3249f83ae6226d06aae59f27ce6b41a84e8 100644 (file)
@@ -288,9 +288,6 @@ static int cmtp_session(void *arg)
 
        add_wait_queue(sk_sleep(sk), &wait);
        while (1) {
-               /* Ensure session->terminate is updated */
-               smp_mb__before_atomic();
-
                if (atomic_read(&session->terminate))
                        break;
                if (sk->sk_state != BT_CONNECTED)
@@ -306,6 +303,10 @@ static int cmtp_session(void *arg)
 
                cmtp_process_transmit(session);
 
+               /*
+                * wait_woken() performs the necessary memory barriers
+                * for us; see the header comment for this primitive.
+                */
                wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
        }
        remove_wait_queue(sk_sleep(sk), &wait);
@@ -431,9 +432,10 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
                /* Stop session thread */
                atomic_inc(&session->terminate);
 
-               /* Ensure session->terminate is updated */
-               smp_mb__after_atomic();
-
+               /*
+                * See the comment preceding the call to wait_woken()
+                * in cmtp_session().
+                */
                wake_up_interruptible(sk_sleep(session->sock->sk));
        } else
                err = -ENOENT;
index 74b29c7d841c66192b8d697c845b24d2c2a5b820..7352fe85674be5c3e73ef50ca3702caec7a69e6c 100644 (file)
@@ -2839,6 +2839,20 @@ struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
        return NULL;
 }
 
+struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
+                               struct list_head *bdaddr_list, bdaddr_t *bdaddr,
+                               u8 type)
+{
+       struct bdaddr_list_with_irk *b;
+
+       list_for_each_entry(b, bdaddr_list, list) {
+               if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
+                       return b;
+       }
+
+       return NULL;
+}
+
 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
 {
        struct bdaddr_list *b, *n;
@@ -2871,6 +2885,35 @@ int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
        return 0;
 }
 
+int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
+                                       u8 type, u8 *peer_irk, u8 *local_irk)
+{
+       struct bdaddr_list_with_irk *entry;
+
+       if (!bacmp(bdaddr, BDADDR_ANY))
+               return -EBADF;
+
+       if (hci_bdaddr_list_lookup(list, bdaddr, type))
+               return -EEXIST;
+
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+
+       bacpy(&entry->bdaddr, bdaddr);
+       entry->bdaddr_type = type;
+
+       if (peer_irk)
+               memcpy(entry->peer_irk, peer_irk, 16);
+
+       if (local_irk)
+               memcpy(entry->local_irk, local_irk, 16);
+
+       list_add(&entry->list, list);
+
+       return 0;
+}
+
 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
 {
        struct bdaddr_list *entry;
@@ -2890,6 +2933,26 @@ int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
        return 0;
 }
 
+int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
+                                                       u8 type)
+{
+       struct bdaddr_list_with_irk *entry;
+
+       if (!bacmp(bdaddr, BDADDR_ANY)) {
+               hci_bdaddr_list_clear(list);
+               return 0;
+       }
+
+       entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
+       if (!entry)
+               return -ENOENT;
+
+       list_del(&entry->list);
+       kfree(entry);
+
+       return 0;
+}
+
 /* This function requires the caller holds hdev->lock */
 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
                                               bdaddr_t *addr, u8 addr_type)
@@ -3084,6 +3147,8 @@ struct hci_dev *hci_alloc_dev(void)
        hdev->le_max_tx_time = 0x0148;
        hdev->le_max_rx_len = 0x001b;
        hdev->le_max_rx_time = 0x0148;
+       hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
+       hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
        hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
        hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
 
index f12555f23a49a025563a6f11f174a0be71556a75..f47f8fad757ac97db775ad05fb3db3ce6285d336 100644 (file)
@@ -1454,6 +1454,45 @@ static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
        hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
 }
 
+static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
+                                        struct sk_buff *skb)
+{
+       struct hci_cp_le_add_to_resolv_list *sent;
+       __u8 status = *((__u8 *) skb->data);
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+       if (status)
+               return;
+
+       sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
+       if (!sent)
+               return;
+
+       hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
+                               sent->bdaddr_type, sent->peer_irk,
+                               sent->local_irk);
+}
+
+static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
+                                         struct sk_buff *skb)
+{
+       struct hci_cp_le_del_from_resolv_list *sent;
+       __u8 status = *((__u8 *) skb->data);
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+       if (status)
+               return;
+
+       sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
+       if (!sent)
+               return;
+
+       hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
+                           sent->bdaddr_type);
+}
+
 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
                                       struct sk_buff *skb)
 {
@@ -3279,6 +3318,14 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
                hci_cc_le_write_def_data_len(hdev, skb);
                break;
 
+       case HCI_OP_LE_ADD_TO_RESOLV_LIST:
+               hci_cc_le_add_to_resolv_list(hdev, skb);
+               break;
+
+       case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
+               hci_cc_le_del_from_resolv_list(hdev, skb);
+               break;
+
        case HCI_OP_LE_CLEAR_RESOLV_LIST:
                hci_cc_le_clear_resolv_list(hdev, skb);
                break;
index 253975cce943e5728f0fdbb0c47741516defa6b1..3734dc1788b44ee0d06b75292e58e35958181fe8 100644 (file)
@@ -1074,6 +1074,10 @@ static int hidp_session_start_sync(struct hidp_session *session)
 static void hidp_session_terminate(struct hidp_session *session)
 {
        atomic_inc(&session->terminate);
+       /*
+        * See the comment preceding the call to wait_woken()
+        * in hidp_session_run().
+        */
        wake_up_interruptible(&hidp_session_wq);
 }
 
@@ -1193,8 +1197,6 @@ static void hidp_session_run(struct hidp_session *session)
                 *    thread is woken up by ->sk_state_changed().
                 */
 
-               /* Ensure session->terminate is updated */
-               smp_mb__before_atomic();
                if (atomic_read(&session->terminate))
                        break;
 
@@ -1228,14 +1230,15 @@ static void hidp_session_run(struct hidp_session *session)
                hidp_process_transmit(session, &session->ctrl_transmit,
                                      session->ctrl_sock);
 
+               /*
+                * wait_woken() performs the necessary memory barriers
+                * for us; see the header comment for this primitive.
+                */
                wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
        }
        remove_wait_queue(&hidp_session_wq, &wait);
 
        atomic_inc(&session->terminate);
-
-       /* Ensure session->terminate is updated */
-       smp_mb__after_atomic();
 }
 
 static int hidp_session_wake_function(wait_queue_entry_t *wait,
index d17a4736e47c0abf6fe3be7df34f60bd1f32f1fb..514899f7f0d4bfa12b8dcc7424a0379b9a87891c 100644 (file)
@@ -51,9 +51,6 @@ static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
 static LIST_HEAD(chan_list);
 static DEFINE_RWLOCK(chan_list_lock);
 
-static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
-static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
-
 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
                                       u8 code, u8 ident, u16 dlen, void *data);
 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
@@ -519,8 +516,10 @@ static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
        chan->sdu_last_frag = NULL;
        chan->sdu_len = 0;
        chan->tx_credits = 0;
-       chan->rx_credits = le_max_credits;
-       chan->mps = min_t(u16, chan->imtu, le_default_mps);
+       /* Derive MPS from connection MTU to stop HCI fragmentation */
+       chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
+       /* Give enough credits for a full packet */
+       chan->rx_credits = (chan->imtu / chan->mps) + 1;
 
        skb_queue_head_init(&chan->tx_q);
 }
@@ -1282,6 +1281,8 @@ static void l2cap_le_connect(struct l2cap_chan *chan)
        if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
                return;
 
+       l2cap_le_flowctl_init(chan);
+
        req.psm     = chan->psm;
        req.scid    = cpu_to_le16(chan->scid);
        req.mtu     = cpu_to_le16(chan->imtu);
@@ -5493,8 +5494,6 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
                goto response_unlock;
        }
 
-       l2cap_le_flowctl_init(chan);
-
        bacpy(&chan->src, &conn->hcon->src);
        bacpy(&chan->dst, &conn->hcon->dst);
        chan->src_type = bdaddr_src_type(conn->hcon);
@@ -5506,6 +5505,9 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
        chan->tx_credits = __le16_to_cpu(req->credits);
 
        __l2cap_chan_add(conn, chan);
+
+       l2cap_le_flowctl_init(chan);
+
        dcid = chan->scid;
        credits = chan->rx_credits;
 
@@ -6699,13 +6701,10 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
        struct l2cap_le_credits pkt;
        u16 return_credits;
 
-       /* We return more credits to the sender only after the amount of
-        * credits falls below half of the initial amount.
-        */
-       if (chan->rx_credits >= (le_max_credits + 1) / 2)
-               return;
+       return_credits = ((chan->imtu / chan->mps) + 1) - chan->rx_credits;
 
-       return_credits = le_max_credits - chan->rx_credits;
+       if (!return_credits)
+               return;
 
        BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
 
@@ -6719,6 +6718,21 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
        l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
 }
 
+static int l2cap_le_recv(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+       int err;
+
+       BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
+
+       /* Wait for recv to confirm reception before updating the credits */
+       err = chan->ops->recv(chan, skb);
+
+       /* Update credits whenever an SDU is received */
+       l2cap_chan_le_send_credits(chan);
+
+       return err;
+}
+
 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
 {
        int err;
@@ -6737,7 +6751,11 @@ static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
        chan->rx_credits--;
        BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
 
-       l2cap_chan_le_send_credits(chan);
+       /* Update if the remote has run out of credits; this should only
+        * happen if the remote is not using the entire MPS.
+        */
+       if (!chan->rx_credits)
+               l2cap_chan_le_send_credits(chan);
 
        err = 0;
 
@@ -6763,12 +6781,22 @@ static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
                }
 
                if (skb->len == sdu_len)
-                       return chan->ops->recv(chan, skb);
+                       return l2cap_le_recv(chan, skb);
 
                chan->sdu = skb;
                chan->sdu_len = sdu_len;
                chan->sdu_last_frag = skb;
 
+               /* Detect if remote is not able to use the selected MPS */
+               if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
+                       u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
+
+                       /* Adjust the number of credits */
+                       BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
+                       chan->mps = mps_len;
+                       l2cap_chan_le_send_credits(chan);
+               }
+
                return 0;
        }
 
@@ -6785,7 +6813,7 @@ static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
        skb = NULL;
 
        if (chan->sdu->len == chan->sdu_len) {
-               err = chan->ops->recv(chan, chan->sdu);
+               err = l2cap_le_recv(chan, chan->sdu);
                if (!err) {
                        chan->sdu = NULL;
                        chan->sdu_last_frag = NULL;
@@ -7102,7 +7130,6 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
        case L2CAP_MODE_BASIC:
                break;
        case L2CAP_MODE_LE_FLOWCTL:
-               l2cap_le_flowctl_init(chan);
                break;
        case L2CAP_MODE_ERTM:
        case L2CAP_MODE_STREAMING:
@@ -7645,11 +7672,6 @@ int __init l2cap_init(void)
        l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
                                            NULL, &l2cap_debugfs_fops);
 
-       debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
-                          &le_max_credits);
-       debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
-                          &le_default_mps);
-
        return 0;
 }
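
Under the new scheme the credit pool follows directly from the MTUs. A
worked example (values assumed: connection MTU of 251 bytes, channel imtu
of 512; L2CAP_HDR_SIZE is 4):

	mps        = min(imtu, conn->mtu - L2CAP_HDR_SIZE)
	           = min(512, 251 - 4) = 247
	rx_credits = imtu / mps + 1 = 512 / 247 + 1 = 3

Three credits let the peer send one full 512-byte SDU (three frames, since
the first also carries the 2-byte SDU length) without stalling, and
l2cap_chan_le_send_credits() refills the pool back to that level once each
SDU has been handed to ->recv().
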
 
index 3bdc8f3ca259ed2d82bb9861033814d65591a51c..ccce954f814682a40ba5d8af0ab463d5b0bfda3b 100644 (file)
@@ -2434,9 +2434,8 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
        /* LE address type */
        addr_type = le_addr_type(cp->addr.type);
 
-       hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
-
-       err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
+       /* Abort any ongoing SMP pairing. Removes the LTK and IRK if they exist. */
+       err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
        if (err < 0) {
                err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
                                        MGMT_STATUS_NOT_PAIRED, &rp,
@@ -2450,8 +2449,6 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                goto done;
        }
 
-       /* Abort any ongoing SMP pairing */
-       smp_cancel_pairing(conn);
 
        /* Defer clearing up the connection parameters until closing to
         * give a chance of keeping them if a repairing happens.
index 3a7b0773536b8e226546ebe417463fcba5c92ba4..a1c1b7e8a45ca6d6c44de507d7a5ff232d776edc 100644 (file)
@@ -88,9 +88,6 @@ struct smp_dev {
        u8                      local_rand[16];
        bool                    debug_key;
 
-       u8                      min_key_size;
-       u8                      max_key_size;
-
        struct crypto_cipher    *tfm_aes;
        struct crypto_shash     *tfm_cmac;
        struct crypto_kpp       *tfm_ecdh;
@@ -720,7 +717,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
        if (rsp == NULL) {
                req->io_capability = conn->hcon->io_capability;
                req->oob_flag = oob_flag;
-               req->max_key_size = SMP_DEV(hdev)->max_key_size;
+               req->max_key_size = hdev->le_max_key_size;
                req->init_key_dist = local_dist;
                req->resp_key_dist = remote_dist;
                req->auth_req = (authreq & AUTH_REQ_MASK(hdev));
@@ -731,7 +728,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
 
        rsp->io_capability = conn->hcon->io_capability;
        rsp->oob_flag = oob_flag;
-       rsp->max_key_size = SMP_DEV(hdev)->max_key_size;
+       rsp->max_key_size = hdev->le_max_key_size;
        rsp->init_key_dist = req->init_key_dist & remote_dist;
        rsp->resp_key_dist = req->resp_key_dist & local_dist;
        rsp->auth_req = (authreq & AUTH_REQ_MASK(hdev));
@@ -745,7 +742,7 @@ static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
        struct hci_dev *hdev = conn->hcon->hdev;
        struct smp_chan *smp = chan->data;
 
-       if (max_key_size > SMP_DEV(hdev)->max_key_size ||
+       if (max_key_size > hdev->le_max_key_size ||
            max_key_size < SMP_MIN_ENC_KEY_SIZE)
                return SMP_ENC_KEY_SIZE;
 
@@ -2422,30 +2419,51 @@ unlock:
        return ret;
 }
 
-void smp_cancel_pairing(struct hci_conn *hcon)
+int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                 u8 addr_type)
 {
-       struct l2cap_conn *conn = hcon->l2cap_data;
+       struct hci_conn *hcon;
+       struct l2cap_conn *conn;
        struct l2cap_chan *chan;
        struct smp_chan *smp;
+       int err;
+
+       err = hci_remove_ltk(hdev, bdaddr, addr_type);
+       hci_remove_irk(hdev, bdaddr, addr_type);
 
+       hcon = hci_conn_hash_lookup_le(hdev, bdaddr, addr_type);
+       if (!hcon)
+               goto done;
+
+       conn = hcon->l2cap_data;
        if (!conn)
-               return;
+               goto done;
 
        chan = conn->smp;
        if (!chan)
-               return;
+               goto done;
 
        l2cap_chan_lock(chan);
 
        smp = chan->data;
        if (smp) {
+               /* Set keys to NULL to make sure smp_failure() does not try to
+                * remove and free already invalidated rcu list entries. */
+               smp->ltk = NULL;
+               smp->slave_ltk = NULL;
+               smp->remote_irk = NULL;
+
                if (test_bit(SMP_FLAG_COMPLETE, &smp->flags))
                        smp_failure(conn, 0);
                else
                        smp_failure(conn, SMP_UNSPECIFIED);
+               err = 0;
        }
 
        l2cap_chan_unlock(chan);
+
+done:
+       return err;
 }
 
 static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
@@ -3243,8 +3261,6 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
        smp->tfm_aes = tfm_aes;
        smp->tfm_cmac = tfm_cmac;
        smp->tfm_ecdh = tfm_ecdh;
-       smp->min_key_size = SMP_MIN_ENC_KEY_SIZE;
-       smp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
 
 create_chan:
        chan = l2cap_chan_create();
@@ -3370,7 +3386,7 @@ static ssize_t le_min_key_size_read(struct file *file,
        struct hci_dev *hdev = file->private_data;
        char buf[4];
 
-       snprintf(buf, sizeof(buf), "%2u\n", SMP_DEV(hdev)->min_key_size);
+       snprintf(buf, sizeof(buf), "%2u\n", hdev->le_min_key_size);
 
        return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
 }
@@ -3391,11 +3407,11 @@ static ssize_t le_min_key_size_write(struct file *file,
 
        sscanf(buf, "%hhu", &key_size);
 
-       if (key_size > SMP_DEV(hdev)->max_key_size ||
+       if (key_size > hdev->le_max_key_size ||
            key_size < SMP_MIN_ENC_KEY_SIZE)
                return -EINVAL;
 
-       SMP_DEV(hdev)->min_key_size = key_size;
+       hdev->le_min_key_size = key_size;
 
        return count;
 }
@@ -3414,7 +3430,7 @@ static ssize_t le_max_key_size_read(struct file *file,
        struct hci_dev *hdev = file->private_data;
        char buf[4];
 
-       snprintf(buf, sizeof(buf), "%2u\n", SMP_DEV(hdev)->max_key_size);
+       snprintf(buf, sizeof(buf), "%2u\n", hdev->le_max_key_size);
 
        return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
 }
@@ -3436,10 +3452,10 @@ static ssize_t le_max_key_size_write(struct file *file,
        sscanf(buf, "%hhu", &key_size);
 
        if (key_size > SMP_MAX_ENC_KEY_SIZE ||
-           key_size < SMP_DEV(hdev)->min_key_size)
+           key_size < hdev->le_min_key_size)
                return -EINVAL;
 
-       SMP_DEV(hdev)->max_key_size = key_size;
+       hdev->le_max_key_size = key_size;
 
        return count;
 }
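
These debugfs attributes are the user-visible face of the new hdev fields. A minimal userspace sketch for reading one of them follows; the /sys/kernel/debug/bluetooth/hci0 path assumes debugfs is mounted in the usual place and that hci0 is the controller of interest:

    /* Hypothetical reader for the le_min_key_size attribute served by
     * le_min_key_size_read() above; error handling kept minimal. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[8] = { 0 };
            int fd = open("/sys/kernel/debug/bluetooth/hci0/le_min_key_size",
                          O_RDONLY);

            if (fd < 0)
                    return 1;
            if (read(fd, buf, sizeof(buf) - 1) > 0)
                    printf("le_min_key_size: %s", buf);
            close(fd);
            return 0;
    }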
index 0ff6247eaa6c0e8c19223c014d11a98d8adaee8a..121edadd5f8da8761c7ef464c22e6455a25d2d27 100644 (file)
@@ -181,7 +181,8 @@ enum smp_key_pref {
 };
 
 /* SMP Commands */
-void smp_cancel_pairing(struct hci_conn *hcon);
+int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                 u8 addr_type);
 bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
                             enum smp_key_pref key_pref);
 int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
index f0fc182d3db77eb311d91f7faef4e8a6f85886b3..b64e1649993b78939c58394aee788b48b8cefffe 100644 (file)
@@ -59,7 +59,7 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
        req.is_set = is_set;
        req.pid = current->pid;
        req.cmd = optname;
-       req.addr = (long)optval;
+       req.addr = (long __force __user)optval;
        req.len = optlen;
        mutex_lock(&bpfilter_lock);
        if (!info.pid)
@@ -98,7 +98,7 @@ static int __init load_umh(void)
        pr_info("Loaded bpfilter_umh pid %d\n", info.pid);
 
        /* health check that usermode process started correctly */
-       if (__bpfilter_process_sockopt(NULL, 0, 0, 0, 0) != 0) {
+       if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) {
                stop_umh();
                return -EFAULT;
        }
index a56ed7f2a3a3050c0bf26ea452bbfe5803aea822..74331690a390a52ee090f45133b9b1bb24070400 100644 (file)
@@ -504,6 +504,7 @@ static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
                fdb->added_by_user = 0;
                fdb->added_by_external_learn = 0;
                fdb->offloaded = 0;
+               fdb->is_sticky = 0;
                fdb->updated = fdb->used = jiffies;
                if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
                                                  &fdb->rhnode,
index a4a848bf827b0e61fe722afccca3cde5d0062967..a7ea2d431714300bec42f4c28c6cdba46d22d2c6 100644 (file)
@@ -162,6 +162,29 @@ out:
        return err;
 }
 
+static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
+                                struct netlink_ext_ack *extack)
+{
+       struct br_port_msg *bpm;
+
+       if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
+               NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
+               return -EINVAL;
+       }
+
+       bpm = nlmsg_data(nlh);
+       if (bpm->ifindex) {
+               NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
+               return -EINVAL;
+       }
+       if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
+               NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct net_device *dev;
@@ -169,6 +192,13 @@ static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
        struct nlmsghdr *nlh = NULL;
        int idx = 0, s_idx;
 
+       if (cb->strict_check) {
+               int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);
+
+               if (err < 0)
+                       return err;
+       }
+
        s_idx = cb->args[0];
 
        rcu_read_lock();
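
Note that the cb->strict_check gate here (and in the other dump handlers below) only fires for netlink sockets that opted in. A minimal sketch of the userspace opt-in, with fallback defines in case older uapi headers lack the names (NETLINK_GET_STRICT_CHK was added in the same net-next cycle as this validation):

    /* Opt a netlink socket into strict dump-request validation so that
     * kernel-side validators such as br_mdb_valid_dump_req() above
     * reject malformed requests instead of silently ignoring them. */
    #include <sys/socket.h>
    #include <linux/netlink.h>

    #ifndef SOL_NETLINK
    #define SOL_NETLINK 270
    #endif
    #ifndef NETLINK_GET_STRICT_CHK
    #define NETLINK_GET_STRICT_CHK 12
    #endif

    static int enable_strict_check(int fd)
    {
            int one = 1;

            return setsockopt(fd, SOL_NETLINK, NETLINK_GET_STRICT_CHK,
                              &one, sizeof(one));
    }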
index 928024d8360d18649b34ba16bd93bf606103917d..024139b51d3a55cdcbb418ec0d11cea5e8e02f02 100644 (file)
@@ -1976,6 +1976,7 @@ void br_multicast_init(struct net_bridge *br)
        br->ip6_other_query.delay_time = 0;
        br->ip6_querier.port = NULL;
 #endif
+       br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
        br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
 
        spin_lock_init(&br->multicast_lock);
index e0a3b038d05228544ec46624b466ee3f72c21196..b1b5e8516724ae442e2a107762dc382f9d1587e0 100644 (file)
@@ -836,7 +836,8 @@ static unsigned int ip_sabotage_in(void *priv,
                                   struct sk_buff *skb,
                                   const struct nf_hook_state *state)
 {
-       if (skb->nf_bridge && !skb->nf_bridge->in_prerouting) {
+       if (skb->nf_bridge && !skb->nf_bridge->in_prerouting &&
+           !netif_is_l3_master(skb->dev)) {
                state->okfn(state->net, state->sk, skb);
                return NF_STOLEN;
        }
index 8c0ed225e2801a741f81eaa6a626eb191884f0aa..6dae81d65d5c4ed958a9044278f4c3b8b9281ca4 100644 (file)
@@ -1626,7 +1626,7 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
                if (!ops->eswitch_mode_set)
                        return -EOPNOTSUPP;
                mode = nla_get_u16(info->attrs[DEVLINK_ATTR_ESWITCH_MODE]);
-               err = ops->eswitch_mode_set(devlink, mode);
+               err = ops->eswitch_mode_set(devlink, mode, info->extack);
                if (err)
                        return err;
        }
@@ -1636,7 +1636,8 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
                        return -EOPNOTSUPP;
                inline_mode = nla_get_u8(
                                info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]);
-               err = ops->eswitch_inline_mode_set(devlink, inline_mode);
+               err = ops->eswitch_inline_mode_set(devlink, inline_mode,
+                                                  info->extack);
                if (err)
                        return err;
        }
@@ -1645,7 +1646,8 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
                if (!ops->eswitch_encap_mode_set)
                        return -EOPNOTSUPP;
                encap_mode = nla_get_u8(info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]);
-               err = ops->eswitch_encap_mode_set(devlink, encap_mode);
+               err = ops->eswitch_encap_mode_set(devlink, encap_mode,
+                                                 info->extack);
                if (err)
                        return err;
        }
@@ -2675,6 +2677,21 @@ static const struct devlink_param devlink_param_generic[] = {
                .name = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME,
                .type = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE,
        },
+       {
+               .id = DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI,
+               .name = DEVLINK_PARAM_GENERIC_IGNORE_ARI_NAME,
+               .type = DEVLINK_PARAM_GENERIC_IGNORE_ARI_TYPE,
+       },
+       {
+               .id = DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+               .name = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_NAME,
+               .type = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_TYPE,
+       },
+       {
+               .id = DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+               .name = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_NAME,
+               .type = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_TYPE,
+       },
 };
 
 static int devlink_param_generic_verify(const struct devlink_param *param)
@@ -3487,7 +3504,7 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
        start_offset = *((u64 *)&cb->args[0]);
 
        err = nlmsg_parse(cb->nlh, GENL_HDRLEN + devlink_nl_family.hdrsize,
-                         attrs, DEVLINK_ATTR_MAX, ops->policy, NULL);
+                         attrs, DEVLINK_ATTR_MAX, ops->policy, cb->extack);
        if (err)
                goto out;
 
index 5a788adeba0b27221bf0d816d1a0127aa8dafbd4..4cc603dfc9efb67954a47482ac5a7e168a09e1c2 100644 (file)
@@ -1396,6 +1396,7 @@ static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
 static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
 {
        struct ethtool_wolinfo wol;
+       int ret;
 
        if (!dev->ethtool_ops->set_wol)
                return -EOPNOTSUPP;
@@ -1403,7 +1404,13 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
        if (copy_from_user(&wol, useraddr, sizeof(wol)))
                return -EFAULT;
 
-       return dev->ethtool_ops->set_wol(dev, &wol);
+       ret = dev->ethtool_ops->set_wol(dev, &wol);
+       if (ret)
+               return ret;
+
+       dev->wol_enabled = !!wol.wolopts;
+
+       return 0;
 }
 
 static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
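
For context, ethtool_set_wol() is reached through the ETHTOOL_SWOL ioctl. A minimal sketch of the userspace side; "eth0" is a placeholder and fd can be any AF_INET datagram socket:

    /* Request magic-packet wake-up via ETHTOOL_SWOL; on success the
     * kernel now also latches dev->wol_enabled as shown above. */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    static int set_wol_magic(int fd)
    {
            struct ethtool_wolinfo wol = {
                    .cmd = ETHTOOL_SWOL,
                    .wolopts = WAKE_MAGIC,
            };
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&wol;

            return ioctl(fd, SIOCETHTOOL, &ifr);
    }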
index 0ff3953f64aa7830a07dad9c0873813eb944fdd1..ffbb827723a236a6b187a10a33886561727ae0be 100644 (file)
@@ -1063,13 +1063,47 @@ skip:
        return err;
 }
 
+static int fib_valid_dumprule_req(const struct nlmsghdr *nlh,
+                                  struct netlink_ext_ack *extack)
+{
+       struct fib_rule_hdr *frh;
+
+       if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) {
+               NL_SET_ERR_MSG(extack, "Invalid header for fib rule dump request");
+               return -EINVAL;
+       }
+
+       frh = nlmsg_data(nlh);
+       if (frh->dst_len || frh->src_len || frh->tos || frh->table ||
+           frh->res1 || frh->res2 || frh->action || frh->flags) {
+               NL_SET_ERR_MSG(extack,
+                              "Invalid values in header for fib rule dump request");
+               return -EINVAL;
+       }
+
+       if (nlmsg_attrlen(nlh, sizeof(*frh))) {
+               NL_SET_ERR_MSG(extack, "Invalid data after header in fib rule dump request");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       const struct nlmsghdr *nlh = cb->nlh;
        struct net *net = sock_net(skb->sk);
        struct fib_rules_ops *ops;
        int idx = 0, family;
 
-       family = rtnl_msg_family(cb->nlh);
+       if (cb->strict_check) {
+               int err = fib_valid_dumprule_req(nlh, cb->extack);
+
+               if (err < 0)
+                       return err;
+       }
+
+       family = rtnl_msg_family(nlh);
        if (family != AF_UNSPEC) {
                /* Protocol specific dump request */
                ops = lookup_rules_ops(net, family);
index 65a2e820364f373f8a52b3088e9ccf279d1020ae..9bf1b9ad17806dfaa579317408e5c4707d014cc0 100644 (file)
@@ -162,7 +162,7 @@ __gnet_stats_copy_basic(const seqcount_t *running,
 }
 EXPORT_SYMBOL(__gnet_stats_copy_basic);
 
-int
+static int
 ___gnet_stats_copy_basic(const seqcount_t *running,
                         struct gnet_dump *d,
                         struct gnet_stats_basic_cpu __percpu *cpu,
index 20e0d3308148a98a16c5a446cc95506a66680f02..dc1389b8beb120a90ba65c5294c051da3e15b909 100644 (file)
@@ -2164,15 +2164,47 @@ errout:
        return err;
 }
 
+static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
+                                   struct netlink_ext_ack *extack)
+{
+       struct ndtmsg *ndtm;
+
+       if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
+               NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
+               return -EINVAL;
+       }
+
+       ndtm = nlmsg_data(nlh);
+       if (ndtm->ndtm_pad1  || ndtm->ndtm_pad2) {
+               NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
+               return -EINVAL;
+       }
+
+       if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
+               NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       const struct nlmsghdr *nlh = cb->nlh;
        struct net *net = sock_net(skb->sk);
        int family, tidx, nidx = 0;
        int tbl_skip = cb->args[0];
        int neigh_skip = cb->args[1];
        struct neigh_table *tbl;
 
-       family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
+       if (cb->strict_check) {
+               int err = neightbl_valid_dump_info(nlh, cb->extack);
+
+               if (err < 0)
+                       return err;
+       }
+
+       family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
 
        for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
                struct neigh_parms *p;
@@ -2185,7 +2217,7 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
                        continue;
 
                if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
-                                      cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
+                                      nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
                                       NLM_F_MULTI) < 0)
                        break;
 
@@ -2200,7 +2232,7 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 
                        if (neightbl_fill_param_info(skb, tbl, p,
                                                     NETLINK_CB(cb->skb).portid,
-                                                    cb->nlh->nlmsg_seq,
+                                                    nlh->nlmsg_seq,
                                                     RTM_NEWNEIGHTBL,
                                                     NLM_F_MULTI) < 0)
                                goto out;
@@ -2329,35 +2361,24 @@ static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
        return false;
 }
 
+struct neigh_dump_filter {
+       int master_idx;
+       int dev_idx;
+};
+
 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
-                           struct netlink_callback *cb)
+                           struct netlink_callback *cb,
+                           struct neigh_dump_filter *filter)
 {
        struct net *net = sock_net(skb->sk);
-       const struct nlmsghdr *nlh = cb->nlh;
-       struct nlattr *tb[NDA_MAX + 1];
        struct neighbour *n;
        int rc, h, s_h = cb->args[1];
        int idx, s_idx = idx = cb->args[2];
        struct neigh_hash_table *nht;
-       int filter_master_idx = 0, filter_idx = 0;
        unsigned int flags = NLM_F_MULTI;
-       int err;
 
-       err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL);
-       if (!err) {
-               if (tb[NDA_IFINDEX]) {
-                       if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
-                               return -EINVAL;
-                       filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
-               }
-               if (tb[NDA_MASTER]) {
-                       if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
-                               return -EINVAL;
-                       filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
-               }
-               if (filter_idx || filter_master_idx)
-                       flags |= NLM_F_DUMP_FILTERED;
-       }
+       if (filter->dev_idx || filter->master_idx)
+               flags |= NLM_F_DUMP_FILTERED;
 
        rcu_read_lock_bh();
        nht = rcu_dereference_bh(tbl->nht);
@@ -2370,8 +2391,8 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
                     n = rcu_dereference_bh(n->next)) {
                        if (idx < s_idx || !net_eq(dev_net(n->dev), net))
                                goto next;
-                       if (neigh_ifindex_filtered(n->dev, filter_idx) ||
-                           neigh_master_filtered(n->dev, filter_master_idx))
+                       if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
+                           neigh_master_filtered(n->dev, filter->master_idx))
                                goto next;
                        if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
                                            cb->nlh->nlmsg_seq,
@@ -2393,12 +2414,17 @@ out:
 }
 
 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
-                            struct netlink_callback *cb)
+                            struct netlink_callback *cb,
+                            struct neigh_dump_filter *filter)
 {
        struct pneigh_entry *n;
        struct net *net = sock_net(skb->sk);
        int rc, h, s_h = cb->args[3];
        int idx, s_idx = idx = cb->args[4];
+       unsigned int flags = NLM_F_MULTI;
+
+       if (filter->dev_idx || filter->master_idx)
+               flags |= NLM_F_DUMP_FILTERED;
 
        read_lock_bh(&tbl->lock);
 
@@ -2408,10 +2434,12 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
                for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
                        if (idx < s_idx || pneigh_net(n) != net)
                                goto next;
+                       if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
+                           neigh_master_filtered(n->dev, filter->master_idx))
+                               goto next;
                        if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
                                            cb->nlh->nlmsg_seq,
-                                           RTM_NEWNEIGH,
-                                           NLM_F_MULTI, tbl) < 0) {
+                                           RTM_NEWNEIGH, flags, tbl) < 0) {
                                read_unlock_bh(&tbl->lock);
                                rc = -1;
                                goto out;
@@ -2430,22 +2458,91 @@ out:
 
 }
 
+static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
+                               bool strict_check,
+                               struct neigh_dump_filter *filter,
+                               struct netlink_ext_ack *extack)
+{
+       struct nlattr *tb[NDA_MAX + 1];
+       int err, i;
+
+       if (strict_check) {
+               struct ndmsg *ndm;
+
+               if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
+                       NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
+                       return -EINVAL;
+               }
+
+               ndm = nlmsg_data(nlh);
+               if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_ifindex ||
+                   ndm->ndm_state || ndm->ndm_flags || ndm->ndm_type) {
+                       NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
+                       return -EINVAL;
+               }
+
+               err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
+                                        NULL, extack);
+       } else {
+               err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
+                                 NULL, extack);
+       }
+       if (err < 0)
+               return err;
+
+       for (i = 0; i <= NDA_MAX; ++i) {
+               if (!tb[i])
+                       continue;
+
+               /* all new attributes should require strict_check */
+               switch (i) {
+               case NDA_IFINDEX:
+                       if (nla_len(tb[i]) != sizeof(u32)) {
+                               NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in neighbor dump request");
+                               return -EINVAL;
+                       }
+                       filter->dev_idx = nla_get_u32(tb[i]);
+                       break;
+               case NDA_MASTER:
+                       if (nla_len(tb[i]) != sizeof(u32)) {
+                               NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in neighbor dump request");
+                               return -EINVAL;
+                       }
+                       filter->master_idx = nla_get_u32(tb[i]);
+                       break;
+               default:
+                       if (strict_check) {
+                               NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
+                               return -EINVAL;
+                       }
+               }
+       }
+
+       return 0;
+}
+
 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       const struct nlmsghdr *nlh = cb->nlh;
+       struct neigh_dump_filter filter = {};
        struct neigh_table *tbl;
        int t, family, s_t;
        int proxy = 0;
        int err;
 
-       family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
+       family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
 
        /* check for full ndmsg structure presence, family member is
         * the same for both structures
         */
-       if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
-           ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
+       if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
+           ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
                proxy = 1;
 
+       err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
+       if (err < 0 && cb->strict_check)
+               return err;
+
        s_t = cb->args[0];
 
        for (t = 0; t < NEIGH_NR_TABLES; t++) {
@@ -2459,9 +2556,9 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
                        memset(&cb->args[1], 0, sizeof(cb->args) -
                                                sizeof(cb->args[0]));
                if (proxy)
-                       err = pneigh_dump_table(tbl, skb, cb);
+                       err = pneigh_dump_table(tbl, skb, cb, &filter);
                else
-                       err = neigh_dump_table(tbl, skb, cb);
+                       err = neigh_dump_table(tbl, skb, cb, &filter);
                if (err < 0)
                        break;
        }
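
With the filter now parsed up front, a strict-mode RTM_GETNEIGH dump filtered by bridge master can be built roughly as below (ifindex 4 is a placeholder; the iproute2 equivalent is 'ip neigh show master br0'):

    /* Sketch of a strict RTM_GETNEIGH dump request carrying NDA_MASTER.
     * Under strict checking every ndmsg field other than the family must
     * be zero, as enforced by neigh_valid_dump_req() above. */
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>
    #include <linux/neighbour.h>

    struct {
            struct nlmsghdr nlh;
            struct ndmsg ndm;
            struct nlattr attr;
            __u32 master_idx;
    } req = {
            .nlh = {
                    .nlmsg_len   = sizeof(req),
                    .nlmsg_type  = RTM_GETNEIGH,
                    .nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
            },
            .ndm  = { .ndm_family = AF_UNSPEC },
            .attr = {
                    .nla_len  = NLA_HDRLEN + sizeof(__u32),
                    .nla_type = NDA_MASTER,
            },
            .master_idx = 4,        /* placeholder bridge ifindex */
    };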
index 670c84b1bfc23bbb9dde13b4df21a503350d390e..fefe72774aeb3a78d2bec6894cd9494741a78ee4 100644 (file)
@@ -853,6 +853,12 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
                .s_idx = cb->args[0],
        };
 
+       if (cb->strict_check &&
+           nlmsg_attrlen(cb->nlh, sizeof(struct rtgenmsg))) {
+               NL_SET_ERR_MSG(cb->extack, "Unknown data in network namespace id dump request");
+               return -EINVAL;
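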
+       }
+
        spin_lock_bh(&net->nsid_lock);
        idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
        spin_unlock_bh(&net->nsid_lock);
index 3219a2932463096566ce8ff336ecdf699422dd65..de1d1ba92f2de39292987e1408db0c2b821c4b6d 100644 (file)
@@ -135,27 +135,9 @@ static void queue_process(struct work_struct *work)
        }
 }
 
-/*
- * Check whether delayed processing was scheduled for our NIC. If so,
- * we attempt to grab the poll lock and use ->poll() to pump the card.
- * If this fails, either we've recursed in ->poll() or it's already
- * running on another CPU.
- *
- * Note: we don't mask interrupts with this lock because we're using
- * trylock here and interrupts are already disabled in the softirq
- * case. Further, we test the poll_owner to avoid recursion on UP
- * systems where the lock doesn't exist.
- */
 static void poll_one_napi(struct napi_struct *napi)
 {
-       int work = 0;
-
-       /* net_rx_action's ->poll() invocations and our's are
-        * synchronized by this test which is only made while
-        * holding the napi->poll_lock.
-        */
-       if (!test_bit(NAPI_STATE_SCHED, &napi->state))
-               return;
+       int work;
 
        /* If we set this bit but see that it has already been set,
         * that indicates that napi has been disabled and we need
@@ -330,6 +312,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
        /* It is up to the caller to keep npinfo alive. */
        struct netpoll_info *npinfo;
 
+       rcu_read_lock_bh();
        lockdep_assert_irqs_disabled();
 
        npinfo = rcu_dereference_bh(np->dev->npinfo);
@@ -374,6 +357,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                skb_queue_tail(&npinfo->txq, skb);
                schedule_delayed_work(&npinfo->tx_work,0);
        }
+       rcu_read_unlock_bh();
 }
 EXPORT_SYMBOL(netpoll_send_skb_on_dev);
 
index 35162e1b06adcd8314c1080318cb8932caf1c972..c894c4af89817a780b7da6e2fbb03a7e0a6f7518 100644 (file)
@@ -1878,8 +1878,52 @@ struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
 }
 EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);
 
+static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
+                                     bool strict_check, struct nlattr **tb,
+                                     struct netlink_ext_ack *extack)
+{
+       int hdrlen;
+
+       if (strict_check) {
+               struct ifinfomsg *ifm;
+
+               if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
+                       NL_SET_ERR_MSG(extack, "Invalid header for link dump");
+                       return -EINVAL;
+               }
+
+               ifm = nlmsg_data(nlh);
+               if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
+                   ifm->ifi_change) {
+                       NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
+                       return -EINVAL;
+               }
+               if (ifm->ifi_index) {
+                       NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
+                       return -EINVAL;
+               }
+
+               return nlmsg_parse_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
+                                         ifla_policy, extack);
+       }
+
+       /* A hack to preserve kernel<->userspace interface.
+        * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
+        * However, before Linux v3.9 the code here assumed rtgenmsg and that's
+        * what iproute2 < v3.9.0 used.
+        * We can detect the old iproute2. Even including the IFLA_EXT_MASK
+        * attribute, its netlink message is shorter than struct ifinfomsg.
+        */
+       hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
+                sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
+
+       return nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, extack);
+}
+
 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       struct netlink_ext_ack *extack = cb->extack;
+       const struct nlmsghdr *nlh = cb->nlh;
        struct net *net = sock_net(skb->sk);
        struct net *tgt_net = net;
        int h, s_h;
@@ -1892,46 +1936,54 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
        unsigned int flags = NLM_F_MULTI;
        int master_idx = 0;
        int netnsid = -1;
-       int err;
-       int hdrlen;
+       int err, i;
 
        s_h = cb->args[0];
        s_idx = cb->args[1];
 
-       /* A hack to preserve kernel<->userspace interface.
-        * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
-        * However, before Linux v3.9 the code here assumed rtgenmsg and that's
-        * what iproute2 < v3.9.0 used.
-        * We can detect the old iproute2. Even including the IFLA_EXT_MASK
-        * attribute, its netlink message is shorter than struct ifinfomsg.
-        */
-       hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ?
-                sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
+       err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
+       if (err < 0) {
+               if (cb->strict_check)
+                       return err;
 
-       if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX,
-                       ifla_policy, NULL) >= 0) {
-               if (tb[IFLA_TARGET_NETNSID]) {
-                       netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
+               goto walk_entries;
+       }
+
+       for (i = 0; i <= IFLA_MAX; ++i) {
+               if (!tb[i])
+                       continue;
+
+               /* new attributes should only be added with strict checking */
+               switch (i) {
+               case IFLA_TARGET_NETNSID:
+                       netnsid = nla_get_s32(tb[i]);
                        tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
                        if (IS_ERR(tgt_net)) {
-                               tgt_net = net;
-                               netnsid = -1;
+                               NL_SET_ERR_MSG(extack, "Invalid target network namespace id");
+                               return PTR_ERR(tgt_net);
+                       }
+                       break;
+               case IFLA_EXT_MASK:
+                       ext_filter_mask = nla_get_u32(tb[i]);
+                       break;
+               case IFLA_MASTER:
+                       master_idx = nla_get_u32(tb[i]);
+                       break;
+               case IFLA_LINKINFO:
+                       kind_ops = linkinfo_to_kind_ops(tb[i]);
+                       break;
+               default:
+                       if (cb->strict_check) {
+                               NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request");
+                               return -EINVAL;
                        }
                }
-
-               if (tb[IFLA_EXT_MASK])
-                       ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
-
-               if (tb[IFLA_MASTER])
-                       master_idx = nla_get_u32(tb[IFLA_MASTER]);
-
-               if (tb[IFLA_LINKINFO])
-                       kind_ops = linkinfo_to_kind_ops(tb[IFLA_LINKINFO]);
-
-               if (master_idx || kind_ops)
-                       flags |= NLM_F_DUMP_FILTERED;
        }
 
+       if (master_idx || kind_ops)
+               flags |= NLM_F_DUMP_FILTERED;
+
+walk_entries:
        for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
                idx = 0;
                head = &tgt_net->dev_index_head[h];
@@ -1943,8 +1995,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
                        err = rtnl_fill_ifinfo(skb, dev, net,
                                               RTM_NEWLINK,
                                               NETLINK_CB(cb->skb).portid,
-                                              cb->nlh->nlmsg_seq, 0,
-                                              flags,
+                                              nlh->nlmsg_seq, 0, flags,
                                               ext_filter_mask, 0, NULL, 0,
                                               netnsid);
 
@@ -2852,6 +2903,12 @@ struct net_device *rtnl_create_link(struct net *net,
        else if (ops->get_num_rx_queues)
                num_rx_queues = ops->get_num_rx_queues();
 
+       if (num_tx_queues < 1 || num_tx_queues > 4096)
+               return ERR_PTR(-EINVAL);
+
+       if (num_rx_queues < 1 || num_rx_queues > 4096)
+               return ERR_PTR(-EINVAL);
+
        dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
                               ops->setup, num_tx_queues, num_rx_queues);
        if (!dev)
@@ -3742,14 +3799,98 @@ out:
 }
 EXPORT_SYMBOL(ndo_dflt_fdb_dump);
 
+static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
+                                int *br_idx, int *brport_idx,
+                                struct netlink_ext_ack *extack)
+{
+       struct nlattr *tb[NDA_MAX + 1];
+       struct ndmsg *ndm;
+       int err, i;
+
+       if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
+               NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request");
+               return -EINVAL;
+       }
+
+       ndm = nlmsg_data(nlh);
+       if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
+           ndm->ndm_flags || ndm->ndm_type) {
+               NL_SET_ERR_MSG(extack, "Invalid values in header for fbd dump request");
+               return -EINVAL;
+       }
+
+       err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
+                                NULL, extack);
+       if (err < 0)
+               return err;
+
+       *brport_idx = ndm->ndm_ifindex;
+       for (i = 0; i <= NDA_MAX; ++i) {
+               if (!tb[i])
+                       continue;
+
+               switch (i) {
+               case NDA_IFINDEX:
+                       if (nla_len(tb[i]) != sizeof(u32)) {
+                               NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request");
+                               return -EINVAL;
+                       }
+                       *brport_idx = nla_get_u32(tb[NDA_IFINDEX]);
+                       break;
+               case NDA_MASTER:
+                       if (nla_len(tb[i]) != sizeof(u32)) {
+                               NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request");
+                               return -EINVAL;
+                       }
+                       *br_idx = nla_get_u32(tb[NDA_MASTER]);
+                       break;
+               default:
+                       NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request");
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
+                                int *br_idx, int *brport_idx,
+                                struct netlink_ext_ack *extack)
+{
+       struct ifinfomsg *ifm = nlmsg_data(nlh);
+       struct nlattr *tb[IFLA_MAX+1];
+       int err;
+
+       /* A hack to preserve kernel<->userspace interface.
+        * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
+        * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
+        * So, check for ndmsg with an optional u32 attribute (not used here).
+        * Fortunately these sizes don't conflict with the size of ifinfomsg
+        * with an optional attribute.
+        */
+       if (nlmsg_len(nlh) != sizeof(struct ndmsg) &&
+           (nlmsg_len(nlh) != sizeof(struct ndmsg) +
+            nla_attr_size(sizeof(u32)))) {
+               err = nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
+                                 ifla_policy, extack);
+               if (err < 0) {
+                       return -EINVAL;
+               } else if (err == 0) {
+                       if (tb[IFLA_MASTER])
+                               *br_idx = nla_get_u32(tb[IFLA_MASTER]);
+               }
+
+               *brport_idx = ifm->ifi_index;
+       }
+       return 0;
+}
+
 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct net_device *dev;
-       struct nlattr *tb[IFLA_MAX+1];
        struct net_device *br_dev = NULL;
        const struct net_device_ops *ops = NULL;
        const struct net_device_ops *cops = NULL;
-       struct ifinfomsg *ifm = nlmsg_data(cb->nlh);
        struct net *net = sock_net(skb->sk);
        struct hlist_head *head;
        int brport_idx = 0;
@@ -3759,16 +3900,14 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
        int err = 0;
        int fidx = 0;
 
-       err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
-                         IFLA_MAX, ifla_policy, NULL);
-       if (err < 0) {
-               return -EINVAL;
-       } else if (err == 0) {
-               if (tb[IFLA_MASTER])
-                       br_idx = nla_get_u32(tb[IFLA_MASTER]);
-       }
-
-       brport_idx = ifm->ifi_index;
+       if (cb->strict_check)
+               err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx,
+                                           cb->extack);
+       else
+               err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx,
+                                           cb->extack);
+       if (err < 0)
+               return err;
 
        if (br_idx) {
                br_dev = __dev_get_by_index(net, br_idx);
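
Concretely, the legacy heuristic in valid_fdb_dump_legacy() above works because the two header encodings can never share a length: sizeof(struct ndmsg) = 12 and nla_attr_size(sizeof(u32)) = NLA_HDRLEN + 4 = 8, so legacy ndmsg requests arrive as exactly 12 or 20 bytes of payload, while ifinfomsg requests start at sizeof(struct ifinfomsg) = 16 bytes (24 with one u32 attribute attached).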
@@ -3953,28 +4092,72 @@ nla_put_failure:
 }
 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
 
+static int valid_bridge_getlink_req(const struct nlmsghdr *nlh,
+                                   bool strict_check, u32 *filter_mask,
+                                   struct netlink_ext_ack *extack)
+{
+       struct nlattr *tb[IFLA_MAX+1];
+       int err, i;
+
+       if (strict_check) {
+               struct ifinfomsg *ifm;
+
+               if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
+                       NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump");
+                       return -EINVAL;
+               }
+
+               ifm = nlmsg_data(nlh);
+               if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
+                   ifm->ifi_change || ifm->ifi_index) {
+                       NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request");
+                       return -EINVAL;
+               }
+
+               err = nlmsg_parse_strict(nlh, sizeof(struct ifinfomsg), tb,
+                                        IFLA_MAX, ifla_policy, extack);
+       } else {
+               err = nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb,
+                                 IFLA_MAX, ifla_policy, extack);
+       }
+       if (err < 0)
+               return err;
+
+       /* new attributes should only be added with strict checking */
+       for (i = 0; i <= IFLA_MAX; ++i) {
+               if (!tb[i])
+                       continue;
+
+               switch (i) {
+               case IFLA_EXT_MASK:
+                       *filter_mask = nla_get_u32(tb[i]);
+                       break;
+               default:
+                       if (strict_check) {
+                               NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request");
+                               return -EINVAL;
+                       }
+               }
+       }
+
+       return 0;
+}
+
 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       const struct nlmsghdr *nlh = cb->nlh;
        struct net *net = sock_net(skb->sk);
        struct net_device *dev;
        int idx = 0;
        u32 portid = NETLINK_CB(cb->skb).portid;
-       u32 seq = cb->nlh->nlmsg_seq;
+       u32 seq = nlh->nlmsg_seq;
        u32 filter_mask = 0;
        int err;
 
-       if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
-               struct nlattr *extfilt;
-
-               extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
-                                         IFLA_EXT_MASK);
-               if (extfilt) {
-                       if (nla_len(extfilt) < sizeof(filter_mask))
-                               return -EINVAL;
-
-                       filter_mask = nla_get_u32(extfilt);
-               }
-       }
+       err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask,
+                                      cb->extack);
+       if (err < 0 && cb->strict_check)
+               return err;
 
        rcu_read_lock();
        for_each_netdev_rcu(net, dev) {
@@ -4568,6 +4751,7 @@ static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       struct netlink_ext_ack *extack = cb->extack;
        int h, s_h, err, s_idx, s_idxattr, s_prividx;
        struct net *net = sock_net(skb->sk);
        unsigned int flags = NLM_F_MULTI;
@@ -4584,13 +4768,32 @@ static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
        cb->seq = net->dev_base_seq;
 
-       if (nlmsg_len(cb->nlh) < sizeof(*ifsm))
+       if (nlmsg_len(cb->nlh) < sizeof(*ifsm)) {
+               NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
                return -EINVAL;
+       }
 
        ifsm = nlmsg_data(cb->nlh);
+
+       /* only requests opting in to strict checking can pass data to
+        * influence the dump. The legacy exception is filter_mask.
+        */
+       if (cb->strict_check) {
+               if (ifsm->pad1 || ifsm->pad2 || ifsm->ifindex) {
+                       NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
+                       return -EINVAL;
+               }
+               if (nlmsg_attrlen(cb->nlh, sizeof(*ifsm))) {
+                       NL_SET_ERR_MSG(extack, "Invalid attributes after stats header");
+                       return -EINVAL;
+               }
+       }
+
        filter_mask = ifsm->filter_mask;
-       if (!filter_mask)
+       if (!filter_mask) {
+               NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
                return -EINVAL;
+       }
 
        for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
                idx = 0;
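
A conforming RTM_GETSTATS dump request under these rules might look like the following sketch (IFLA_STATS_LINK_64 is an arbitrary choice of filter bit):

    /* Sketch of a strict-mode RTM_GETSTATS dump request: filter_mask is
     * the only if_stats_msg field that may be non-zero, per the checks
     * added above. */
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>
    #include <linux/if_link.h>

    struct {
            struct nlmsghdr nlh;
            struct if_stats_msg ifsm;
    } req = {
            .nlh = {
                    .nlmsg_len   = sizeof(req),
                    .nlmsg_type  = RTM_GETSTATS,
                    .nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
            },
            .ifsm = {
                    .family      = AF_UNSPEC,
                    .filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64),
            },
    };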
index b2c807f67aba5847fa0c9f07adabbff7cf1afd22..0e937d3d85b556e8738717a6ddec1bd5ecde7b6f 100644 (file)
@@ -3381,64 +3381,6 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
 }
 EXPORT_SYMBOL(skb_find_text);
 
-/**
- * skb_append_datato_frags - append the user data to a skb
- * @sk: sock  structure
- * @skb: skb structure to be appended with user data.
- * @getfrag: call back function to be used for getting the user data
- * @from: pointer to user message iov
- * @length: length of the iov message
- *
- * Description: This procedure append the user data in the fragment part
- * of the skb if any page alloc fails user this procedure returns  -ENOMEM
- */
-int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
-                       int (*getfrag)(void *from, char *to, int offset,
-                                       int len, int odd, struct sk_buff *skb),
-                       void *from, int length)
-{
-       int frg_cnt = skb_shinfo(skb)->nr_frags;
-       int copy;
-       int offset = 0;
-       int ret;
-       struct page_frag *pfrag = &current->task_frag;
-
-       do {
-               /* Return error if we don't have space for new frag */
-               if (frg_cnt >= MAX_SKB_FRAGS)
-                       return -EMSGSIZE;
-
-               if (!sk_page_frag_refill(sk, pfrag))
-                       return -ENOMEM;
-
-               /* copy the user data to page */
-               copy = min_t(int, length, pfrag->size - pfrag->offset);
-
-               ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
-                             offset, copy, 0, skb);
-               if (ret < 0)
-                       return -EFAULT;
-
-               /* copy was successful so update the size parameters */
-               skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
-                                  copy);
-               frg_cnt++;
-               pfrag->offset += copy;
-               get_page(pfrag->page);
-
-               skb->truesize += copy;
-               refcount_add(copy, &sk->sk_wmem_alloc);
-               skb->len += copy;
-               skb->data_len += copy;
-               offset += copy;
-               length -= copy;
-
-       } while (length > 0);
-
-       return 0;
-}
-EXPORT_SYMBOL(skb_append_datato_frags);
-
 int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
                         int offset, size_t size)
 {
index 8537b6ca72c5013a75c70978c079646f6278aabf..7e8796a6a0892efbb7dfce67d12b8062b2d5daa9 100644 (file)
@@ -2317,7 +2317,7 @@ static void __lock_sock(struct sock *sk)
        finish_wait(&sk->sk_lock.wq, &wait);
 }
 
-static void __release_sock(struct sock *sk)
+void __release_sock(struct sock *sk)
        __releases(&sk->sk_lock.slock)
        __acquires(&sk->sk_lock.slock)
 {
index d28d46bff6ab43441f34284ec975c1e052a774d0..85d6c879383da8994c6b20cd1e49e0f667a07482 100644 (file)
@@ -606,11 +606,13 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        if (sk->sk_state == DCCP_LISTEN) {
                if (dh->dccph_type == DCCP_PKT_REQUEST) {
                        /* It is possible that we process SYN packets from backlog,
-                        * so we need to make sure to disable BH right there.
+                        * so we need to make sure to disable BH and RCU right there.
                         */
+                       rcu_read_lock();
                        local_bh_disable();
                        acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
                        local_bh_enable();
+                       rcu_read_unlock();
                        if (!acceptable)
                                return 1;
                        consume_skb(skb);
index b08feb219b44b67eadf408a33649d8c7ec9db2d0..8e08cea6f17866b5fb1619f570de747c6a837cbd 100644 (file)
@@ -493,9 +493,11 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
 
                dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->ir_loc_addr,
                                                              ireq->ir_rmt_addr);
+               rcu_read_lock();
                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
-                                           ireq_opt_deref(ireq));
+                                           rcu_dereference(ireq->ireq_opt));
+               rcu_read_unlock();
                err = net_xmit_eval(err);
        }
 
index 7f4534828f6c96c0b0fe99d54add4ce9be0c1877..a65d553e730d9130d0fe26d896aa2cffcc5aaae1 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/keyctl.h>
 #include <linux/err.h>
 #include <linux/seq_file.h>
+#include <linux/dns_resolver.h>
 #include <keys/dns_resolver-type.h>
 #include <keys/user-type.h>
 #include "internal.h"
@@ -48,27 +49,86 @@ const struct cred *dns_resolver_cache;
 /*
  * Preparse instantiation data for a dns_resolver key.
  *
- * The data must be a NUL-terminated string, with the NUL char accounted in
- * datalen.
+ * For normal hostname lookups, the data must be a NUL-terminated string, with
+ * the NUL char accounted in datalen.
  *
  * If the data contains a '#' characters, then we take the clause after each
  * one to be an option of the form 'key=value'.  The actual data of interest is
  * the string leading up to the first '#'.  For instance:
  *
  *        "ip1,ip2,...#foo=bar"
+ *
+ * For server list requests, the data must begin with a NUL char and be
+ * followed by a byte indicating the version of the data format.  Version 1
+ * looks something like (note this is packed):
+ *
+ *     u8      Non-string marker (ie. 0)
+ *     u8      Content (DNS_PAYLOAD_IS_*)
+ *     u8      Version (e.g. 1)
+ *     u8      Source of server list
+ *     u8      Lookup status of server list
+ *     u8      Number of servers
+ *     foreach-server {
+ *             __le16  Name length
+ *             __le16  Priority (as per SRV record, low first)
+ *             __le16  Weight (as per SRV record, higher first)
+ *             __le16  Port
+ *             u8      Source of address list
+ *             u8      Lookup status of address list
+ *             u8      Protocol (DNS_SERVER_PROTOCOL_*)
+ *             u8      Number of addresses
+ *             char[]  Name (not NUL-terminated)
+ *             foreach-address {
+ *                     u8              Family (DNS_ADDRESS_IS_*)
+ *                     union {
+ *                             u8[4]   ipv4_addr
+ *                             u8[16]  ipv6_addr
+ *                     }
+ *             }
+ *     }
+ *
  */
 static int
 dns_resolver_preparse(struct key_preparsed_payload *prep)
 {
+       const struct dns_payload_header *bin;
        struct user_key_payload *upayload;
        unsigned long derrno;
        int ret;
        int datalen = prep->datalen, result_len = 0;
        const char *data = prep->data, *end, *opt;
 
+       if (datalen <= 1 || !data)
+               return -EINVAL;
+
+       if (data[0] == 0) {
+               /* It may be a server list. */
+               if (datalen <= sizeof(*bin))
+                       return -EINVAL;
+
+               bin = (const struct dns_payload_header *)data;
+               kenter("[%u,%u],%u", bin->content, bin->version, datalen);
+               if (bin->content != DNS_PAYLOAD_IS_SERVER_LIST) {
+                       pr_warn_ratelimited(
+                               "dns_resolver: Unsupported content type (%u)\n",
+                               bin->content);
+                       return -EINVAL;
+               }
+
+               if (bin->version != 1) {
+                       pr_warn_ratelimited(
+                               "dns_resolver: Unsupported server list version (%u)\n",
+                               bin->version);
+                       return -EINVAL;
+               }
+
+               result_len = datalen;
+               goto store_result;
+       }
+
        kenter("'%*.*s',%u", datalen, datalen, data, datalen);
 
-       if (datalen <= 1 || !data || data[datalen - 1] != '\0')
+       if (!data || data[datalen - 1] != '\0')
                return -EINVAL;
        datalen--;
 
@@ -144,6 +204,7 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
                return 0;
        }
 
+store_result:
        kdebug("store result");
        prep->quotalen = result_len;
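
The same series adds uapi definitions mirroring the layout described in the comment above; roughly (shown for orientation, field names as in include/uapi/linux/dns_resolver.h):

    /* Packed on-the-wire layout of a version-1 server list; per-server
     * name and address records follow each dns_server_list_v1_header. */
    #include <linux/types.h>

    struct dns_payload_header {
            __u8    zero;           /* non-string marker, always 0 */
            __u8    content;        /* DNS_PAYLOAD_IS_* */
            __u8    version;
    } __attribute__((__packed__));

    struct dns_server_list_v1_header {
            struct dns_payload_header hdr;
            __u8    source;         /* source of the server list */
            __u8    status;         /* lookup status of the list */
            __u8    nr_servers;
    } __attribute__((__packed__));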
 
index 49da67034f29cd488053648a73dd140ac9e94063..76338c38738a748aeaa899ebf30247c0f69741a8 100644 (file)
@@ -148,12 +148,9 @@ int dns_query(const char *type, const char *name, size_t namelen,
 
        if (_result) {
                ret = -ENOMEM;
-               *_result = kmalloc(len + 1, GFP_KERNEL);
+               *_result = kmemdup_nul(upayload->data, len, GFP_KERNEL);
                if (!*_result)
                        goto put;
-
-               memcpy(*_result, upayload->data, len);
-               (*_result)[len] = '\0';
        }
 
        if (_expiry)
index f915abff1350a86af8d5bb89725b751c061b0fb5..300921417f89f387e70c4ed0acb2e8bca01591bb 100644 (file)
@@ -42,7 +42,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
        oif = sk->sk_bound_dev_if;
        saddr = inet->inet_saddr;
        if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
-               if (!oif)
+               if (!oif || netif_index_is_l3_master(sock_net(sk), oif))
                        oif = inet->mc_index;
                if (!saddr)
                        saddr = inet->mc_addr;
index 44d931a3cd50c1b56cdd1365d84c5a828a9853c3..d122ebbe5980139f892431af0eb98fa514f0a448 100644 (file)
@@ -782,7 +782,8 @@ static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
 }
 
 static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
-                                      __u32 *pvalid_lft, __u32 *pprefered_lft)
+                                      __u32 *pvalid_lft, __u32 *pprefered_lft,
+                                      struct netlink_ext_ack *extack)
 {
        struct nlattr *tb[IFA_MAX+1];
        struct in_ifaddr *ifa;
@@ -792,7 +793,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
        int err;
 
        err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy,
-                         NULL);
+                         extack);
        if (err < 0)
                goto errout;
 
@@ -897,7 +898,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        ASSERT_RTNL();
 
-       ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft);
+       ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft, extack);
        if (IS_ERR(ifa))
                return PTR_ERR(ifa);
 
@@ -1659,17 +1660,70 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
+static int inet_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
+                                     struct inet_fill_args *fillargs,
+                                     struct net **tgt_net, struct sock *sk,
+                                     struct netlink_ext_ack *extack)
+{
+       struct nlattr *tb[IFA_MAX+1];
+       struct ifaddrmsg *ifm;
+       int err, i;
+
+       if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
+               NL_SET_ERR_MSG(extack, "ipv4: Invalid header for address dump request");
+               return -EINVAL;
+       }
+
+       ifm = nlmsg_data(nlh);
+       if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
+               NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for address dump request");
+               return -EINVAL;
+       }
+       if (ifm->ifa_index) {
+               NL_SET_ERR_MSG(extack, "ipv4: Filter by device index not supported for address dump");
+               return -EINVAL;
+       }
+
+       err = nlmsg_parse_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
+                                ifa_ipv4_policy, extack);
+       if (err < 0)
+               return err;
+
+       for (i = 0; i <= IFA_MAX; ++i) {
+               if (!tb[i])
+                       continue;
+
+               if (i == IFA_TARGET_NETNSID) {
+                       struct net *net;
+
+                       fillargs->netnsid = nla_get_s32(tb[i]);
+
+                       net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
+                       if (IS_ERR(net)) {
+                               NL_SET_ERR_MSG(extack, "ipv4: Invalid target network namespace id");
+                               return PTR_ERR(net);
+                       }
+                       *tgt_net = net;
+               } else {
+                       NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in dump request");
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
 static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       const struct nlmsghdr *nlh = cb->nlh;
        struct inet_fill_args fillargs = {
                .portid = NETLINK_CB(cb->skb).portid,
-               .seq = cb->nlh->nlmsg_seq,
+               .seq = nlh->nlmsg_seq,
                .event = RTM_NEWADDR,
                .flags = NLM_F_MULTI,
                .netnsid = -1,
        };
        struct net *net = sock_net(skb->sk);
-       struct nlattr *tb[IFA_MAX+1];
        struct net *tgt_net = net;
        int h, s_h;
        int idx, s_idx;
@@ -1683,16 +1737,13 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
        s_idx = idx = cb->args[1];
        s_ip_idx = ip_idx = cb->args[2];
 
-       if (nlmsg_parse(cb->nlh, sizeof(struct ifaddrmsg), tb, IFA_MAX,
-                       ifa_ipv4_policy, NULL) >= 0) {
-               if (tb[IFA_TARGET_NETNSID]) {
-                       fillargs.netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]);
+       if (cb->strict_check) {
+               int err;
 
-                       tgt_net = rtnl_get_net_ns_capable(skb->sk,
-                                                         fillargs.netnsid);
-                       if (IS_ERR(tgt_net))
-                               return PTR_ERR(tgt_net);
-               }
+               err = inet_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
+                                                skb->sk, cb->extack);
+               if (err < 0)
+                       return err;
        }
 
        for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
@@ -2035,6 +2086,7 @@ errout:
 static int inet_netconf_dump_devconf(struct sk_buff *skb,
                                     struct netlink_callback *cb)
 {
+       const struct nlmsghdr *nlh = cb->nlh;
        struct net *net = sock_net(skb->sk);
        int h, s_h;
        int idx, s_idx;
@@ -2042,6 +2094,21 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
        struct in_device *in_dev;
        struct hlist_head *head;
 
+       if (cb->strict_check) {
+               struct netlink_ext_ack *extack = cb->extack;
+               struct netconfmsg *ncm;
+
+               if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
+                       NL_SET_ERR_MSG(extack, "ipv4: Invalid header for netconf dump request");
+                       return -EINVAL;
+               }
+
+               if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
+                       NL_SET_ERR_MSG(extack, "ipv4: Invalid data after header in netconf dump request");
+                       return -EINVAL;
+               }
+       }
+
        s_h = cb->args[0];
        s_idx = idx = cb->args[1];
 
@@ -2061,7 +2128,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
                        if (inet_netconf_fill_devconf(skb, dev->ifindex,
                                                      &in_dev->cnf,
                                                      NETLINK_CB(cb->skb).portid,
-                                                     cb->nlh->nlmsg_seq,
+                                                     nlh->nlmsg_seq,
                                                      RTM_NEWNETCONF,
                                                      NLM_F_MULTI,
                                                      NETCONFA_ALL) < 0) {
@@ -2078,7 +2145,7 @@ cont:
                if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
                                              net->ipv4.devconf_all,
                                              NETLINK_CB(cb->skb).portid,
-                                             cb->nlh->nlmsg_seq,
+                                             nlh->nlmsg_seq,
                                              RTM_NEWNETCONF, NLM_F_MULTI,
                                              NETCONFA_ALL) < 0)
                        goto done;
@@ -2089,7 +2156,7 @@ cont:
                if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
                                              net->ipv4.devconf_dflt,
                                              NETLINK_CB(cb->skb).portid,
-                                             cb->nlh->nlmsg_seq,
+                                             nlh->nlmsg_seq,
                                              RTM_NEWNETCONF, NLM_F_MULTI,
                                              NETCONFA_ALL) < 0)
                        goto done;
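
The validators above define what a well-formed dump request looks like under strict checking: an exact-size header with all unused fields zeroed, and for address dumps no attributes beyond IFA_TARGET_NETNSID. Below is a minimal userspace sketch of a conforming RTM_GETADDR dump request; it assumes only the standard rtnetlink UAPI headers, and error handling is elided.

#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_addr.h>

/* Sketch: an RTM_GETADDR dump request shaped to pass
 * inet_valid_dump_ifaddr_req(): exact-size ifaddrmsg, unused
 * fields zeroed, no attributes appended.
 */
static ssize_t send_addr_dump(int nl_fd)
{
        struct {
                struct nlmsghdr nlh;
                struct ifaddrmsg ifm;
        } req;

        memset(&req, 0, sizeof(req));
        req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifaddrmsg));
        req.nlh.nlmsg_type = RTM_GETADDR;
        req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
        req.ifm.ifa_family = AF_INET;   /* family selects the dumper */

        return send(nl_fd, &req, req.nlh.nlmsg_len, 0);
}

Appending any attribute other than IFA_TARGET_NETNSID, or setting ifa_index, makes a strict dump fail with EINVAL instead of being silently ignored.
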
index 071533dd33c2a6c4bfc74a57e7ffedea0070bbfd..9e1c840596c5ccd504598260dda853d77784860d 100644 (file)
@@ -683,12 +683,11 @@ static void esp_input_done_esn(struct crypto_async_request *base, int err)
  */
 static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-       struct ip_esp_hdr *esph;
        struct crypto_aead *aead = x->data;
        struct aead_request *req;
        struct sk_buff *trailer;
        int ivlen = crypto_aead_ivsize(aead);
-       int elen = skb->len - sizeof(*esph) - ivlen;
+       int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
        int nfrags;
        int assoclen;
        int seqhilen;
@@ -698,13 +697,13 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
        struct scatterlist *sg;
        int err = -EINVAL;
 
-       if (!pskb_may_pull(skb, sizeof(*esph) + ivlen))
+       if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen))
                goto out;
 
        if (elen <= 0)
                goto out;
 
-       assoclen = sizeof(*esph);
+       assoclen = sizeof(struct ip_esp_hdr);
        seqhilen = 0;
 
        if (x->props.flags & XFRM_STATE_ESN) {
index 30e2bcc3ef2a293568076228fecf3cf07ba01f20..038f511c73fa176ebe075ebfa8175aeac5dba285 100644 (file)
@@ -802,8 +802,40 @@ errout:
        return err;
 }
 
+int ip_valid_fib_dump_req(const struct nlmsghdr *nlh,
+                         struct netlink_ext_ack *extack)
+{
+       struct rtmsg *rtm;
+
+       if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
+               NL_SET_ERR_MSG(extack, "Invalid header for FIB dump request");
+               return -EINVAL;
+       }
+
+       rtm = nlmsg_data(nlh);
+       if (rtm->rtm_dst_len || rtm->rtm_src_len  || rtm->rtm_tos   ||
+           rtm->rtm_table   || rtm->rtm_protocol || rtm->rtm_scope ||
+           rtm->rtm_type) {
+               NL_SET_ERR_MSG(extack, "Invalid values in header for FIB dump request");
+               return -EINVAL;
+       }
+       if (rtm->rtm_flags & ~(RTM_F_CLONED | RTM_F_PREFIX)) {
+               NL_SET_ERR_MSG(extack, "Invalid flags for FIB dump request");
+               return -EINVAL;
+       }
+
+       if (nlmsg_attrlen(nlh, sizeof(*rtm))) {
+               NL_SET_ERR_MSG(extack, "Invalid data after header in FIB dump request");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ip_valid_fib_dump_req);
+
 static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       const struct nlmsghdr *nlh = cb->nlh;
        struct net *net = sock_net(skb->sk);
        unsigned int h, s_h;
        unsigned int e = 0, s_e;
@@ -811,8 +843,14 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
        struct hlist_head *head;
        int dumped = 0, err;
 
-       if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
-           ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
+       if (cb->strict_check) {
+               err = ip_valid_fib_dump_req(nlh, cb->extack);
+               if (err < 0)
+                       return err;
+       }
+
+       if (nlmsg_len(nlh) >= sizeof(struct rtmsg) &&
+           ((struct rtmsg *)nlmsg_data(nlh))->rtm_flags & RTM_F_CLONED)
                return skb->len;
 
        s_h = cb->args[0];
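
ip_valid_fib_dump_req() only runs when cb->strict_check is set, which the requesting socket has to opt into, so legacy dumpers keep their old behaviour. A hedged sketch of the opt-in, assuming the NETLINK_GET_STRICT_CHK socket option introduced in this same development cycle:

#include <sys/socket.h>
#include <linux/netlink.h>

/* Sketch: opt a rtnetlink socket into strict dump checking so that
 * validators such as ip_valid_fib_dump_req() run on its requests.
 * NETLINK_GET_STRICT_CHK is assumed available; an older kernel
 * simply rejects the setsockopt and keeps legacy, unvalidated dumps.
 */
static int open_strict_rtnl(void)
{
        int one = 1;
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

        if (fd < 0)
                return -1;

        setsockopt(fd, SOL_NETLINK, NETLINK_GET_STRICT_CHK,
                   &one, sizeof(one));
        return fd;
}
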
index bee8db979195e72f620b2bbf8e3385b51b774a11..f8c7ec8171a82820d1bbffb8a211589467d7afec 100644 (file)
@@ -208,7 +208,6 @@ static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
 static void free_fib_info_rcu(struct rcu_head *head)
 {
        struct fib_info *fi = container_of(head, struct fib_info, rcu);
-       struct dst_metrics *m;
 
        change_nexthops(fi) {
                if (nexthop_nh->nh_dev)
@@ -219,9 +218,8 @@ static void free_fib_info_rcu(struct rcu_head *head)
                rt_fibinfo_free(&nexthop_nh->nh_rth_input);
        } endfor_nexthops(fi);
 
-       m = fi->fib_metrics;
-       if (m != &dst_default_metrics && refcount_dec_and_test(&m->refcnt))
-               kfree(m);
+       ip_fib_metrics_put(fi->fib_metrics);
+
        kfree(fi);
 }
 
@@ -1020,13 +1018,6 @@ static bool fib_valid_prefsrc(struct fib_config *cfg, __be32 fib_prefsrc)
        return true;
 }
 
-static int
-fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
-{
-       return ip_metrics_convert(fi->fib_net, cfg->fc_mx, cfg->fc_mx_len,
-                                 fi->fib_metrics->metrics);
-}
-
 struct fib_info *fib_create_info(struct fib_config *cfg,
                                 struct netlink_ext_ack *extack)
 {
@@ -1084,16 +1075,14 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
        fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
        if (!fi)
                goto failure;
-       if (cfg->fc_mx) {
-               fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
-               if (unlikely(!fi->fib_metrics)) {
-                       kfree(fi);
-                       return ERR_PTR(err);
-               }
-               refcount_set(&fi->fib_metrics->refcnt, 1);
-       } else {
-               fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
+       fi->fib_metrics = ip_fib_metrics_init(fi->fib_net, cfg->fc_mx,
+                                             cfg->fc_mx_len);
+       if (unlikely(IS_ERR(fi->fib_metrics))) {
+               err = PTR_ERR(fi->fib_metrics);
+               kfree(fi);
+               return ERR_PTR(err);
        }
+
        fib_info_cnt++;
        fi->fib_net = net;
        fi->fib_protocol = cfg->fc_protocol;
@@ -1112,10 +1101,6 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
                        goto failure;
        } endfor_nexthops(fi)
 
-       err = fib_convert_metrics(fi, cfg);
-       if (err)
-               goto failure;
-
        if (cfg->fc_mp) {
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
                err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg, extack);
index dfd5009f96ef7111a593651a48f73c4a92c3ed15..15e7f7915a21e0fbce09d5d2c17d877eae499e03 100644 (file)
@@ -544,7 +544,8 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
        struct ip_options_rcu *opt;
        struct rtable *rt;
 
-       opt = ireq_opt_deref(ireq);
+       rcu_read_lock();
+       opt = rcu_dereference(ireq->ireq_opt);
 
        flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
@@ -558,11 +559,13 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
                goto no_route;
        if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
                goto route_err;
+       rcu_read_unlock();
        return &rt->dst;
 
 route_err:
        ip_rt_put(rt);
 no_route:
+       rcu_read_unlock();
        __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
        return NULL;
 }
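
The hunk above replaces ireq_opt_deref() with an explicit RCU read-side section: once the route lookup can run without the listener socket held, the options pointer must not outlive the critical section. A minimal sketch of the rule being applied, assuming the usual inet_request_sock layout; the helper name is illustrative, not part of the patch:

#include <net/inet_sock.h>

/* Sketch: reading the RCU-managed ireq->ireq_opt. The pointer is
 * only valid between rcu_read_lock() and rcu_read_unlock().
 */
static bool ireq_opts_strictroute(struct inet_request_sock *ireq)
{
        const struct ip_options_rcu *opt;
        bool strict;

        rcu_read_lock();
        opt = rcu_dereference(ireq->ireq_opt);
        strict = opt && opt->opt.is_strictroute;
        rcu_read_unlock();      /* opt must not be used past here */

        return strict;
}
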
index c0fe5ad996f238091f5b9585adb586a571f653f0..26c36cccabdc2c8cc95cfd609672d412c493fc42 100644 (file)
@@ -149,7 +149,6 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 {
        struct sockaddr_in sin;
-       const struct iphdr *iph = ip_hdr(skb);
        __be16 *ports;
        int end;
 
@@ -164,7 +163,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
        ports = (__be16 *)skb_transport_header(skb);
 
        sin.sin_family = AF_INET;
-       sin.sin_addr.s_addr = iph->daddr;
+       sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
        sin.sin_port = ports[1];
        memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
 
index 5660adcf7a042ba675026a8397759618fd2a56b3..91b0d5671649c30b95fbb02a1b0a20636435a263 100644 (file)
@@ -2527,6 +2527,13 @@ errout_free:
 
 static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       if (cb->strict_check) {
+               int err = ip_valid_fib_dump_req(cb->nlh, cb->extack);
+
+               if (err < 0)
+                       return err;
+       }
+
        return mr_rtm_dumproute(skb, cb, ipmr_mr_table_iter,
                                _ipmr_fill_mroute, &mfc_unres_lock);
 }
@@ -2710,6 +2717,31 @@ static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb)
        return true;
 }
 
+static int ipmr_valid_dumplink(const struct nlmsghdr *nlh,
+                              struct netlink_ext_ack *extack)
+{
+       struct ifinfomsg *ifm;
+
+       if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
+               NL_SET_ERR_MSG(extack, "ipv4: Invalid header for ipmr link dump");
+               return -EINVAL;
+       }
+
+       if (nlmsg_attrlen(nlh, sizeof(*ifm))) {
+               NL_SET_ERR_MSG(extack, "Invalid data after header in ipmr link dump");
+               return -EINVAL;
+       }
+
+       ifm = nlmsg_data(nlh);
+       if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
+           ifm->ifi_change || ifm->ifi_index) {
+               NL_SET_ERR_MSG(extack, "Invalid values in header for ipmr link dump request");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct net *net = sock_net(skb->sk);
@@ -2718,6 +2750,13 @@ static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
        unsigned int e = 0, s_e;
        struct mr_table *mrt;
 
+       if (cb->strict_check) {
+               int err = ipmr_valid_dumplink(cb->nlh, cb->extack);
+
+               if (err < 0)
+                       return err;
+       }
+
        s_t = cb->args[0];
        s_e = cb->args[1];
 
index 04311f7067e2e9e3dafb89aa4f8e30dab0fde854..6d218f5a2e712582c5deea1aa3d50239031e090f 100644 (file)
@@ -5,8 +5,8 @@
 #include <net/net_namespace.h>
 #include <net/tcp.h>
 
-int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, int fc_mx_len,
-                      u32 *metrics)
+static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
+                             int fc_mx_len, u32 *metrics)
 {
        bool ecn_ca = false;
        struct nlattr *nla;
@@ -52,4 +52,28 @@ int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, int fc_mx_len,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(ip_metrics_convert);
+
+struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
+                                       int fc_mx_len)
+{
+       struct dst_metrics *fib_metrics;
+       int err;
+
+       if (!fc_mx)
+               return (struct dst_metrics *)&dst_default_metrics;
+
+       fib_metrics = kzalloc(sizeof(*fib_metrics), GFP_KERNEL);
+       if (unlikely(!fib_metrics))
+               return ERR_PTR(-ENOMEM);
+
+       err = ip_metrics_convert(net, fc_mx, fc_mx_len, fib_metrics->metrics);
+       if (!err) {
+               refcount_set(&fib_metrics->refcnt, 1);
+       } else {
+               kfree(fib_metrics);
+               fib_metrics = ERR_PTR(err);
+       }
+
+       return fib_metrics;
+}
+EXPORT_SYMBOL_GPL(ip_fib_metrics_init);
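
ip_fib_metrics_init() centralizes what fib_create_info() and free_fib_info_rcu() used to open-code: hand out the shared dst_default_metrics when no metrics were supplied, otherwise a refcounted allocation. A sketch of the intended pairing; ip_fib_metrics_put() is the release counterpart used by the callers above, assumed here to drop the refcount and free anything that is not the shared default:

#include <linux/err.h>
#include <net/ip.h>
#include <net/ip_fib.h>

/* Sketch: allocate metrics for a fib_info and record that the
 * matching release is ip_fib_metrics_put(). The helper name
 * attach_metrics() is illustrative only.
 */
static int attach_metrics(struct net *net, struct fib_info *fi,
                          struct nlattr *fc_mx, int fc_mx_len)
{
        struct dst_metrics *m = ip_fib_metrics_init(net, fc_mx, fc_mx_len);

        if (IS_ERR(m))
                return PTR_ERR(m);

        fi->fib_metrics = m;    /* paired with ip_fib_metrics_put() */
        return 0;
}
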
index 6115bf1ff6f0a16f5114a095646808ab2ef63405..78a67f961d86dafe09c2b9b4ccff1709a88261e4 100644 (file)
@@ -264,7 +264,6 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
 
        return nf_nat_inet_fn(priv, skb, state);
 }
-EXPORT_SYMBOL_GPL(nf_nat_ipv4_fn);
 
 static unsigned int
 nf_nat_ipv4_in(void *priv, struct sk_buff *skb,
index ad3aeff152ede37e5d39b0e5bcf05a0ad5c0904e..a9d5e013e5556a5bace7afcb61cabeb0849261d1 100644 (file)
@@ -104,12 +104,26 @@ static int masq_device_event(struct notifier_block *this,
        return NOTIFY_DONE;
 }
 
+static int inet_cmp(struct nf_conn *ct, void *ptr)
+{
+       struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+       struct net_device *dev = ifa->ifa_dev->dev;
+       struct nf_conntrack_tuple *tuple;
+
+       if (!device_cmp(ct, (void *)(long)dev->ifindex))
+               return 0;
+
+       tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+
+       return ifa->ifa_address == tuple->dst.u3.ip;
+}
+
 static int masq_inet_event(struct notifier_block *this,
                           unsigned long event,
                           void *ptr)
 {
        struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;
-       struct netdev_notifier_info info;
+       struct net *net = dev_net(idev->dev);
 
        /* The masq_dev_notifier will catch the case of the device going
         * down.  So if the inetdev is dead and being destroyed we have
@@ -119,8 +133,10 @@ static int masq_inet_event(struct notifier_block *this,
        if (idev->dead)
                return NOTIFY_DONE;
 
-       netdev_notifier_info_init(&info, idev->dev);
-       return masq_device_event(this, event, &info);
+       if (event == NETDEV_DOWN)
+               nf_ct_iterate_cleanup_net(net, inet_cmp, ptr, 0, 0);
+
+       return NOTIFY_DONE;
 }
 
 static struct notifier_block masq_dev_notifier = {
index 8d7aaf118a30106030d26780dd1c6e3ae5b90ba9..7ccb5f87f70b90ebb33e514dd75f158b1c4f55f7 100644 (file)
@@ -779,7 +779,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        }
 
        if (ipv4_is_multicast(daddr)) {
-               if (!ipc.oif)
+               if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
                        ipc.oif = inet->mc_index;
                if (!saddr)
                        saddr = inet->mc_addr;
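
The added netif_index_is_l3_master() check matters for sockets bound to a VRF: SO_BINDTODEVICE on the VRF leaves the l3 master's ifindex in ipc.oif, which used to suppress the fall-back to the interface configured with IP_MULTICAST_IF. A hedged userspace illustration; "vrf-blue" and "eth1" are example device names, not from the patch:

#include <net/if.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* Sketch: a socket bound to a VRF that still wants multicast to
 * leave via a specific slave interface. Before the guard above, the
 * VRF master's ifindex in ipc.oif masked the IP_MULTICAST_IF choice.
 */
static int bind_vrf_and_mcast_if(int fd)
{
        struct ip_mreqn mreq = {
                .imr_ifindex = (int)if_nametoindex("eth1"),
        };

        if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
                       "vrf-blue", strlen("vrf-blue")) < 0)
                return -1;

        return setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF,
                          &mreq, sizeof(mreq));
}

The same one-line guard is applied to the raw and UDP sendmsg paths below.
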
index 33df4d76db2d948d620ffc809574b364ae24ad4b..8ca3eb06ba04246ce9f53045488f1da57dc8e689 100644 (file)
@@ -608,7 +608,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                tos |= RTO_ONLINK;
 
        if (ipv4_is_multicast(daddr)) {
-               if (!ipc.oif)
+               if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
                        ipc.oif = inet->mc_index;
                if (!saddr)
                        saddr = inet->mc_addr;
index dce2ed66ebe17fcfe6a2c612321c6284ae7bd6f5..f71d2395c42805c2720d00cebf87a88da80e90ef 100644 (file)
@@ -1217,18 +1217,15 @@ void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
                src = ip_hdr(skb)->saddr;
        else {
                struct fib_result res;
-               struct flowi4 fl4;
-               struct iphdr *iph;
-
-               iph = ip_hdr(skb);
-
-               memset(&fl4, 0, sizeof(fl4));
-               fl4.daddr = iph->daddr;
-               fl4.saddr = iph->saddr;
-               fl4.flowi4_tos = RT_TOS(iph->tos);
-               fl4.flowi4_oif = rt->dst.dev->ifindex;
-               fl4.flowi4_iif = skb->dev->ifindex;
-               fl4.flowi4_mark = skb->mark;
+               struct iphdr *iph = ip_hdr(skb);
+               struct flowi4 fl4 = {
+                       .daddr = iph->daddr,
+                       .saddr = iph->saddr,
+                       .flowi4_tos = RT_TOS(iph->tos),
+                       .flowi4_oif = rt->dst.dev->ifindex,
+                       .flowi4_iif = skb->dev->ifindex,
+                       .flowi4_mark = skb->mark,
+               };
 
                rcu_read_lock();
                if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
@@ -1479,12 +1476,9 @@ void rt_del_uncached_list(struct rtable *rt)
 
 static void ipv4_dst_destroy(struct dst_entry *dst)
 {
-       struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
        struct rtable *rt = (struct rtable *)dst;
 
-       if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
-               kfree(p);
-
+       ip_dst_metrics_put(dst);
        rt_del_uncached_list(rt);
 }
 
@@ -1531,11 +1525,8 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
                        rt->rt_gateway = nh->nh_gw;
                        rt->rt_uses_gateway = 1;
                }
-               dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true);
-               if (fi->fib_metrics != &dst_default_metrics) {
-                       rt->dst._metrics |= DST_METRICS_REFCOUNTED;
-                       refcount_inc(&fi->fib_metrics->refcnt);
-               }
+               ip_dst_init_metrics(&rt->dst, fi->fib_metrics);
+
 #ifdef CONFIG_IP_ROUTE_CLASSID
                rt->dst.tclassid = nh->nh_tclassid;
 #endif
@@ -2783,7 +2774,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
        struct rtable *rt = NULL;
        struct sk_buff *skb;
        struct rtmsg *rtm;
-       struct flowi4 fl4;
+       struct flowi4 fl4 = {};
        __be32 dst = 0;
        __be32 src = 0;
        kuid_t uid;
@@ -2823,7 +2814,6 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
        if (!skb)
                return -ENOBUFS;
 
-       memset(&fl4, 0, sizeof(fl4));
        fl4.daddr = dst;
        fl4.saddr = src;
        fl4.flowi4_tos = rtm->rtm_tos;
index b92f422f2fa805cd5cca8264fe9ae5aa6d6a65b8..891ed2f91467b9345743682a3dd6e818acb48fbd 100644 (file)
@@ -48,6 +48,7 @@ static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
 static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 static int comp_sack_nr_max = 255;
+static u32 u32_max_div_HZ = UINT_MAX / HZ;
 
 /* obsolete */
 static int sysctl_tcp_low_latency __read_mostly;
@@ -745,9 +746,10 @@ static struct ctl_table ipv4_net_table[] = {
        {
                .procname       = "tcp_probe_interval",
                .data           = &init_net.ipv4.sysctl_tcp_probe_interval,
-               .maxlen         = sizeof(int),
+               .maxlen         = sizeof(u32),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_douintvec_minmax,
+               .extra2         = &u32_max_div_HZ,
        },
        {
                .procname       = "igmp_link_local_mcast_reports",
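
The new u32_max_div_HZ bound exists because tcp_probe_interval is configured in seconds and later scaled by HZ into jiffies; anything larger overflows the u32 arithmetic, and proc_douintvec_minmax rejects writes above the extra2 cap. A standalone sketch of the arithmetic, with HZ hardwired purely for the demonstration:

#include <limits.h>
#include <stdio.h>

/* Sketch: why the sysctl is clamped to UINT_MAX / HZ. Any larger
 * per-second value overflows u32 when scaled to jiffies.
 */
#define HZ 1000

int main(void)
{
        unsigned int max_secs = UINT_MAX / HZ;  /* 4294967 for HZ=1000 */

        printf("max interval: %u s (~%u days)\n",
               max_secs, max_secs / 86400);
        return 0;
}
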
index 69c236943f56bd0749e5efb18de97e69898f1bde..43ef83b2330e6238a55c9843580a585d87708e0c 100644 (file)
@@ -1753,6 +1753,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
        struct vm_area_struct *vma;
        struct sk_buff *skb = NULL;
        struct tcp_sock *tp;
+       int inq;
        int ret;
 
        if (address & (PAGE_SIZE - 1) || address != zc->address)
@@ -1773,12 +1774,15 @@ static int tcp_zerocopy_receive(struct sock *sk,
 
        tp = tcp_sk(sk);
        seq = tp->copied_seq;
-       zc->length = min_t(u32, zc->length, tcp_inq(sk));
+       inq = tcp_inq(sk);
+       zc->length = min_t(u32, zc->length, inq);
        zc->length &= ~(PAGE_SIZE - 1);
-
-       zap_page_range(vma, address, zc->length);
-
-       zc->recv_skip_hint = 0;
+       if (zc->length) {
+               zap_page_range(vma, address, zc->length);
+               zc->recv_skip_hint = 0;
+       } else {
+               zc->recv_skip_hint = inq;
+       }
        ret = 0;
        while (length + PAGE_SIZE <= zc->length) {
                if (zc->recv_skip_hint < PAGE_SIZE) {
@@ -1801,8 +1805,17 @@ static int tcp_zerocopy_receive(struct sock *sk,
                                frags++;
                        }
                }
-               if (frags->size != PAGE_SIZE || frags->page_offset)
+               if (frags->size != PAGE_SIZE || frags->page_offset) {
+                       int remaining = zc->recv_skip_hint;
+
+                       while (remaining && (frags->size != PAGE_SIZE ||
+                                            frags->page_offset)) {
+                               remaining -= frags->size;
+                               frags++;
+                       }
+                       zc->recv_skip_hint -= remaining;
                        break;
+               }
                ret = vm_insert_page(vma, address + length,
                                     skb_frag_page(frags));
                if (ret)
@@ -2403,16 +2416,10 @@ adjudge_to_death:
        sock_hold(sk);
        sock_orphan(sk);
 
-       /* It is the last release_sock in its life. It will remove backlog. */
-       release_sock(sk);
-
-
-       /* Now socket is owned by kernel and we acquire BH lock
-        *  to finish close. No need to check for user refs.
-        */
        local_bh_disable();
        bh_lock_sock(sk);
-       WARN_ON(sock_owned_by_user(sk));
+       /* remove backlog if any, without releasing ownership. */
+       __release_sock(sk);
 
        percpu_counter_inc(sk->sk_prot->orphan_count);
 
@@ -2481,6 +2488,7 @@ adjudge_to_death:
 out:
        bh_unlock_sock(sk);
        local_bh_enable();
+       release_sock(sk);
        sock_put(sk);
 }
 EXPORT_SYMBOL(tcp_close);
@@ -3896,8 +3904,8 @@ void __init tcp_init(void)
        init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
 
        init_net.ipv4.sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
-       init_net.ipv4.sysctl_tcp_rmem[1] = 87380;
-       init_net.ipv4.sysctl_tcp_rmem[2] = max(87380, max_rshare);
+       init_net.ipv4.sysctl_tcp_rmem[1] = 131072;
+       init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare);
 
        pr_info("Hash tables configured (established %u bind %u)\n",
                tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
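
With this change recv_skip_hint also reports how much data sits in fragments that cannot be page-mapped, so a consumer can copy exactly that remainder through the regular receive path. A hedged userspace sketch of the pattern, loosely following the kernel selftests; error handling is elided and 'addr' is assumed to be a page-aligned anonymous mapping:

#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/tcp.h>

/* Sketch of a TCP_ZEROCOPY_RECEIVE consumer: map what the kernel
 * can give us at 'addr', then recv() the unmappable remainder
 * reported in recv_skip_hint.
 */
static ssize_t zc_read(int fd, void *addr, unsigned int len, char *copybuf)
{
        struct tcp_zerocopy_receive zc = {
                .address = (__u64)(unsigned long)addr,
                .length  = len,
        };
        socklen_t zc_len = sizeof(zc);
        ssize_t total;

        if (getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE,
                       &zc, &zc_len) < 0)
                return -1;

        total = zc.length;              /* bytes now mapped at addr */
        if (zc.recv_skip_hint)          /* misaligned tail: copy it */
                total += recv(fd, copybuf, zc.recv_skip_hint, MSG_DONTWAIT);

        return total;
}
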
index d703a0b3b6a2f0efd8607354c1c74ac1a8e78d4f..188980c58f873ffdc81c018dcab0996f603cd7eb 100644 (file)
@@ -426,26 +426,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
        }
 }
 
-/* 3. Tuning rcvbuf, when connection enters established state. */
-static void tcp_fixup_rcvbuf(struct sock *sk)
-{
-       u32 mss = tcp_sk(sk)->advmss;
-       int rcvmem;
-
-       rcvmem = 2 * SKB_TRUESIZE(mss + MAX_TCP_HEADER) *
-                tcp_default_init_rwnd(mss);
-
-       /* Dynamic Right Sizing (DRS) has 2 to 3 RTT latency
-        * Allow enough cushion so that sender is not limited by our window
-        */
-       if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf)
-               rcvmem <<= 2;
-
-       if (sk->sk_rcvbuf < rcvmem)
-               sk->sk_rcvbuf = min(rcvmem, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
-}
-
-/* 4. Try to fixup all. It is made immediately after connection enters
+/* 3. Try to fixup all. It is made immediately after connection enters
  *    established state.
  */
 void tcp_init_buffer_space(struct sock *sk)
@@ -454,12 +435,10 @@ void tcp_init_buffer_space(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        int maxwin;
 
-       if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
-               tcp_fixup_rcvbuf(sk);
        if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
                tcp_sndbuf_expand(sk);
 
-       tp->rcvq_space.space = tp->rcv_wnd;
+       tp->rcvq_space.space = min_t(u32, tp->rcv_wnd, TCP_INIT_CWND * tp->advmss);
        tcp_mstamp_refresh(tp);
        tp->rcvq_space.time = tp->tcp_mstamp;
        tp->rcvq_space.seq = tp->copied_seq;
@@ -485,7 +464,7 @@ void tcp_init_buffer_space(struct sock *sk)
        tp->snd_cwnd_stamp = tcp_jiffies32;
 }
 
-/* 5. Recalculate window clamp after socket hit its memory bounds. */
+/* 4. Recalculate window clamp after socket hit its memory bounds. */
 static void tcp_clamp_window(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
@@ -6023,11 +6002,13 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
                        if (th->fin)
                                goto discard;
                        /* It is possible that we process SYN packets from backlog,
-                        * so we need to make sure to disable BH right there.
+                        * so we need to make sure to disable BH and RCU right there.
                         */
+                       rcu_read_lock();
                        local_bh_disable();
                        acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
                        local_bh_enable();
+                       rcu_read_unlock();
 
                        if (!acceptable)
                                return 1;
index 1f2496e8620dd78cecefbb0dceb8570fc92661e5..de47038afdf0261f964f346fe3b3febf9b1652ce 100644 (file)
@@ -943,9 +943,11 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
        if (skb) {
                __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 
+               rcu_read_lock();
                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
-                                           ireq_opt_deref(ireq));
+                                           rcu_dereference(ireq->ireq_opt));
+               rcu_read_unlock();
                err = net_xmit_eval(err);
        }
 
index fe7855b090e4feed6a7d1ba6ee874cdb23a9bd0c..059b67af28b137fb9566eaef370b270fc424bffb 100644 (file)
@@ -195,21 +195,6 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 }
 
-
-u32 tcp_default_init_rwnd(u32 mss)
-{
-       /* Initial receive window should be twice of TCP_INIT_CWND to
-        * enable proper sending of new unsent data during fast recovery
-        * (RFC 3517, Section 4, NextSeg() rule (2)). Further place a
-        * limit when mss is larger than 1460.
-        */
-       u32 init_rwnd = TCP_INIT_CWND * 2;
-
-       if (mss > 1460)
-               init_rwnd = max((1460 * init_rwnd) / mss, 2U);
-       return init_rwnd;
-}
-
 /* Determine a window scaling and initial window to offer.
  * Based on the assumption that the given amount of space
  * will be offered. Store the results in the tp structure.
@@ -244,7 +229,10 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
        if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
                (*rcv_wnd) = min(space, MAX_TCP_WINDOW);
        else
-               (*rcv_wnd) = space;
+               (*rcv_wnd) = min_t(u32, space, U16_MAX);
+
+       if (init_rcv_wnd)
+               *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
 
        (*rcv_wscale) = 0;
        if (wscale_ok) {
@@ -257,11 +245,6 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
                        (*rcv_wscale)++;
                }
        }
-
-       if (!init_rcv_wnd) /* Use default unless specified otherwise */
-               init_rcv_wnd = tcp_default_init_rwnd(mss);
-       *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
-
        /* Set the clamp no higher than max representable value */
        (*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
 }
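
With tcp_default_init_rwnd() removed, the initial advertised window is simply the available space capped at the unscaled 16-bit field, optionally clamped by an explicitly configured init_rcv_wnd; growing past 64 KB is left to receive-buffer autotuning. A sketch of the resulting choice, with window scaling and the signed-window workaround omitted; the helper name is illustrative:

/* Sketch of the new initial-window selection above. */
static unsigned int initial_rcv_wnd(unsigned int space,
                                    unsigned int init_rcv_wnd,
                                    unsigned int mss)
{
        unsigned int wnd = space < 65535u ? space : 65535u;

        /* e.g. space = 1 MB gives wnd = 65535; a configured
         * init_rcv_wnd of 10 segments with mss = 1460 clamps
         * it further to 14600.
         */
        if (init_rcv_wnd && init_rcv_wnd * mss < wnd)
                wnd = init_rcv_wnd * mss;

        return wnd;
}
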
index 4f661e178da8465203266ff4dfa3e8743e60ff82..61023d50cd604d5e19464a32c33b65d29c75c81e 100644 (file)
@@ -758,7 +758,7 @@ void tcp_init_xmit_timers(struct sock *sk)
 {
        inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
                                  &tcp_keepalive_timer);
-       hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_TAI,
+       hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_ABS_PINNED_SOFT);
        tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;
 
index 7d69dd6fa7e8c63929a27edad74fb0d6f9f3ee31..1bec2203d558392c479e31d234b041f34daab86e 100644 (file)
@@ -1042,7 +1042,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        }
 
        if (ipv4_is_multicast(daddr)) {
-               if (!ipc.oif)
+               if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
                        ipc.oif = inet->mc_index;
                if (!saddr)
                        saddr = inet->mc_addr;
@@ -1889,7 +1889,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        return 0;
 }
 
-static DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
+DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
 void udp_encap_enable(void)
 {
        static_branch_enable(&udp_encap_needed_key);
index 0c0522b79b43f09785ce8fd5f0dc9461a93f0e98..802f2bc00d69751a40856a6f4f82b32bd244617d 100644 (file)
@@ -405,7 +405,7 @@ static struct sk_buff *udp4_gro_receive(struct list_head *head,
 {
        struct udphdr *uh = udp_gro_udphdr(skb);
 
-       if (unlikely(!uh))
+       if (unlikely(!uh) || !static_branch_unlikely(&udp_encap_needed_key))
                goto flush;
 
        /* Don't bother verifying checksum if we're going to flush anyway. */
index bcfc00e88756dabb1f491d3d41137ccbc7ab1cbc..f8de2482a52923709ed58c401785a6ac60771932 100644 (file)
@@ -67,6 +67,7 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
 
        if (xo && (xo->flags & XFRM_GRO)) {
                skb_mac_header_rebuild(skb);
+               skb_reset_transport_header(skb);
                return 0;
        }
 
index 3d36644890bb6d3b0a755c811c60e920ad5cd8b8..1ad2c2c4e250f84b1ad73020c727ea8b68b3e0d3 100644 (file)
@@ -46,7 +46,6 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
 static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
 {
        int ihl = skb->data - skb_transport_header(skb);
-       struct xfrm_offload *xo = xfrm_offload(skb);
 
        if (skb->transport_header != skb->network_header) {
                memmove(skb_transport_header(skb),
@@ -54,8 +53,7 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
                skb->network_header = skb->transport_header;
        }
        ip_hdr(skb)->tot_len = htons(skb->len + ihl);
-       if (!xo || !(xo->flags & XFRM_GRO))
-               skb_reset_transport_header(skb);
+       skb_reset_transport_header(skb);
        return 0;
 }
 
index a9a317322388632b326a40d056c7338a8c3cc328..2496b12bf721e78aae9ca1fb20b72806bf01005a 100644 (file)
@@ -666,6 +666,7 @@ errout:
 static int inet6_netconf_dump_devconf(struct sk_buff *skb,
                                      struct netlink_callback *cb)
 {
+       const struct nlmsghdr *nlh = cb->nlh;
        struct net *net = sock_net(skb->sk);
        int h, s_h;
        int idx, s_idx;
@@ -673,6 +674,21 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
        struct inet6_dev *idev;
        struct hlist_head *head;
 
+       if (cb->strict_check) {
+               struct netlink_ext_ack *extack = cb->extack;
+               struct netconfmsg *ncm;
+
+               if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
+                       NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf dump request");
+                       return -EINVAL;
+               }
+
+               if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
+                       NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in netconf dump request");
+                       return -EINVAL;
+               }
+       }
+
        s_h = cb->args[0];
        s_idx = idx = cb->args[1];
 
@@ -692,7 +708,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
                        if (inet6_netconf_fill_devconf(skb, dev->ifindex,
                                                       &idev->cnf,
                                                       NETLINK_CB(cb->skb).portid,
-                                                      cb->nlh->nlmsg_seq,
+                                                      nlh->nlmsg_seq,
                                                       RTM_NEWNETCONF,
                                                       NLM_F_MULTI,
                                                       NETCONFA_ALL) < 0) {
@@ -709,7 +725,7 @@ cont:
                if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
                                               net->ipv6.devconf_all,
                                               NETLINK_CB(cb->skb).portid,
-                                              cb->nlh->nlmsg_seq,
+                                              nlh->nlmsg_seq,
                                               RTM_NEWNETCONF, NLM_F_MULTI,
                                               NETCONFA_ALL) < 0)
                        goto done;
@@ -720,7 +736,7 @@ cont:
                if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
                                               net->ipv6.devconf_dflt,
                                               NETLINK_CB(cb->skb).portid,
-                                              cb->nlh->nlmsg_seq,
+                                              nlh->nlmsg_seq,
                                               RTM_NEWNETCONF, NLM_F_MULTI,
                                               NETCONFA_ALL) < 0)
                        goto done;
@@ -4793,12 +4809,19 @@ static inline int inet6_ifaddr_msgsize(void)
               + nla_total_size(4)  /* IFA_RT_PRIORITY */;
 }
 
+enum addr_type_t {
+       UNICAST_ADDR,
+       MULTICAST_ADDR,
+       ANYCAST_ADDR,
+};
+
 struct inet6_fill_args {
        u32 portid;
        u32 seq;
        int event;
        unsigned int flags;
        int netnsid;
+       enum addr_type_t type;
 };
 
 static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
@@ -4930,39 +4953,28 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
        return 0;
 }
 
-enum addr_type_t {
-       UNICAST_ADDR,
-       MULTICAST_ADDR,
-       ANYCAST_ADDR,
-};
-
 /* called with rcu_read_lock() */
 static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
-                         struct netlink_callback *cb, enum addr_type_t type,
-                         int s_ip_idx, int *p_ip_idx, int netnsid)
+                         struct netlink_callback *cb,
+                         int s_ip_idx, int *p_ip_idx,
+                         struct inet6_fill_args *fillargs)
 {
-       struct inet6_fill_args fillargs = {
-               .portid = NETLINK_CB(cb->skb).portid,
-               .seq = cb->nlh->nlmsg_seq,
-               .flags = NLM_F_MULTI,
-               .netnsid = netnsid,
-       };
        struct ifmcaddr6 *ifmca;
        struct ifacaddr6 *ifaca;
        int err = 1;
        int ip_idx = *p_ip_idx;
 
        read_lock_bh(&idev->lock);
-       switch (type) {
+       switch (fillargs->type) {
        case UNICAST_ADDR: {
                struct inet6_ifaddr *ifa;
-               fillargs.event = RTM_NEWADDR;
+               fillargs->event = RTM_NEWADDR;
 
                /* unicast address incl. temp addr */
                list_for_each_entry(ifa, &idev->addr_list, if_list) {
                        if (++ip_idx < s_ip_idx)
                                continue;
-                       err = inet6_fill_ifaddr(skb, ifa, &fillargs);
+                       err = inet6_fill_ifaddr(skb, ifa, fillargs);
                        if (err < 0)
                                break;
                        nl_dump_check_consistent(cb, nlmsg_hdr(skb));
@@ -4970,26 +4982,26 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
                break;
        }
        case MULTICAST_ADDR:
-               fillargs.event = RTM_GETMULTICAST;
+               fillargs->event = RTM_GETMULTICAST;
 
                /* multicast address */
                for (ifmca = idev->mc_list; ifmca;
                     ifmca = ifmca->next, ip_idx++) {
                        if (ip_idx < s_ip_idx)
                                continue;
-                       err = inet6_fill_ifmcaddr(skb, ifmca, &fillargs);
+                       err = inet6_fill_ifmcaddr(skb, ifmca, fillargs);
                        if (err < 0)
                                break;
                }
                break;
        case ANYCAST_ADDR:
-               fillargs.event = RTM_GETANYCAST;
+               fillargs->event = RTM_GETANYCAST;
                /* anycast address */
                for (ifaca = idev->ac_list; ifaca;
                     ifaca = ifaca->aca_next, ip_idx++) {
                        if (ip_idx < s_ip_idx)
                                continue;
-                       err = inet6_fill_ifacaddr(skb, ifaca, &fillargs);
+                       err = inet6_fill_ifacaddr(skb, ifaca, fillargs);
                        if (err < 0)
                                break;
                }
@@ -5002,13 +5014,71 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
        return err;
 }
 
+static int inet6_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
+                                      struct inet6_fill_args *fillargs,
+                                      struct net **tgt_net, struct sock *sk,
+                                      struct netlink_ext_ack *extack)
+{
+       struct nlattr *tb[IFA_MAX+1];
+       struct ifaddrmsg *ifm;
+       int err, i;
+
+       if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
+               NL_SET_ERR_MSG_MOD(extack, "Invalid header for address dump request");
+               return -EINVAL;
+       }
+
+       ifm = nlmsg_data(nlh);
+       if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
+               NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for address dump request");
+               return -EINVAL;
+       }
+       if (ifm->ifa_index) {
+               NL_SET_ERR_MSG_MOD(extack, "Filter by device index not supported for address dump");
+               return -EINVAL;
+       }
+
+       err = nlmsg_parse_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
+                                ifa_ipv6_policy, extack);
+       if (err < 0)
+               return err;
+
+       for (i = 0; i <= IFA_MAX; ++i) {
+               if (!tb[i])
+                       continue;
+
+               if (i == IFA_TARGET_NETNSID) {
+                       struct net *net;
+
+                       fillargs->netnsid = nla_get_s32(tb[i]);
+                       net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
+                       if (IS_ERR(net)) {
+                               NL_SET_ERR_MSG_MOD(extack, "Invalid target network namespace id");
+                               return PTR_ERR(net);
+                       }
+                       *tgt_net = net;
+               } else {
+                       NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in dump request");
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
 static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
                           enum addr_type_t type)
 {
+       const struct nlmsghdr *nlh = cb->nlh;
+       struct inet6_fill_args fillargs = {
+               .portid = NETLINK_CB(cb->skb).portid,
+               .seq = cb->nlh->nlmsg_seq,
+               .flags = NLM_F_MULTI,
+               .netnsid = -1,
+               .type = type,
+       };
        struct net *net = sock_net(skb->sk);
-       struct nlattr *tb[IFA_MAX+1];
        struct net *tgt_net = net;
-       int netnsid = -1;
        int h, s_h;
        int idx, ip_idx;
        int s_idx, s_ip_idx;
@@ -5020,15 +5090,13 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
        s_idx = idx = cb->args[1];
        s_ip_idx = ip_idx = cb->args[2];
 
-       if (nlmsg_parse(cb->nlh, sizeof(struct ifaddrmsg), tb, IFA_MAX,
-                       ifa_ipv6_policy, NULL) >= 0) {
-               if (tb[IFA_TARGET_NETNSID]) {
-                       netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]);
+       if (cb->strict_check) {
+               int err;
 
-                       tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
-                       if (IS_ERR(tgt_net))
-                               return PTR_ERR(tgt_net);
-               }
+               err = inet6_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
+                                                 skb->sk, cb->extack);
+               if (err < 0)
+                       return err;
        }
 
        rcu_read_lock();
@@ -5046,8 +5114,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
                        if (!idev)
                                goto cont;
 
-                       if (in6_dump_addrs(idev, skb, cb, type,
-                                          s_ip_idx, &ip_idx, netnsid) < 0)
+                       if (in6_dump_addrs(idev, skb, cb, s_ip_idx, &ip_idx,
+                                          &fillargs) < 0)
                                goto done;
 cont:
                        idx++;
@@ -5058,7 +5126,7 @@ done:
        cb->args[0] = h;
        cb->args[1] = idx;
        cb->args[2] = ip_idx;
-       if (netnsid >= 0)
+       if (fillargs.netnsid >= 0)
                put_net(tgt_net);
 
        return skb->len;
@@ -5592,6 +5660,31 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
+static int inet6_valid_dump_ifinfo(const struct nlmsghdr *nlh,
+                                  struct netlink_ext_ack *extack)
+{
+       struct ifinfomsg *ifm;
+
+       if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
+               NL_SET_ERR_MSG_MOD(extack, "Invalid header for link dump request");
+               return -EINVAL;
+       }
+
+       if (nlmsg_attrlen(nlh, sizeof(*ifm))) {
+               NL_SET_ERR_MSG_MOD(extack, "Invalid data after header");
+               return -EINVAL;
+       }
+
+       ifm = nlmsg_data(nlh);
+       if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
+           ifm->ifi_change || ifm->ifi_index) {
+               NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for dump request");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct net *net = sock_net(skb->sk);
@@ -5601,6 +5694,16 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
        struct inet6_dev *idev;
        struct hlist_head *head;
 
+       /* only requests using strict checking can pass data to
+        * influence the dump
+        */
+       if (cb->strict_check) {
+               int err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack);
+
+               if (err < 0)
+                       return err;
+       }
+
        s_h = cb->args[0];
        s_idx = cb->args[1];
 
index 1d6ced37ad718398c947cf49b3b486d4b88f3f6f..0d1ee82ee55b9ec64c3f5aa674c3f0aaa293a7e0 100644 (file)
@@ -458,20 +458,52 @@ static int ip6addrlbl_fill(struct sk_buff *skb,
        return 0;
 }
 
+static int ip6addrlbl_valid_dump_req(const struct nlmsghdr *nlh,
+                                    struct netlink_ext_ack *extack)
+{
+       struct ifaddrlblmsg *ifal;
+
+       if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifal))) {
+               NL_SET_ERR_MSG_MOD(extack, "Invalid header for address label dump request");
+               return -EINVAL;
+       }
+
+       ifal = nlmsg_data(nlh);
+       if (ifal->__ifal_reserved || ifal->ifal_prefixlen ||
+           ifal->ifal_flags || ifal->ifal_index || ifal->ifal_seq) {
+               NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for address label dump request");
+               return -EINVAL;
+       }
+
+       if (nlmsg_attrlen(nlh, sizeof(*ifal))) {
+               NL_SET_ERR_MSG_MOD(extack, "Invalid data after header for address label dump requewst");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       const struct nlmsghdr *nlh = cb->nlh;
        struct net *net = sock_net(skb->sk);
        struct ip6addrlbl_entry *p;
        int idx = 0, s_idx = cb->args[0];
        int err;
 
+       if (cb->strict_check) {
+               err = ip6addrlbl_valid_dump_req(nlh, cb->extack);
+               if (err < 0)
+                       return err;
+       }
+
        rcu_read_lock();
        hlist_for_each_entry_rcu(p, &net->ipv6.ip6addrlbl_table.head, list) {
                if (idx >= s_idx) {
                        err = ip6addrlbl_fill(skb, p,
                                              net->ipv6.ip6addrlbl_table.seq,
                                              NETLINK_CB(cb->skb).portid,
-                                             cb->nlh->nlmsg_seq,
+                                             nlh->nlmsg_seq,
                                              RTM_NEWADDRLABEL,
                                              NLM_F_MULTI);
                        if (err < 0)
index 88a7579c23bdb3ae432a126bb0781493bc8d60b7..63b2b66f9dfae8f50bfe3ca6230b81585b673bf4 100644 (file)
@@ -601,12 +601,11 @@ static void esp_input_done_esn(struct crypto_async_request *base, int err)
 
 static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-       struct ip_esp_hdr *esph;
        struct crypto_aead *aead = x->data;
        struct aead_request *req;
        struct sk_buff *trailer;
        int ivlen = crypto_aead_ivsize(aead);
-       int elen = skb->len - sizeof(*esph) - ivlen;
+       int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
        int nfrags;
        int assoclen;
        int seqhilen;
@@ -616,7 +615,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
        u8 *iv;
        struct scatterlist *sg;
 
-       if (!pskb_may_pull(skb, sizeof(*esph) + ivlen)) {
+       if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
                ret = -EINVAL;
                goto out;
        }
@@ -626,7 +625,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
                goto out;
        }
 
-       assoclen = sizeof(*esph);
+       assoclen = sizeof(struct ip_esp_hdr);
        seqhilen = 0;
 
        if (x->props.flags & XFRM_STATE_ESN) {
index 5516f55e214bd85ff7a07cf8c24648db327902c2..e14d244c551f3670f412ebf180e3bd5dddc9eabe 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 
+#include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/ndisc.h>
 #include <net/addrconf.h>
@@ -160,8 +161,6 @@ struct fib6_info *fib6_info_alloc(gfp_t gfp_flags)
        }
 
        INIT_LIST_HEAD(&f6i->fib6_siblings);
-       f6i->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
-
        atomic_inc(&f6i->fib6_ref);
 
        return f6i;
@@ -171,7 +170,6 @@ void fib6_info_destroy_rcu(struct rcu_head *head)
 {
        struct fib6_info *f6i = container_of(head, struct fib6_info, rcu);
        struct rt6_exception_bucket *bucket;
-       struct dst_metrics *m;
 
        WARN_ON(f6i->fib6_node);
 
@@ -203,9 +201,7 @@ void fib6_info_destroy_rcu(struct rcu_head *head)
        if (f6i->fib6_nh.nh_dev)
                dev_put(f6i->fib6_nh.nh_dev);
 
-       m = f6i->fib6_metrics;
-       if (m != &dst_default_metrics && refcount_dec_and_test(&m->refcnt))
-               kfree(m);
+       ip_fib_metrics_put(f6i->fib6_metrics);
 
        kfree(f6i);
 }
@@ -568,6 +564,7 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
 
 static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       const struct nlmsghdr *nlh = cb->nlh;
        struct net *net = sock_net(skb->sk);
        unsigned int h, s_h;
        unsigned int e = 0, s_e;
@@ -577,6 +574,13 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
        struct hlist_head *head;
        int res = 0;
 
+       if (cb->strict_check) {
+               int err = ip_valid_fib_dump_req(nlh, cb->extack);
+
+               if (err < 0)
+                       return err;
+       }
+
        s_h = cb->args[0];
        s_e = cb->args[1];
 
index d0b7e0249c133619fbb081881e054cab393ebb49..d7563ef76518482dfac68386b6946e9677f1708e 100644 (file)
@@ -85,7 +85,8 @@ static struct mr_table *ip6mr_new_table(struct net *net, u32 id);
 static void ip6mr_free_table(struct mr_table *mrt);
 
 static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
-                          struct sk_buff *skb, struct mfc6_cache *cache);
+                          struct net_device *dev, struct sk_buff *skb,
+                          struct mfc6_cache *cache);
 static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
                              mifi_t mifi, int assert);
 static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
@@ -138,6 +139,9 @@ static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
                .flags = FIB_LOOKUP_NOREF,
        };
 
+       /* update flow if oif or iif point to device enslaved to l3mdev */
+       l3mdev_update_flow(net, flowi6_to_flowi(flp6));
+
        err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
                               flowi6_to_flowi(flp6), 0, &arg);
        if (err < 0)
@@ -164,7 +168,9 @@ static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
                return -EINVAL;
        }
 
-       mrt = ip6mr_get_table(rule->fr_net, rule->table);
+       arg->table = fib_rule_get_table(rule, arg);
+
+       mrt = ip6mr_get_table(rule->fr_net, arg->table);
        if (!mrt)
                return -EAGAIN;
        res->mrt = mrt;
@@ -1014,7 +1020,7 @@ static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt,
                        }
                        rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
                } else
-                       ip6_mr_forward(net, mrt, skb, c);
+                       ip6_mr_forward(net, mrt, skb->dev, skb, c);
        }
 }
 
@@ -1120,7 +1126,7 @@ static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
 
 /* Queue a packet for resolution. It gets locked cache entry! */
 static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi,
-                                 struct sk_buff *skb)
+                                 struct sk_buff *skb, struct net_device *dev)
 {
        struct mfc6_cache *c;
        bool found = false;
@@ -1180,6 +1186,10 @@ static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi,
                kfree_skb(skb);
                err = -ENOBUFS;
        } else {
+               if (dev) {
+                       skb->dev = dev;
+                       skb->skb_iif = dev->ifindex;
+               }
                skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
                err = 0;
        }
@@ -2043,11 +2053,12 @@ static int ip6mr_find_vif(struct mr_table *mrt, struct net_device *dev)
 }
 
 static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
-                          struct sk_buff *skb, struct mfc6_cache *c)
+                          struct net_device *dev, struct sk_buff *skb,
+                          struct mfc6_cache *c)
 {
        int psend = -1;
        int vif, ct;
-       int true_vifi = ip6mr_find_vif(mrt, skb->dev);
+       int true_vifi = ip6mr_find_vif(mrt, dev);
 
        vif = c->_c.mfc_parent;
        c->_c.mfc_un.res.pkt++;
@@ -2073,7 +2084,7 @@ static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
        /*
         * Wrong interface: drop packet and (maybe) send PIM assert.
         */
-       if (mrt->vif_table[vif].dev != skb->dev) {
+       if (mrt->vif_table[vif].dev != dev) {
                c->_c.mfc_un.res.wrong_if++;
 
                if (true_vifi >= 0 && mrt->mroute_do_assert &&
@@ -2154,6 +2165,19 @@ int ip6_mr_input(struct sk_buff *skb)
                .flowi6_mark    = skb->mark,
        };
        int err;
+       struct net_device *dev;
+
+       /* skb->dev passed in is the master dev for vrfs.
+        * Get the proper interface that does have a vif associated with it.
+        */
+       dev = skb->dev;
+       if (netif_is_l3_master(skb->dev)) {
+               dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
+               if (!dev) {
+                       kfree_skb(skb);
+                       return -ENODEV;
+               }
+       }
 
        err = ip6mr_fib_lookup(net, &fl6, &mrt);
        if (err < 0) {
@@ -2165,7 +2189,7 @@ int ip6_mr_input(struct sk_buff *skb)
        cache = ip6mr_cache_find(mrt,
                                 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
        if (!cache) {
-               int vif = ip6mr_find_vif(mrt, skb->dev);
+               int vif = ip6mr_find_vif(mrt, dev);
 
                if (vif >= 0)
                        cache = ip6mr_cache_find_any(mrt,
@@ -2179,9 +2203,9 @@ int ip6_mr_input(struct sk_buff *skb)
        if (!cache) {
                int vif;
 
-               vif = ip6mr_find_vif(mrt, skb->dev);
+               vif = ip6mr_find_vif(mrt, dev);
                if (vif >= 0) {
-                       int err = ip6mr_cache_unresolved(mrt, vif, skb);
+                       int err = ip6mr_cache_unresolved(mrt, vif, skb, dev);
                        read_unlock(&mrt_lock);
 
                        return err;
@@ -2191,7 +2215,7 @@ int ip6_mr_input(struct sk_buff *skb)
                return -ENODEV;
        }
 
-       ip6_mr_forward(net, mrt, skb, cache);
+       ip6_mr_forward(net, mrt, dev, skb, cache);
 
        read_unlock(&mrt_lock);
 
@@ -2257,7 +2281,7 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
                iph->saddr = rt->rt6i_src.addr;
                iph->daddr = rt->rt6i_dst.addr;
 
-               err = ip6mr_cache_unresolved(mrt, vif, skb2);
+               err = ip6mr_cache_unresolved(mrt, vif, skb2, dev);
                read_unlock(&mrt_lock);
 
                return err;
@@ -2433,6 +2457,15 @@ errout:
 
 static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       const struct nlmsghdr *nlh = cb->nlh;
+
+       if (cb->strict_check) {
+               int err = ip_valid_fib_dump_req(nlh, cb->extack);
+
+               if (err < 0)
+                       return err;
+       }
+
        return mr_rtm_dumproute(skb, cb, ip6mr_mr_table_iter,
                                _ip6mr_fill_mroute, &mfc_unres_lock);
 }
index 0ec273997d1dc6eff71f62c66bbe214e369ab8f9..51863ada15a4fd73a1ffd99862730420a9f77e7c 100644 (file)
@@ -1533,7 +1533,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
 
        if (!ndopts.nd_opts_rh) {
                ip6_redirect_no_header(skb, dev_net(skb->dev),
-                                       skb->dev->ifindex, 0);
+                                       skb->dev->ifindex);
                return;
        }
 
index 8b147440fbdced8dbc23023785596f0565b6ddef..af737b47b9b56d28ce02c93ecd2870084cc4c265 100644 (file)
@@ -65,7 +65,10 @@ ipv6header_mt6(const struct sk_buff *skb, struct xt_action_param *par)
                }
 
                hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
-               BUG_ON(hp == NULL);
+               if (!hp) {
+                       par->hotdrop = true;
+                       return false;
+               }
 
                /* Calculate the header length */
                if (nexthdr == NEXTHDR_FRAGMENT)
index 2c99b94eeca3221bbc0887e82d1afe4e53e9efce..21bf6bf043232ac3fef34e5dba142dfad8e31b77 100644 (file)
@@ -137,7 +137,10 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
                                                        sizeof(_addr),
                                                        &_addr);
 
-                               BUG_ON(ap == NULL);
+                               if (ap == NULL) {
+                                       par->hotdrop = true;
+                                       return false;
+                               }
 
                                if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) {
                                        pr_debug("i=%d temp=%d;\n", i, temp);
@@ -166,7 +169,10 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
                                                        + temp * sizeof(_addr),
                                                        sizeof(_addr),
                                                        &_addr);
-                               BUG_ON(ap == NULL);
+                               if (ap == NULL) {
+                                       par->hotdrop = true;
+                                       return false;
+                               }
 
                                if (!ipv6_addr_equal(ap, &rtinfo->addrs[temp]))
                                        break;
index e6eb7cf9b54fd5e5c81b14836b0d629d5cccfd6d..3e4bf2286abea96617f8df1ecac74d91667ef59f 100644 (file)
@@ -87,18 +87,30 @@ static struct notifier_block masq_dev_notifier = {
 struct masq_dev_work {
        struct work_struct work;
        struct net *net;
+       struct in6_addr addr;
        int ifindex;
 };
 
+static int inet_cmp(struct nf_conn *ct, void *work)
+{
+       struct masq_dev_work *w = (struct masq_dev_work *)work;
+       struct nf_conntrack_tuple *tuple;
+
+       if (!device_cmp(ct, (void *)(long)w->ifindex))
+               return 0;
+
+       tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+
+       return ipv6_addr_equal(&w->addr, &tuple->dst.u3.in6);
+}
+
 static void iterate_cleanup_work(struct work_struct *work)
 {
        struct masq_dev_work *w;
-       long index;
 
        w = container_of(work, struct masq_dev_work, work);
 
-       index = w->ifindex;
-       nf_ct_iterate_cleanup_net(w->net, device_cmp, (void *)index, 0, 0);
+       nf_ct_iterate_cleanup_net(w->net, inet_cmp, (void *)w, 0, 0);
 
        put_net(w->net);
        kfree(w);
@@ -147,6 +159,7 @@ static int masq_inet_event(struct notifier_block *this,
                INIT_WORK(&w->work, iterate_cleanup_work);
                w->ifindex = dev->ifindex;
                w->net = net;
+               w->addr = ifa->addr;
                schedule_work(&w->work);
 
                return NOTIFY_DONE;
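
Before this change the cleanup work matched conntrack entries by ifindex alone, so removing a single address flushed every masqueraded connection on the device; inet_cmp() above narrows that to entries whose reply destination is the address actually going away. Note the supporting idiom: the iterator cookie grows from a scalar smuggled through a cast, (void *)(long)index, into a pointer to a context struct, letting the callback match on several fields. A generic sketch of that pattern (the types are illustrative):

    struct cleanup_ctx {
            int          ifindex;
            unsigned int addr;
    };

    static int entry_matches(int entry_ifindex, unsigned int entry_addr,
                             void *cookie)
    {
            const struct cleanup_ctx *ctx = cookie;

            return entry_ifindex == ctx->ifindex && entry_addr == ctx->addr;
    }
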
index 413d98bf24f4c9f9644b79590369b9188713926e..5e0efd3954e90ade89eb4da17cd5ecef1894a1a3 100644 (file)
@@ -651,8 +651,6 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;
        skb->tstamp = sockc->transmit_time;
-       skb_dst_set(skb, &rt->dst);
-       *dstp = NULL;
 
        skb_put(skb, length);
        skb_reset_network_header(skb);
@@ -665,8 +663,14 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
 
        skb->transport_header = skb->network_header;
        err = memcpy_from_msg(iph, msg, length);
-       if (err)
-               goto error_fault;
+       if (err) {
+               err = -EFAULT;
+               kfree_skb(skb);
+               goto error;
+       }
+
+       skb_dst_set(skb, &rt->dst);
+       *dstp = NULL;
 
        /* if egress device is enslaved to an L3 master device pass the
         * skb to its handler for processing
@@ -675,21 +679,28 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
        if (unlikely(!skb))
                return 0;
 
+       /* Acquire rcu_read_lock() in case we need to use rt->rt6i_idev
+        * in the error path. Since skb has been freed, the dst could
+        * have been queued for deletion.
+        */
+       rcu_read_lock();
        IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
        err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
                      NULL, rt->dst.dev, dst_output);
        if (err > 0)
                err = net_xmit_errno(err);
-       if (err)
-               goto error;
+       if (err) {
+               IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
+               rcu_read_unlock();
+               goto error_check;
+       }
+       rcu_read_unlock();
 out:
        return 0;
 
-error_fault:
-       err = -EFAULT;
-       kfree_skb(skb);
 error:
        IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
+error_check:
        if (err == -ENOBUFS && !np->recverr)
                err = 0;
        return err;
index d28f83e0159339e4a18e539ede86186da90fad2c..7c38e0e058aeafc065f8790e9d381c2d13b70f47 100644 (file)
@@ -364,14 +364,11 @@ EXPORT_SYMBOL(ip6_dst_alloc);
 
 static void ip6_dst_destroy(struct dst_entry *dst)
 {
-       struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
        struct rt6_info *rt = (struct rt6_info *)dst;
        struct fib6_info *from;
        struct inet6_dev *idev;
 
-       if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
-               kfree(p);
-
+       ip_dst_metrics_put(dst);
        rt6_uncached_list_del(rt);
 
        idev = rt->rt6i_idev;
@@ -978,11 +975,7 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
 {
        rt->rt6i_flags &= ~RTF_EXPIRES;
        rcu_assign_pointer(rt->from, from);
-       dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
-       if (from->fib6_metrics != &dst_default_metrics) {
-               rt->dst._metrics |= DST_METRICS_REFCOUNTED;
-               refcount_inc(&from->fib6_metrics->refcnt);
-       }
+       ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
 }
 
 /* Caller must already hold reference to @ort */
@@ -2349,15 +2342,14 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
 {
        const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
        struct dst_entry *dst;
-       struct flowi6 fl6;
-
-       memset(&fl6, 0, sizeof(fl6));
-       fl6.flowi6_oif = oif;
-       fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
-       fl6.daddr = iph->daddr;
-       fl6.saddr = iph->saddr;
-       fl6.flowlabel = ip6_flowinfo(iph);
-       fl6.flowi6_uid = uid;
+       struct flowi6 fl6 = {
+               .flowi6_oif = oif,
+               .flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
+               .daddr = iph->daddr,
+               .saddr = iph->saddr,
+               .flowlabel = ip6_flowinfo(iph),
+               .flowi6_uid = uid,
+       };
 
        dst = ip6_route_output(net, NULL, &fl6);
        if (!dst->error)
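
This hunk, like several that follow, replaces memset() plus field-by-field assignment with a single designated initializer; any member not named is value-initialized to zero. One caveat: memset() also zeroes padding bytes, which an initializer need not, so the rewrite is only safe where the structure is consumed field by field rather than hashed or compared as raw memory. A standalone example of the equivalence:

    #include <stdio.h>
    #include <string.h>

    struct point { int x, y, z; };

    static struct point make_old(int x)
    {
            struct point p;

            memset(&p, 0, sizeof(p));       /* zero everything... */
            p.x = x;                        /* ...then fill in fields */
            return p;
    }

    static struct point make_new(int x)
    {
            return (struct point){ .x = x }; /* unnamed members become 0 */
    }

    int main(void)
    {
            struct point a = make_old(7), b = make_new(7);

            printf("%d %d\n", a.y, b.y);    /* both print 0 */
            return 0;
    }
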
@@ -2508,16 +2500,15 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
 {
        const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
        struct dst_entry *dst;
-       struct flowi6 fl6;
-
-       memset(&fl6, 0, sizeof(fl6));
-       fl6.flowi6_iif = LOOPBACK_IFINDEX;
-       fl6.flowi6_oif = oif;
-       fl6.flowi6_mark = mark;
-       fl6.daddr = iph->daddr;
-       fl6.saddr = iph->saddr;
-       fl6.flowlabel = ip6_flowinfo(iph);
-       fl6.flowi6_uid = uid;
+       struct flowi6 fl6 = {
+               .flowi6_iif = LOOPBACK_IFINDEX,
+               .flowi6_oif = oif,
+               .flowi6_mark = mark,
+               .daddr = iph->daddr,
+               .saddr = iph->saddr,
+               .flowlabel = ip6_flowinfo(iph),
+               .flowi6_uid = uid,
+       };
 
        dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
        rt6_do_redirect(dst, NULL, skb);
@@ -2525,21 +2516,18 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
 }
 EXPORT_SYMBOL_GPL(ip6_redirect);
 
-void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
-                           u32 mark)
+void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
 {
        const struct ipv6hdr *iph = ipv6_hdr(skb);
        const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
        struct dst_entry *dst;
-       struct flowi6 fl6;
-
-       memset(&fl6, 0, sizeof(fl6));
-       fl6.flowi6_iif = LOOPBACK_IFINDEX;
-       fl6.flowi6_oif = oif;
-       fl6.flowi6_mark = mark;
-       fl6.daddr = msg->dest;
-       fl6.saddr = iph->daddr;
-       fl6.flowi6_uid = sock_net_uid(net, NULL);
+       struct flowi6 fl6 = {
+               .flowi6_iif = LOOPBACK_IFINDEX,
+               .flowi6_oif = oif,
+               .daddr = msg->dest,
+               .saddr = iph->daddr,
+               .flowi6_uid = sock_net_uid(net, NULL),
+       };
 
        dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
        rt6_do_redirect(dst, NULL, skb);
@@ -2710,24 +2698,6 @@ out:
        return entries > rt_max_size;
 }
 
-static int ip6_convert_metrics(struct net *net, struct fib6_info *rt,
-                              struct fib6_config *cfg)
-{
-       struct dst_metrics *p;
-
-       if (!cfg->fc_mx)
-               return 0;
-
-       p = kzalloc(sizeof(*rt->fib6_metrics), GFP_KERNEL);
-       if (unlikely(!p))
-               return -ENOMEM;
-
-       refcount_set(&p->refcnt, 1);
-       rt->fib6_metrics = p;
-
-       return ip_metrics_convert(net, cfg->fc_mx, cfg->fc_mx_len, p->metrics);
-}
-
 static struct rt6_info *ip6_nh_lookup_table(struct net *net,
                                            struct fib6_config *cfg,
                                            const struct in6_addr *gw_addr,
@@ -3003,13 +2973,17 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
        if (!rt)
                goto out;
 
+       rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len);
+       if (IS_ERR(rt->fib6_metrics)) {
+               err = PTR_ERR(rt->fib6_metrics);
+               /* Do not leave garbage there. */
+               rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
+               goto out;
+       }
+
        if (cfg->fc_flags & RTF_ADDRCONF)
                rt->dst_nocount = true;
 
-       err = ip6_convert_metrics(net, rt, cfg);
-       if (err < 0)
-               goto out;
-
        if (cfg->fc_flags & RTF_EXPIRES)
                fib6_set_expires(rt, jiffies +
                                clock_t_to_jiffies(cfg->fc_expires));
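
The deleted ip6_convert_metrics() gives way to the common ip_fib_metrics_init()/ip_dst_metrics_put() helpers shared with IPv4, which centralize the allocate-or-fall-back-to-default logic and the refcounting that rt6_set_from() and ip6_dst_destroy() used to open-code. The lifetime rule they encapsulate, modeled as a minimal userspace sketch (the names are illustrative, not the kernel API):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct metrics {
            atomic_int   refcnt;
            unsigned int vals[16];
    };

    /* Routes with no explicit metrics share one static all-zero block
     * that is never refcounted or freed; everything else is. */
    static struct metrics default_metrics;

    static struct metrics *metrics_get(struct metrics *m)
    {
            if (m != &default_metrics)
                    atomic_fetch_add(&m->refcnt, 1);
            return m;
    }

    static void metrics_put(struct metrics *m)
    {
            if (m != &default_metrics &&
                atomic_fetch_sub(&m->refcnt, 1) == 1)
                    free(m);
    }
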
@@ -3609,23 +3583,23 @@ static void rtmsg_to_fib6_config(struct net *net,
                                 struct in6_rtmsg *rtmsg,
                                 struct fib6_config *cfg)
 {
-       memset(cfg, 0, sizeof(*cfg));
-
-       cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
-                        : RT6_TABLE_MAIN;
-       cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
-       cfg->fc_metric = rtmsg->rtmsg_metric;
-       cfg->fc_expires = rtmsg->rtmsg_info;
-       cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
-       cfg->fc_src_len = rtmsg->rtmsg_src_len;
-       cfg->fc_flags = rtmsg->rtmsg_flags;
-       cfg->fc_type = rtmsg->rtmsg_type;
+       *cfg = (struct fib6_config){
+               .fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
+                        : RT6_TABLE_MAIN,
+               .fc_ifindex = rtmsg->rtmsg_ifindex,
+               .fc_metric = rtmsg->rtmsg_metric,
+               .fc_expires = rtmsg->rtmsg_info,
+               .fc_dst_len = rtmsg->rtmsg_dst_len,
+               .fc_src_len = rtmsg->rtmsg_src_len,
+               .fc_flags = rtmsg->rtmsg_flags,
+               .fc_type = rtmsg->rtmsg_type,
 
-       cfg->fc_nlinfo.nl_net = net;
+               .fc_nlinfo.nl_net = net,
 
-       cfg->fc_dst = rtmsg->rtmsg_dst;
-       cfg->fc_src = rtmsg->rtmsg_src;
-       cfg->fc_gateway = rtmsg->rtmsg_gateway;
+               .fc_dst = rtmsg->rtmsg_dst,
+               .fc_src = rtmsg->rtmsg_src,
+               .fc_gateway = rtmsg->rtmsg_gateway,
+       };
 }
 
 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
@@ -3732,6 +3706,7 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
        if (!f6i)
                return ERR_PTR(-ENOMEM);
 
+       f6i->fib6_metrics = ip_fib_metrics_init(net, NULL, 0);
        f6i->dst_nocount = true;
        f6i->dst_host = true;
        f6i->fib6_protocol = RTPROT_KERNEL;
@@ -4142,20 +4117,25 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
        int err;
 
        err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
-                         NULL);
+                         extack);
        if (err < 0)
                goto errout;
 
        err = -EINVAL;
        rtm = nlmsg_data(nlh);
-       memset(cfg, 0, sizeof(*cfg));
 
-       cfg->fc_table = rtm->rtm_table;
-       cfg->fc_dst_len = rtm->rtm_dst_len;
-       cfg->fc_src_len = rtm->rtm_src_len;
-       cfg->fc_flags = RTF_UP;
-       cfg->fc_protocol = rtm->rtm_protocol;
-       cfg->fc_type = rtm->rtm_type;
+       *cfg = (struct fib6_config){
+               .fc_table = rtm->rtm_table,
+               .fc_dst_len = rtm->rtm_dst_len,
+               .fc_src_len = rtm->rtm_src_len,
+               .fc_flags = RTF_UP,
+               .fc_protocol = rtm->rtm_protocol,
+               .fc_type = rtm->rtm_type,
+
+               .fc_nlinfo.portid = NETLINK_CB(skb).portid,
+               .fc_nlinfo.nlh = nlh,
+               .fc_nlinfo.nl_net = sock_net(skb->sk),
+       };
 
        if (rtm->rtm_type == RTN_UNREACHABLE ||
            rtm->rtm_type == RTN_BLACKHOLE ||
@@ -4171,10 +4151,6 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
 
-       cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
-       cfg->fc_nlinfo.nlh = nlh;
-       cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
-
        if (tb[RTA_GATEWAY]) {
                cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
                cfg->fc_flags |= RTF_GATEWAY;
@@ -4293,11 +4269,6 @@ static int ip6_route_info_append(struct net *net,
        if (!nh)
                return -ENOMEM;
        nh->fib6_info = rt;
-       err = ip6_convert_metrics(net, rt, r_cfg);
-       if (err) {
-               kfree(nh);
-               return err;
-       }
        memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
        list_add_tail(&nh->next, rt6_nh_list);
 
@@ -4827,7 +4798,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
        struct rt6_info *rt;
        struct sk_buff *skb;
        struct rtmsg *rtm;
-       struct flowi6 fl6;
+       struct flowi6 fl6 = {};
        bool fibmatch;
 
        err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
@@ -4836,7 +4807,6 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
                goto errout;
 
        err = -EINVAL;
-       memset(&fl6, 0, sizeof(fl6));
        rtm = nlmsg_data(nlh);
        fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
        fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
index 28c4aa5078fcb34b773875d28790e367a5c98ad6..374e7d302f26c4506bb3269c2208bad41a023374 100644 (file)
@@ -548,7 +548,7 @@ static __inline__ void udpv6_err(struct sk_buff *skb,
        __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
 }
 
-static DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
+DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
 void udpv6_encap_enable(void)
 {
        static_branch_enable(&udpv6_encap_needed_key);
index 95dee9ca8d22186486b09ef7514ec69e0985ff3a..1b8e161ac527dd1c2f594680ef532cd7cf453378 100644 (file)
@@ -119,7 +119,7 @@ static struct sk_buff *udp6_gro_receive(struct list_head *head,
 {
        struct udphdr *uh = udp_gro_udphdr(skb);
 
-       if (unlikely(!uh))
+       if (unlikely(!uh) || !static_branch_unlikely(&udpv6_encap_needed_key))
                goto flush;
 
        /* Don't bother verifying checksum if we're going to flush anyway. */
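
The udp.c hunk above drops the static qualifier so udpv6_encap_needed_key is visible outside udp.c, and the udp_offload.c hunk then gates GRO on it, flushing immediately while no encapsulation socket has ever been enabled. The static-key pattern this relies on, sketched with the <linux/jump_label.h> API (identifiers other than the macros are illustrative):

    /* in a shared header */
    DECLARE_STATIC_KEY_FALSE(example_encap_needed_key);

    /* in exactly one object file */
    DEFINE_STATIC_KEY_FALSE(example_encap_needed_key);

    void example_encap_enable(void)
    {
            static_branch_enable(&example_encap_needed_key);
    }

    /* fast path in another object file: this compiles to a patched-out
     * branch until the key is flipped at runtime */
    static bool example_encap_active(void)
    {
            return static_branch_unlikely(&example_encap_needed_key);
    }
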
index 841f4a07438e83502eadd6ec6c16a16d1de6aa55..9ef490dddcea23b82bd703217bfdde49dce41069 100644 (file)
@@ -59,6 +59,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
 
        if (xo && (xo->flags & XFRM_GRO)) {
                skb_mac_header_rebuild(skb);
+               skb_reset_transport_header(skb);
                return -1;
        }
 
index 9ad07a91708ef7a1008d469766ab39b9b882883f..3c29da5defe6c357ff04ca4adead1a9fee208f08 100644 (file)
@@ -51,7 +51,6 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
 static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
 {
        int ihl = skb->data - skb_transport_header(skb);
-       struct xfrm_offload *xo = xfrm_offload(skb);
 
        if (skb->transport_header != skb->network_header) {
                memmove(skb_transport_header(skb),
@@ -60,8 +59,7 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
        }
        ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
                                           sizeof(struct ipv6hdr));
-       if (!xo || !(xo->flags & XFRM_GRO))
-               skb_reset_transport_header(skb);
+       skb_reset_transport_header(skb);
        return 0;
 }
 
index 5959ce9620eb92ece2830d6a59ed21d562a3a1cf..6a74080005cf6acf15fa59d6d3dd14cbf01a1781 100644 (file)
@@ -170,9 +170,11 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 
        if (toobig && xfrm6_local_dontfrag(skb)) {
                xfrm6_local_rxpmtu(skb, mtu);
+               kfree_skb(skb);
                return -EMSGSIZE;
        } else if (!skb->ignore_df && toobig && skb->sk) {
                xfrm_local_error(skb, mtu);
+               kfree_skb(skb);
                return -EMSGSIZE;
        }
 
index 504627e2117fd72136e4a09ec544720b3f5a37ea..914aef7e7afdf6351700b0ab2965a023a9bef48d 100644 (file)
@@ -425,7 +425,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_AP_VLAN:
                /* Keys without a station are used for TX only */
-               if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP))
+               if (sta && test_sta_flag(sta, WLAN_STA_MFP))
                        key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
                break;
        case NL80211_IFTYPE_ADHOC:
index 5e6cf2cee965264dd45cda775b370b6dcb022413..5836ddeac9e34ecd2aa6e51363679d2cd11f266d 100644 (file)
@@ -1756,7 +1756,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
                if (local->ops->wake_tx_queue &&
                    type != NL80211_IFTYPE_AP_VLAN &&
-                   type != NL80211_IFTYPE_MONITOR)
+                   (type != NL80211_IFTYPE_MONITOR ||
+                    (params->flags & MONITOR_FLAG_ACTIVE)))
                        txq_size += sizeof(struct txq_info) +
                                    local->hw.txq_data_size;
 
index ee56f18cad3f7e89e1c60fe4829dab7bfa1ef340..21526630bf6559fed1ecd1894a4796db5216fd56 100644 (file)
@@ -217,7 +217,8 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
 int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
 void ieee80211s_init(void);
 void ieee80211s_update_metric(struct ieee80211_local *local,
-                             struct sta_info *sta, struct sk_buff *skb);
+                             struct sta_info *sta,
+                             struct ieee80211_tx_status *st);
 void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
 void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata);
 int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
index daf9db3c8f24f389df84d95ae973c969d65622f1..6950cd0bf5940a0bc76ea0f3bc283c4a1cac7963 100644 (file)
@@ -295,15 +295,12 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
 }
 
 void ieee80211s_update_metric(struct ieee80211_local *local,
-               struct sta_info *sta, struct sk_buff *skb)
+                             struct sta_info *sta,
+                             struct ieee80211_tx_status *st)
 {
-       struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       struct ieee80211_tx_info *txinfo = st->info;
        int failed;
 
-       if (!ieee80211_is_data(hdr->frame_control))
-               return;
-
        failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
 
        /* moving average, scaled to 100.
index 9a6d7208bf4f809b8cb78856e688c19730ee097e..91d7c0cd18824042044a861cfc1bcb4308c803de 100644 (file)
@@ -479,11 +479,6 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
        if (!skb)
                return;
 
-       if (dropped) {
-               dev_kfree_skb_any(skb);
-               return;
-       }
-
        if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
                u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
                struct ieee80211_sub_if_data *sdata;
@@ -506,6 +501,8 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
                }
                rcu_read_unlock();
 
+               dev_kfree_skb_any(skb);
+       } else if (dropped) {
                dev_kfree_skb_any(skb);
        } else {
                /* consumes skb */
@@ -811,7 +808,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
 
                rate_control_tx_status(local, sband, status);
                if (ieee80211_vif_is_mesh(&sta->sdata->vif))
-                       ieee80211s_update_metric(local, sta, skb);
+                       ieee80211s_update_metric(local, sta, status);
 
                if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
                        ieee80211_frame_acked(sta, skb);
@@ -972,6 +969,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
                }
 
                rate_control_tx_status(local, sband, status);
+               if (ieee80211_vif_is_mesh(&sta->sdata->vif))
+                       ieee80211s_update_metric(local, sta, status);
        }
 
        if (acked || noack_success) {
index 5cd5e6e5834efc820c94d299dadfd9164e217253..6c647f425e057d6d3c56acc3ceeb108868541967 100644 (file)
@@ -16,6 +16,7 @@
 #include "ieee80211_i.h"
 #include "driver-ops.h"
 #include "rate.h"
+#include "wme.h"
 
 /* give usermode some time for retries in setting up the TDLS session */
 #define TDLS_PEER_SETUP_TIMEOUT        (15 * HZ)
@@ -1010,14 +1011,13 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
        switch (action_code) {
        case WLAN_TDLS_SETUP_REQUEST:
        case WLAN_TDLS_SETUP_RESPONSE:
-               skb_set_queue_mapping(skb, IEEE80211_AC_BK);
-               skb->priority = 2;
+               skb->priority = 256 + 2;
                break;
        default:
-               skb_set_queue_mapping(skb, IEEE80211_AC_VI);
-               skb->priority = 5;
+               skb->priority = 256 + 5;
                break;
        }
+       skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
 
        /*
         * Set the WLAN_TDLS_TEARDOWN flag to indicate a teardown in progress.
index c42bfa1dcd2c77944b9e75981b74b27830ddbebb..e0ccee23fbcdb209a6f7f5704c2aea2d2ef74782 100644 (file)
@@ -214,6 +214,7 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
 {
        struct ieee80211_local *local = tx->local;
        struct ieee80211_if_managed *ifmgd;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
 
        /* driver doesn't support power save */
        if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS))
@@ -242,6 +243,9 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
        if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
                return TX_CONTINUE;
 
+       if (unlikely(info->flags & IEEE80211_TX_INTFL_OFFCHAN_TX_OK))
+               return TX_CONTINUE;
+
        ifmgd = &tx->sdata->u.mgd;
 
        /*
@@ -1915,7 +1919,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
                        sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
 
        if (invoke_tx_handlers_early(&tx))
-               return false;
+               return true;
 
        if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
                return true;
index 8fbe6cdbe255d4d32b790baa22e1431d240f6e7f..7f891ffffc052bc432d0537dd821d34667a55152 100644 (file)
@@ -1223,7 +1223,7 @@ static int mpls_netconf_get_devconf(struct sk_buff *in_skb,
        int err;
 
        err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
-                         devconf_mpls_policy, NULL);
+                         devconf_mpls_policy, extack);
        if (err < 0)
                goto errout;
 
@@ -1263,6 +1263,7 @@ errout:
 static int mpls_netconf_dump_devconf(struct sk_buff *skb,
                                     struct netlink_callback *cb)
 {
+       const struct nlmsghdr *nlh = cb->nlh;
        struct net *net = sock_net(skb->sk);
        struct hlist_head *head;
        struct net_device *dev;
@@ -1270,6 +1271,21 @@ static int mpls_netconf_dump_devconf(struct sk_buff *skb,
        int idx, s_idx;
        int h, s_h;
 
+       if (cb->strict_check) {
+               struct netlink_ext_ack *extack = cb->extack;
+               struct netconfmsg *ncm;
+
+               if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
+                       NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf dump request");
+                       return -EINVAL;
+               }
+
+               if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
+                       NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in netconf dump request");
+                       return -EINVAL;
+               }
+       }
+
        s_h = cb->args[0];
        s_idx = idx = cb->args[1];
 
@@ -1286,7 +1302,7 @@ static int mpls_netconf_dump_devconf(struct sk_buff *skb,
                                goto cont;
                        if (mpls_netconf_fill_devconf(skb, mdev,
                                                      NETLINK_CB(cb->skb).portid,
-                                                     cb->nlh->nlmsg_seq,
+                                                     nlh->nlmsg_seq,
                                                      RTM_NEWNETCONF,
                                                      NLM_F_MULTI,
                                                      NETCONFA_ALL) < 0) {
@@ -2017,6 +2033,7 @@ nla_put_failure:
 
 static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       const struct nlmsghdr *nlh = cb->nlh;
        struct net *net = sock_net(skb->sk);
        struct mpls_route __rcu **platform_label;
        size_t platform_labels;
@@ -2024,6 +2041,13 @@ static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
 
        ASSERT_RTNL();
 
+       if (cb->strict_check) {
+               int err = ip_valid_fib_dump_req(nlh, cb->extack);
+
+               if (err < 0)
+                       return err;
+       }
+
        index = cb->args[0];
        if (index < MPLS_LABEL_FIRST_UNRESERVED)
                index = MPLS_LABEL_FIRST_UNRESERVED;
index 8055e3965cef24ec4f277940311032dcb6bfb470..3d0a33b874f5abdae9e88807a80342f30b3c9eb8 100644 (file)
@@ -68,6 +68,10 @@ enum {
        NCSI_MODE_MAX
 };
 
+/* OEM Vendor Manufacturer ID */
+#define NCSI_OEM_MFR_MLX_ID             0x8119
+#define NCSI_OEM_MFR_BCM_ID             0x113d
+
 struct ncsi_channel_version {
        u32 version;            /* Supported BCD encoded NCSI version */
        u32 alpha2;             /* Supported BCD encoded NCSI version */
@@ -305,6 +309,7 @@ struct ncsi_cmd_arg {
                unsigned short words[8];
                unsigned int   dwords[4];
        };
+       unsigned char        *data;       /* NCSI OEM data                 */
 };
 
 extern struct list_head ncsi_dev_list;
index 7567ca63aae24b689e368219826df1be0982446b..82b7d9201db8002c2c1c9600327e01a5f09a2c00 100644 (file)
@@ -211,6 +211,25 @@ static int ncsi_cmd_handler_snfc(struct sk_buff *skb,
        return 0;
 }
 
+static int ncsi_cmd_handler_oem(struct sk_buff *skb,
+                               struct ncsi_cmd_arg *nca)
+{
+       struct ncsi_cmd_oem_pkt *cmd;
+       unsigned int len;
+
+       len = sizeof(struct ncsi_cmd_pkt_hdr) + 4;
+       if (nca->payload < 26)
+               len += 26;
+       else
+               len += nca->payload;
+
+       cmd = skb_put_zero(skb, len);
+       memcpy(&cmd->mfr_id, nca->data, nca->payload);
+       ncsi_cmd_build_header(&cmd->cmd.common, nca);
+
+       return 0;
+}
+
 static struct ncsi_cmd_handler {
        unsigned char type;
        int           payload;
@@ -244,7 +263,7 @@ static struct ncsi_cmd_handler {
        { NCSI_PKT_CMD_GNS,    0, ncsi_cmd_handler_default },
        { NCSI_PKT_CMD_GNPTS,  0, ncsi_cmd_handler_default },
        { NCSI_PKT_CMD_GPS,    0, ncsi_cmd_handler_default },
-       { NCSI_PKT_CMD_OEM,    0, NULL                     },
+       { NCSI_PKT_CMD_OEM,   -1, ncsi_cmd_handler_oem     },
        { NCSI_PKT_CMD_PLDM,   0, NULL                     },
        { NCSI_PKT_CMD_GPUUID, 0, ncsi_cmd_handler_default }
 };
@@ -316,8 +335,13 @@ int ncsi_xmit_cmd(struct ncsi_cmd_arg *nca)
                return -ENOENT;
        }
 
-       /* Get packet payload length and allocate the request */
-       nca->payload = nch->payload;
+       /* Get packet payload length and allocate the request.
+        * A negative length in the handler structure means the
+        * caller initializes nca->payload itself before calling
+        * the xmit function.
+        */
+       if (nch->payload >= 0)
+               nca->payload = nch->payload;
        nr = ncsi_alloc_command(nca);
        if (!nr)
                return -ENOMEM;
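
Taken together, the NCSI hunks above open the previously stubbed OEM command path: a payload length of -1 in the handler table marks commands whose length is supplied by the caller, and ncsi_cmd_handler_oem() copies the caller's buffer, manufacturer ID first, into a packet zero-padded to the 26-byte minimum payload. A hedged sketch of how a caller might drive it (usage inferred from these hunks, not a verbatim kernel call site; other required ncsi_cmd_arg fields such as the target package/channel are omitted):

    struct ncsi_cmd_arg nca;
    unsigned char oem_data[64] = {};
    __be32 mfr_id = htonl(NCSI_OEM_MFR_BCM_ID);
    int ret;

    memset(&nca, 0, sizeof(nca));
    memcpy(oem_data, &mfr_id, sizeof(mfr_id));
    /* vendor-specific payload bytes follow the manufacturer ID */
    nca.type    = NCSI_PKT_CMD_OEM;
    nca.payload = sizeof(oem_data);     /* length chosen by the caller */
    nca.data    = oem_data;
    ret = ncsi_xmit_cmd(&nca);
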
index 91b4b66438df8468056ecdefd670a1e5ad846a38..0f2087c8d42a95a29ca99a8d62cfef4486681943 100644 (file)
@@ -151,6 +151,20 @@ struct ncsi_cmd_snfc_pkt {
        unsigned char           pad[22];
 };
 
+/* OEM Request Command as per NCSI Specification */
+struct ncsi_cmd_oem_pkt {
+       struct ncsi_cmd_pkt_hdr cmd;         /* Command header    */
+       __be32                  mfr_id;      /* Manufacturer ID   */
+       unsigned char           data[];      /* OEM Payload Data  */
+};
+
+/* OEM Response Packet as per NCSI Specification */
+struct ncsi_rsp_oem_pkt {
+       struct ncsi_rsp_pkt_hdr rsp;         /* Response header   */
+       __be32                  mfr_id;      /* Manufacturer ID   */
+       unsigned char           data[];      /* Payload data      */
+};
+
 /* Get Link Status */
 struct ncsi_rsp_gls_pkt {
        struct ncsi_rsp_pkt_hdr rsp;        /* Response header   */
index 930c1d3796f0f5235875c0181af93e46016c27cd..d66b34749027f1349b56f49c2934a4c7717688bf 100644 (file)
@@ -596,6 +596,47 @@ static int ncsi_rsp_handler_snfc(struct ncsi_request *nr)
        return 0;
 }
 
+static struct ncsi_rsp_oem_handler {
+       unsigned int    mfr_id;
+       int             (*handler)(struct ncsi_request *nr);
+} ncsi_rsp_oem_handlers[] = {
+       { NCSI_OEM_MFR_MLX_ID, NULL },
+       { NCSI_OEM_MFR_BCM_ID, NULL }
+};
+
+/* Response handler for OEM command */
+static int ncsi_rsp_handler_oem(struct ncsi_request *nr)
+{
+       struct ncsi_rsp_oem_pkt *rsp;
+       struct ncsi_rsp_oem_handler *nrh = NULL;
+       unsigned int mfr_id, i;
+
+       /* Get the response header */
+       rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp);
+       mfr_id = ntohl(rsp->mfr_id);
+
+       /* Check the manufacturer ID and find the handler */
+       for (i = 0; i < ARRAY_SIZE(ncsi_rsp_oem_handlers); i++) {
+               if (ncsi_rsp_oem_handlers[i].mfr_id == mfr_id) {
+                       if (ncsi_rsp_oem_handlers[i].handler)
+                               nrh = &ncsi_rsp_oem_handlers[i];
+                       else
+                               nrh = NULL;
+
+                       break;
+               }
+       }
+
+       if (!nrh) {
+               netdev_err(nr->ndp->ndev.dev, "Received unrecognized OEM packet with MFR-ID (0x%x)\n",
+                          mfr_id);
+               return -ENOENT;
+       }
+
+       /* Process the packet */
+       return nrh->handler(nr);
+}
+
 static int ncsi_rsp_handler_gvi(struct ncsi_request *nr)
 {
        struct ncsi_rsp_gvi_pkt *rsp;
@@ -932,7 +973,7 @@ static struct ncsi_rsp_handler {
        { NCSI_PKT_RSP_GNS,   172, ncsi_rsp_handler_gns     },
        { NCSI_PKT_RSP_GNPTS, 172, ncsi_rsp_handler_gnpts   },
        { NCSI_PKT_RSP_GPS,     8, ncsi_rsp_handler_gps     },
-       { NCSI_PKT_RSP_OEM,     0, NULL                     },
+       { NCSI_PKT_RSP_OEM,    -1, ncsi_rsp_handler_oem     },
        { NCSI_PKT_RSP_PLDM,    0, NULL                     },
        { NCSI_PKT_RSP_GPUUID, 20, ncsi_rsp_handler_gpuuid  }
 };
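
On the response side, dispatch is by manufacturer ID through ncsi_rsp_oem_handlers[], whose entries are deliberately left NULL here, so any OEM response is still rejected with -ENOENT until a vendor handler lands. Wiring one up would look roughly like this (the Broadcom handler below is hypothetical):

    static int ncsi_rsp_handler_oem_bcm(struct ncsi_request *nr)
    {
            struct ncsi_rsp_oem_pkt *rsp;

            rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp);
            /* parse rsp->data[] according to the vendor's OEM format */
            return 0;
    }

    /* and in ncsi_rsp_oem_handlers[]: */
            { NCSI_OEM_MFR_BCM_ID, ncsi_rsp_handler_oem_bcm },
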
index f61c306de1d089358ede089f87f97c72c8927670..2ab870ef233a83bda49710a1cb8feddbcb13c010 100644 (file)
@@ -625,6 +625,13 @@ config NFT_FIB_INET
          The lookup will be delegated to the IPv4 or IPv6 FIB depending
          on the protocol of the packet.
 
+config NFT_XFRM
+       tristate "Netfilter nf_tables xfrm/IPSec security association matching"
+       depends on XFRM
+       help
+         This option adds an expression that you can use to extract properties
+         of a packet's security association.
+
 config NFT_SOCKET
        tristate "Netfilter nf_tables socket match support"
        depends on IPV6 || IPV6=n
index 16895e045b66b97929937a5c96d3c83a828bdb18..4ddf3ef51ecef1262fe760934f5bb6ccca23d5ba 100644 (file)
@@ -113,6 +113,7 @@ obj-$(CONFIG_NFT_FIB_NETDEV)        += nft_fib_netdev.o
 obj-$(CONFIG_NFT_SOCKET)       += nft_socket.o
 obj-$(CONFIG_NFT_OSF)          += nft_osf.o
 obj-$(CONFIG_NFT_TPROXY)       += nft_tproxy.o
+obj-$(CONFIG_NFT_XFRM)         += nft_xfrm.o
 
 # nf_tables netdev
 obj-$(CONFIG_NFT_DUP_NETDEV)   += nft_dup_netdev.o
index 62eefea489732d6d11195e98388ded730d963283..83395bf6dc35e2a3ea486246e98de99b6e1094da 100644 (file)
@@ -3234,7 +3234,7 @@ static int ip_vs_genl_dump_dests(struct sk_buff *skb,
 
        /* Try to find the service for which to dump destinations */
        if (nlmsg_parse(cb->nlh, GENL_HDRLEN, attrs, IPVS_CMD_ATTR_MAX,
-                       ip_vs_cmd_policy, NULL))
+                       ip_vs_cmd_policy, cb->extack))
                goto out_err;
 
 
index a676d5f76bdc26b8a3f192a82a0cbad2e1bc866e..ca1168d67fac6c0fc1eaef5dfeb1db8428e51db3 100644 (file)
@@ -379,7 +379,7 @@ bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
                return false;
        }
 
-       l4proto = __nf_ct_l4proto_find(l3num, protonum);
+       l4proto = __nf_ct_l4proto_find(protonum);
 
        ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple,
                              l4proto);
@@ -539,7 +539,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
                nf_ct_tmpl_free(ct);
                return;
        }
-       l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
+       l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
        if (l4proto->destroy)
                l4proto->destroy(ct);
 
@@ -840,7 +840,7 @@ static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
        enum ip_conntrack_info oldinfo;
        struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);
 
-       l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
+       l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
        if (l4proto->allow_clash &&
            !nf_ct_is_dying(ct) &&
            atomic_inc_not_zero(&ct->ct_general.use)) {
@@ -1109,7 +1109,7 @@ static bool gc_worker_can_early_drop(const struct nf_conn *ct)
        if (!test_bit(IPS_ASSURED_BIT, &ct->status))
                return true;
 
-       l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
+       l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
        if (l4proto->can_early_drop && l4proto->can_early_drop(ct))
                return true;
 
@@ -1370,12 +1370,6 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 
        timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
 
-       if (!l4proto->new(ct, skb, dataoff)) {
-               nf_conntrack_free(ct);
-               pr_debug("can't track with proto module\n");
-               return NULL;
-       }
-
        if (timeout_ext)
                nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
                                      GFP_ATOMIC);
@@ -1436,12 +1430,12 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 
 /* On success, returns 0, sets skb->_nfct | ctinfo */
 static int
-resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
+resolve_normal_ct(struct nf_conn *tmpl,
                  struct sk_buff *skb,
                  unsigned int dataoff,
-                 u_int16_t l3num,
                  u_int8_t protonum,
-                 const struct nf_conntrack_l4proto *l4proto)
+                 const struct nf_conntrack_l4proto *l4proto,
+                 const struct nf_hook_state *state)
 {
        const struct nf_conntrack_zone *zone;
        struct nf_conntrack_tuple tuple;
@@ -1452,17 +1446,18 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
        u32 hash;
 
        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
-                            dataoff, l3num, protonum, net, &tuple, l4proto)) {
+                            dataoff, state->pf, protonum, state->net,
+                            &tuple, l4proto)) {
                pr_debug("Can't get tuple\n");
                return 0;
        }
 
        /* look for tuple match */
        zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
-       hash = hash_conntrack_raw(&tuple, net);
-       h = __nf_conntrack_find_get(net, zone, &tuple, hash);
+       hash = hash_conntrack_raw(&tuple, state->net);
+       h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
        if (!h) {
-               h = init_conntrack(net, tmpl, &tuple, l4proto,
+               h = init_conntrack(state->net, tmpl, &tuple, l4proto,
                                   skb, dataoff, hash);
                if (!h)
                        return 0;
@@ -1491,13 +1486,45 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
        return 0;
 }
 
+/*
+ * ICMP packets need special treatment to handle error messages that are
+ * related to a connection.
+ *
+ * Callers need to check if the skb has a conntrack assigned when this
+ * helper returns; in that case the skb belongs to a known connection.
+ */
+static unsigned int __cold
+nf_conntrack_handle_icmp(struct nf_conn *tmpl,
+                        struct sk_buff *skb,
+                        unsigned int dataoff,
+                        u8 protonum,
+                        const struct nf_hook_state *state)
+{
+       int ret;
+
+       if (state->pf == NFPROTO_IPV4 && protonum == IPPROTO_ICMP)
+               ret = nf_conntrack_icmpv4_error(tmpl, skb, dataoff, state);
+#if IS_ENABLED(CONFIG_IPV6)
+       else if (state->pf == NFPROTO_IPV6 && protonum == IPPROTO_ICMPV6)
+               ret = nf_conntrack_icmpv6_error(tmpl, skb, dataoff, state);
+#endif
+       else
+               return NF_ACCEPT;
+
+       if (ret <= 0) {
+               NF_CT_STAT_INC_ATOMIC(state->net, error);
+               NF_CT_STAT_INC_ATOMIC(state->net, invalid);
+       }
+
+       return ret;
+}
+
 unsigned int
-nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
-               struct sk_buff *skb)
+nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
 {
        const struct nf_conntrack_l4proto *l4proto;
-       struct nf_conn *ct, *tmpl;
        enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct, *tmpl;
        u_int8_t protonum;
        int dataoff, ret;
 
@@ -1506,32 +1533,28 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
                /* Previously seen (loopback or untracked)?  Ignore. */
                if ((tmpl && !nf_ct_is_template(tmpl)) ||
                     ctinfo == IP_CT_UNTRACKED) {
-                       NF_CT_STAT_INC_ATOMIC(net, ignore);
+                       NF_CT_STAT_INC_ATOMIC(state->net, ignore);
                        return NF_ACCEPT;
                }
                skb->_nfct = 0;
        }
 
        /* rcu_read_lock()ed by nf_hook_thresh */
-       dataoff = get_l4proto(skb, skb_network_offset(skb), pf, &protonum);
+       dataoff = get_l4proto(skb, skb_network_offset(skb), state->pf, &protonum);
        if (dataoff <= 0) {
                pr_debug("not prepared to track yet or error occurred\n");
-               NF_CT_STAT_INC_ATOMIC(net, error);
-               NF_CT_STAT_INC_ATOMIC(net, invalid);
+               NF_CT_STAT_INC_ATOMIC(state->net, error);
+               NF_CT_STAT_INC_ATOMIC(state->net, invalid);
                ret = NF_ACCEPT;
                goto out;
        }
 
-       l4proto = __nf_ct_l4proto_find(pf, protonum);
+       l4proto = __nf_ct_l4proto_find(protonum);
 
-       /* It may be an special packet, error, unclean...
-        * inverse of the return code tells to the netfilter
-        * core what to do with the packet. */
-       if (l4proto->error != NULL) {
-               ret = l4proto->error(net, tmpl, skb, dataoff, pf, hooknum);
+       if (protonum == IPPROTO_ICMP || protonum == IPPROTO_ICMPV6) {
+               ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff,
+                                              protonum, state);
                if (ret <= 0) {
-                       NF_CT_STAT_INC_ATOMIC(net, error);
-                       NF_CT_STAT_INC_ATOMIC(net, invalid);
                        ret = -ret;
                        goto out;
                }
@@ -1540,10 +1563,11 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
                        goto out;
        }
 repeat:
-       ret = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, l4proto);
+       ret = resolve_normal_ct(tmpl, skb, dataoff,
+                               protonum, l4proto, state);
        if (ret < 0) {
                /* Too stressed to deal. */
-               NF_CT_STAT_INC_ATOMIC(net, drop);
+               NF_CT_STAT_INC_ATOMIC(state->net, drop);
                ret = NF_DROP;
                goto out;
        }
@@ -1551,21 +1575,21 @@ repeat:
        ct = nf_ct_get(skb, &ctinfo);
        if (!ct) {
                /* Not valid part of a connection */
-               NF_CT_STAT_INC_ATOMIC(net, invalid);
+               NF_CT_STAT_INC_ATOMIC(state->net, invalid);
                ret = NF_ACCEPT;
                goto out;
        }
 
-       ret = l4proto->packet(ct, skb, dataoff, ctinfo);
+       ret = l4proto->packet(ct, skb, dataoff, ctinfo, state);
        if (ret <= 0) {
                /* Invalid: inverse of the return code tells
                 * the netfilter core what to do */
                pr_debug("nf_conntrack_in: Can't track with proto module\n");
                nf_conntrack_put(&ct->ct_general);
                skb->_nfct = 0;
-               NF_CT_STAT_INC_ATOMIC(net, invalid);
+               NF_CT_STAT_INC_ATOMIC(state->net, invalid);
                if (ret == -NF_DROP)
-                       NF_CT_STAT_INC_ATOMIC(net, drop);
+                       NF_CT_STAT_INC_ATOMIC(state->net, drop);
                /* Special case: TCP tracker reports an attempt to reopen a
                 * closed/aborted connection. We have to go back and create a
                 * fresh conntrack.
@@ -1594,8 +1618,7 @@ bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
 
        rcu_read_lock();
        ret = nf_ct_invert_tuple(inverse, orig,
-                                __nf_ct_l4proto_find(orig->src.l3num,
-                                                     orig->dst.protonum));
+                                __nf_ct_l4proto_find(orig->dst.protonum));
        rcu_read_unlock();
        return ret;
 }
@@ -1752,7 +1775,7 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
        if (dataoff <= 0)
                return -1;
 
-       l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+       l4proto = nf_ct_l4proto_find_get(l4num);
 
        if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
                             l4num, net, &tuple, l4proto))
index 27b84231db10178288ed00ecdfc53bfe5a9407f0..3034038bfdf0557cc66c8c4789800e3f2e6451b3 100644 (file)
@@ -610,8 +610,7 @@ static int exp_seq_show(struct seq_file *s, void *v)
                   expect->tuple.src.l3num,
                   expect->tuple.dst.protonum);
        print_tuple(s, &expect->tuple,
-                   __nf_ct_l4proto_find(expect->tuple.src.l3num,
-                                      expect->tuple.dst.protonum));
+                   __nf_ct_l4proto_find(expect->tuple.dst.protonum));
 
        if (expect->flags & NF_CT_EXPECT_PERMANENT) {
                seq_puts(s, "PERMANENT");
index 036207ecaf1663e2ddeb8bf31a5a63e56e319302..4ae8e528943aca9f1881c3b4f77e5ebc231ebe14 100644 (file)
@@ -135,8 +135,7 @@ static int ctnetlink_dump_tuples(struct sk_buff *skb,
        ret = ctnetlink_dump_tuples_ip(skb, tuple);
 
        if (ret >= 0) {
-               l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
-                                              tuple->dst.protonum);
+               l4proto = __nf_ct_l4proto_find(tuple->dst.protonum);
                ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
        }
        rcu_read_unlock();
@@ -184,7 +183,7 @@ static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
        struct nlattr *nest_proto;
        int ret;
 
-       l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
+       l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
        if (!l4proto->to_nlattr)
                return 0;
 
@@ -592,7 +591,7 @@ static size_t ctnetlink_proto_size(const struct nf_conn *ct)
        len = nla_policy_len(cta_ip_nla_policy, CTA_IP_MAX + 1);
        len *= 3u; /* ORIG, REPLY, MASTER */
 
-       l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
+       l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
        len += l4proto->nlattr_size;
        if (l4proto->nlattr_tuple_size) {
                len4 = l4proto->nlattr_tuple_size();
@@ -821,6 +820,7 @@ static int ctnetlink_done(struct netlink_callback *cb)
 }
 
 struct ctnetlink_filter {
+       u8 family;
        struct {
                u_int32_t val;
                u_int32_t mask;
@@ -828,31 +828,39 @@ struct ctnetlink_filter {
 };
 
 static struct ctnetlink_filter *
-ctnetlink_alloc_filter(const struct nlattr * const cda[])
+ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
 {
-#ifdef CONFIG_NF_CONNTRACK_MARK
        struct ctnetlink_filter *filter;
 
+#ifndef CONFIG_NF_CONNTRACK_MARK
+       if (cda[CTA_MARK] && cda[CTA_MARK_MASK])
+               return ERR_PTR(-EOPNOTSUPP);
+#endif
+
        filter = kzalloc(sizeof(*filter), GFP_KERNEL);
        if (filter == NULL)
                return ERR_PTR(-ENOMEM);
 
-       filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
-       filter->mark.mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
+       filter->family = family;
 
-       return filter;
-#else
-       return ERR_PTR(-EOPNOTSUPP);
+#ifdef CONFIG_NF_CONNTRACK_MARK
+       if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
+               filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
+               filter->mark.mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
+       }
 #endif
+       return filter;
 }
 
 static int ctnetlink_start(struct netlink_callback *cb)
 {
        const struct nlattr * const *cda = cb->data;
        struct ctnetlink_filter *filter = NULL;
+       struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+       u8 family = nfmsg->nfgen_family;
 
-       if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
-               filter = ctnetlink_alloc_filter(cda);
+       if (family || (cda[CTA_MARK] && cda[CTA_MARK_MASK])) {
+               filter = ctnetlink_alloc_filter(cda, family);
                if (IS_ERR(filter))
                        return PTR_ERR(filter);
        }
@@ -866,13 +874,24 @@ static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
        struct ctnetlink_filter *filter = data;
 
        if (filter == NULL)
-               return 1;
+               goto out;
+
+       /* Match entries of a given L3 protocol number.
+        * If it is not specified, i.e. l3proto == 0,
+        * then match everything.
+        */
+       if (filter->family && nf_ct_l3num(ct) != filter->family)
+               goto ignore_entry;
 
 #ifdef CONFIG_NF_CONNTRACK_MARK
-       if ((ct->mark & filter->mark.mask) == filter->mark.val)
-               return 1;
+       if ((ct->mark & filter->mark.mask) != filter->mark.val)
+               goto ignore_entry;
 #endif
 
+out:
+       return 1;
+
+ignore_entry:
        return 0;
 }
 
@@ -883,8 +902,6 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
        struct nf_conn *ct, *last;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
-       struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
-       u_int8_t l3proto = nfmsg->nfgen_family;
        struct nf_conn *nf_ct_evict[8];
        int res, i;
        spinlock_t *lockp;
@@ -923,11 +940,6 @@ restart:
                        if (!net_eq(net, nf_ct_net(ct)))
                                continue;
 
-                       /* Dump entries of a given L3 protocol number.
-                        * If it is not specified, ie. l3proto == 0,
-                        * then dump everything. */
-                       if (l3proto && nf_ct_l3num(ct) != l3proto)
-                               continue;
                        if (cb->args[1]) {
                                if (ct != last)
                                        continue;
@@ -1048,7 +1060,7 @@ static int ctnetlink_parse_tuple_proto(struct nlattr *attr,
        tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);
 
        rcu_read_lock();
-       l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
+       l4proto = __nf_ct_l4proto_find(tuple->dst.protonum);
 
        if (likely(l4proto->nlattr_to_tuple)) {
                ret = nla_validate_nested(attr, CTA_PROTO_MAX,
@@ -1213,12 +1225,12 @@ static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data)
 
 static int ctnetlink_flush_conntrack(struct net *net,
                                     const struct nlattr * const cda[],
-                                    u32 portid, int report)
+                                    u32 portid, int report, u8 family)
 {
        struct ctnetlink_filter *filter = NULL;
 
-       if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
-               filter = ctnetlink_alloc_filter(cda);
+       if (family || (cda[CTA_MARK] && cda[CTA_MARK_MASK])) {
+               filter = ctnetlink_alloc_filter(cda, family);
                if (IS_ERR(filter))
                        return PTR_ERR(filter);
        }
@@ -1257,7 +1269,7 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
        else {
                return ctnetlink_flush_conntrack(net, cda,
                                                 NETLINK_CB(skb).portid,
-                                                nlmsg_report(nlh));
+                                                nlmsg_report(nlh), u3);
        }
 
        if (err < 0)
@@ -1696,7 +1708,7 @@ static int ctnetlink_change_protoinfo(struct nf_conn *ct,
                return err;
 
        rcu_read_lock();
-       l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
+       l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
        if (l4proto->from_nlattr)
                err = l4proto->from_nlattr(tb, ct);
        rcu_read_unlock();
@@ -2656,8 +2668,7 @@ static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
        rcu_read_lock();
        ret = ctnetlink_dump_tuples_ip(skb, &m);
        if (ret >= 0) {
-               l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
-                                              tuple->dst.protonum);
+               l4proto = __nf_ct_l4proto_find(tuple->dst.protonum);
        ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
        }
        rcu_read_unlock();
index 51c5d7eec0a3517518a38cce411bf7b09189c15e..40643af7137e617d7492d8803581f5c39ac6902f 100644 (file)
@@ -43,7 +43,7 @@
 
 extern unsigned int nf_conntrack_net_id;
 
-static struct nf_conntrack_l4proto __rcu **nf_ct_protos[NFPROTO_NUMPROTO] __read_mostly;
+static struct nf_conntrack_l4proto __rcu *nf_ct_protos[MAX_NF_CT_PROTO + 1] __read_mostly;
 
 static DEFINE_MUTEX(nf_ct_proto_mutex);
 
@@ -124,23 +124,21 @@ void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
 EXPORT_SYMBOL_GPL(nf_ct_l4proto_log_invalid);
 #endif
 
-const struct nf_conntrack_l4proto *
-__nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto)
+const struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u8 l4proto)
 {
-       if (unlikely(l3proto >= NFPROTO_NUMPROTO || nf_ct_protos[l3proto] == NULL))
+       if (unlikely(l4proto >= ARRAY_SIZE(nf_ct_protos)))
                return &nf_conntrack_l4proto_generic;
 
-       return rcu_dereference(nf_ct_protos[l3proto][l4proto]);
+       return rcu_dereference(nf_ct_protos[l4proto]);
 }
 EXPORT_SYMBOL_GPL(__nf_ct_l4proto_find);
 
-const struct nf_conntrack_l4proto *
-nf_ct_l4proto_find_get(u_int16_t l3num, u_int8_t l4num)
+const struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u8 l4num)
 {
        const struct nf_conntrack_l4proto *p;
 
        rcu_read_lock();
-       p = __nf_ct_l4proto_find(l3num, l4num);
+       p = __nf_ct_l4proto_find(l4num);
        if (!try_module_get(p->me))
                p = &nf_conntrack_l4proto_generic;
        rcu_read_unlock();
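
Because IP protocol numbers form a single 8-bit space shared by every l3 family (TCP is 6 for both IPv4 and IPv6), the old two-level scheme of lazily allocated per-family tables collapses into one flat RCU-protected array indexed by l4 protocol, with every slot pointing at the generic tracker so a lookup never returns NULL. A userspace model of that lookup discipline, with C11 atomics standing in for RCU (an illustration, not the kernel code):

    #include <stdatomic.h>
    #include <stddef.h>

    struct proto_ops { const char *name; };

    static struct proto_ops generic_ops = { "generic" };
    static _Atomic(struct proto_ops *) proto_table[256];

    /* every slot starts at the generic fallback, never NULL */
    static void proto_table_init(void)
    {
            for (size_t i = 0; i < 256; i++)
                    atomic_store(&proto_table[i], &generic_ops);
    }

    static struct proto_ops *proto_find(unsigned int proto)
    {
            if (proto >= 256)
                    return &generic_ops;    /* out of range: fall back */
            return atomic_load_explicit(&proto_table[proto],
                                        memory_order_acquire);
    }
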
@@ -159,8 +157,7 @@ static int kill_l4proto(struct nf_conn *i, void *data)
 {
        const struct nf_conntrack_l4proto *l4proto;
        l4proto = data;
-       return nf_ct_protonum(i) == l4proto->l4proto &&
-              nf_ct_l3num(i) == l4proto->l3proto;
+       return nf_ct_protonum(i) == l4proto->l4proto;
 }
 
 static struct nf_proto_net *nf_ct_l4proto_net(struct net *net,
@@ -219,48 +216,20 @@ int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *l4proto)
 {
        int ret = 0;
 
-       if (l4proto->l3proto >= ARRAY_SIZE(nf_ct_protos))
-               return -EBUSY;
-
        if ((l4proto->to_nlattr && l4proto->nlattr_size == 0) ||
            (l4proto->tuple_to_nlattr && !l4proto->nlattr_tuple_size))
                return -EINVAL;
 
        mutex_lock(&nf_ct_proto_mutex);
-       if (!nf_ct_protos[l4proto->l3proto]) {
-               /* l3proto may be loaded latter. */
-               struct nf_conntrack_l4proto __rcu **proto_array;
-               int i;
-
-               proto_array =
-                       kmalloc_array(MAX_NF_CT_PROTO,
-                                     sizeof(struct nf_conntrack_l4proto *),
-                                     GFP_KERNEL);
-               if (proto_array == NULL) {
-                       ret = -ENOMEM;
-                       goto out_unlock;
-               }
-
-               for (i = 0; i < MAX_NF_CT_PROTO; i++)
-                       RCU_INIT_POINTER(proto_array[i],
-                                        &nf_conntrack_l4proto_generic);
-
-               /* Before making proto_array visible to lockless readers,
-                * we must make sure its content is committed to memory.
-                */
-               smp_wmb();
-
-               nf_ct_protos[l4proto->l3proto] = proto_array;
-       } else if (rcu_dereference_protected(
-                       nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
+       if (rcu_dereference_protected(
+                       nf_ct_protos[l4proto->l4proto],
                        lockdep_is_held(&nf_ct_proto_mutex)
                        ) != &nf_conntrack_l4proto_generic) {
                ret = -EBUSY;
                goto out_unlock;
        }
 
-       rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
-                          l4proto);
+       rcu_assign_pointer(nf_ct_protos[l4proto->l4proto], l4proto);
 out_unlock:
        mutex_unlock(&nf_ct_proto_mutex);
        return ret;
@@ -274,7 +243,7 @@ int nf_ct_l4proto_pernet_register_one(struct net *net,
        struct nf_proto_net *pn = NULL;
 
        if (l4proto->init_net) {
-               ret = l4proto->init_net(net, l4proto->l3proto);
+               ret = l4proto->init_net(net);
                if (ret < 0)
                        goto out;
        }
@@ -296,13 +265,13 @@ EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_register_one);
 static void __nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *l4proto)
 
 {
-       BUG_ON(l4proto->l3proto >= ARRAY_SIZE(nf_ct_protos));
+       BUG_ON(l4proto->l4proto >= ARRAY_SIZE(nf_ct_protos));
 
        BUG_ON(rcu_dereference_protected(
-                       nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
+                       nf_ct_protos[l4proto->l4proto],
                        lockdep_is_held(&nf_ct_proto_mutex)
                        ) != l4proto);
-       rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
+       rcu_assign_pointer(nf_ct_protos[l4proto->l4proto],
                           &nf_conntrack_l4proto_generic);
 }
 
@@ -352,7 +321,7 @@ static int
 nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const l4proto[],
                       unsigned int num_proto)
 {
-       int ret = -EINVAL, ver;
+       int ret = -EINVAL;
        unsigned int i;
 
        for (i = 0; i < num_proto; i++) {
@@ -361,9 +330,8 @@ nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const l4proto[],
                        break;
        }
        if (i != num_proto) {
-               ver = l4proto[i]->l3proto == PF_INET6 ? 6 : 4;
-               pr_err("nf_conntrack_ipv%d: can't register l4 %d proto.\n",
-                      ver, l4proto[i]->l4proto);
+               pr_err("nf_conntrack: can't register l4 %d proto.\n",
+                      l4proto[i]->l4proto);
                nf_ct_l4proto_unregister(l4proto, i);
        }
        return ret;
@@ -382,9 +350,8 @@ int nf_ct_l4proto_pernet_register(struct net *net,
                        break;
        }
        if (i != num_proto) {
-               pr_err("nf_conntrack_proto_%d %d: pernet registration failed\n",
-                      l4proto[i]->l4proto,
-                      l4proto[i]->l3proto == PF_INET6 ? 6 : 4);
+               pr_err("nf_conntrack %d: pernet registration failed\n",
+                      l4proto[i]->l4proto);
                nf_ct_l4proto_pernet_unregister(net, l4proto, i);
        }
        return ret;
@@ -455,7 +422,7 @@ static unsigned int ipv4_conntrack_in(void *priv,
                                      struct sk_buff *skb,
                                      const struct nf_hook_state *state)
 {
-       return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
+       return nf_conntrack_in(skb, state);
 }
 
 static unsigned int ipv4_conntrack_local(void *priv,
@@ -477,7 +444,7 @@ static unsigned int ipv4_conntrack_local(void *priv,
                return NF_ACCEPT;
        }
 
-       return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
+       return nf_conntrack_in(skb, state);
 }
 
 /* Connection tracking may drop packets, but never alters them, so
@@ -690,14 +657,14 @@ static unsigned int ipv6_conntrack_in(void *priv,
                                      struct sk_buff *skb,
                                      const struct nf_hook_state *state)
 {
-       return nf_conntrack_in(state->net, PF_INET6, state->hook, skb);
+       return nf_conntrack_in(skb, state);
 }
 
 static unsigned int ipv6_conntrack_local(void *priv,
                                         struct sk_buff *skb,
                                         const struct nf_hook_state *state)
 {
-       return nf_conntrack_in(state->net, PF_INET6, state->hook, skb);
+       return nf_conntrack_in(skb, state);
 }
 
 static unsigned int ipv6_helper(void *priv,
@@ -911,37 +878,26 @@ void nf_ct_netns_put(struct net *net, uint8_t nfproto)
 EXPORT_SYMBOL_GPL(nf_ct_netns_put);
 
 static const struct nf_conntrack_l4proto * const builtin_l4proto[] = {
-       &nf_conntrack_l4proto_tcp4,
-       &nf_conntrack_l4proto_udp4,
+       &nf_conntrack_l4proto_tcp,
+       &nf_conntrack_l4proto_udp,
        &nf_conntrack_l4proto_icmp,
 #ifdef CONFIG_NF_CT_PROTO_DCCP
-       &nf_conntrack_l4proto_dccp4,
+       &nf_conntrack_l4proto_dccp,
 #endif
 #ifdef CONFIG_NF_CT_PROTO_SCTP
-       &nf_conntrack_l4proto_sctp4,
+       &nf_conntrack_l4proto_sctp,
 #endif
 #ifdef CONFIG_NF_CT_PROTO_UDPLITE
-       &nf_conntrack_l4proto_udplite4,
+       &nf_conntrack_l4proto_udplite,
 #endif
 #if IS_ENABLED(CONFIG_IPV6)
-       &nf_conntrack_l4proto_tcp6,
-       &nf_conntrack_l4proto_udp6,
        &nf_conntrack_l4proto_icmpv6,
-#ifdef CONFIG_NF_CT_PROTO_DCCP
-       &nf_conntrack_l4proto_dccp6,
-#endif
-#ifdef CONFIG_NF_CT_PROTO_SCTP
-       &nf_conntrack_l4proto_sctp6,
-#endif
-#ifdef CONFIG_NF_CT_PROTO_UDPLITE
-       &nf_conntrack_l4proto_udplite6,
-#endif
 #endif /* CONFIG_IPV6 */
 };
 
 int nf_conntrack_proto_init(void)
 {
-       int ret = 0;
+       int ret = 0, i;
 
        ret = nf_register_sockopt(&so_getorigdst);
        if (ret < 0)
@@ -952,6 +908,11 @@ int nf_conntrack_proto_init(void)
        if (ret < 0)
                goto cleanup_sockopt;
 #endif
+
+       for (i = 0; i < ARRAY_SIZE(nf_ct_protos); i++)
+               RCU_INIT_POINTER(nf_ct_protos[i],
+                                &nf_conntrack_l4proto_generic);
+
        ret = nf_ct_l4proto_register(builtin_l4proto,
                                     ARRAY_SIZE(builtin_l4proto));
        if (ret < 0)
@@ -969,17 +930,10 @@ cleanup_sockopt:
 
 void nf_conntrack_proto_fini(void)
 {
-       unsigned int i;
-
        nf_unregister_sockopt(&so_getorigdst);
 #if IS_ENABLED(CONFIG_IPV6)
        nf_unregister_sockopt(&so_getorigdst6);
 #endif
-       /* No need to call nf_ct_l4proto_unregister(), the register
-        * tables are free'd here anyway.
-        */
-       for (i = 0; i < ARRAY_SIZE(nf_ct_protos); i++)
-               kfree(nf_ct_protos[i]);
 }
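Setup accordingly becomes a one-time loop that seeds every slot with the generic tracker, and teardown no longer has per-family sub-arrays to free. Continuing the sketch:

    #include <stddef.h>

    static void l4proto_init(void)
    {
        for (size_t i = 0; i < sizeof(protos) / sizeof(protos[0]); i++)
            protos[i] = &generic;             /* kernel: RCU_INIT_POINTER() */
    }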
 
 int nf_conntrack_proto_pernet_init(struct net *net)
@@ -988,8 +942,7 @@ int nf_conntrack_proto_pernet_init(struct net *net)
        struct nf_proto_net *pn = nf_ct_l4proto_net(net,
                                        &nf_conntrack_l4proto_generic);
 
-       err = nf_conntrack_l4proto_generic.init_net(net,
-                                       nf_conntrack_l4proto_generic.l3proto);
+       err = nf_conntrack_l4proto_generic.init_net(net);
        if (err < 0)
                return err;
        err = nf_ct_l4proto_register_sysctl(net,
index f3f91ed2c21adce5dcc5e9ba06d6a843a6af39bb..171e9e122e5f1e8b8840e41013d86246ba8025b9 100644
@@ -389,18 +389,15 @@ static inline struct nf_dccp_net *dccp_pernet(struct net *net)
        return &net->ct.nf_ct_proto.dccp;
 }
 
-static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
-                    unsigned int dataoff)
+static noinline bool
+dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
+        const struct dccp_hdr *dh)
 {
        struct net *net = nf_ct_net(ct);
        struct nf_dccp_net *dn;
-       struct dccp_hdr _dh, *dh;
        const char *msg;
        u_int8_t state;
 
-       dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
-       BUG_ON(dh == NULL);
-
        state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
        switch (state) {
        default:
@@ -438,8 +435,51 @@ static u64 dccp_ack_seq(const struct dccp_hdr *dh)
                     ntohl(dhack->dccph_ack_nr_low);
 }
 
-static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
-                      unsigned int dataoff, enum ip_conntrack_info ctinfo)
+static bool dccp_error(const struct dccp_hdr *dh,
+                      struct sk_buff *skb, unsigned int dataoff,
+                      const struct nf_hook_state *state)
+{
+       unsigned int dccp_len = skb->len - dataoff;
+       unsigned int cscov;
+       const char *msg;
+
+       if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) ||
+           dh->dccph_doff * 4 > dccp_len) {
+               msg = "nf_ct_dccp: truncated/malformed packet ";
+               goto out_invalid;
+       }
+
+       cscov = dccp_len;
+       if (dh->dccph_cscov) {
+               cscov = (dh->dccph_cscov - 1) * 4;
+               if (cscov > dccp_len) {
+                       msg = "nf_ct_dccp: bad checksum coverage ";
+                       goto out_invalid;
+               }
+       }
+
+       if (state->hook == NF_INET_PRE_ROUTING &&
+           state->net->ct.sysctl_checksum &&
+           nf_checksum_partial(skb, state->hook, dataoff, cscov,
+                               IPPROTO_DCCP, state->pf)) {
+               msg = "nf_ct_dccp: bad checksum ";
+               goto out_invalid;
+       }
+
+       if (dh->dccph_type >= DCCP_PKT_INVALID) {
+               msg = "nf_ct_dccp: reserved packet type ";
+               goto out_invalid;
+       }
+       return false;
+out_invalid:
+       nf_l4proto_log_invalid(skb, state->net, state->pf,
+                              IPPROTO_DCCP, "%s", msg);
+       return true;
+}
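dccp_error() now runs from inside the packet handler and rejects a datagram on three grounds: a data offset that contradicts the packet length, a checksum-coverage value larger than the payload, or a reserved packet type. A standalone sketch of the two length invariants (the header struct is simplified here; the real layout is struct dccp_hdr):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct dccp_hdr_min {          /* simplified: only the fields checked here */
        uint8_t doff;              /* header length, in 32-bit words */
        uint8_t cscov;             /* checksum coverage; 0 means full packet */
    };

    static bool dccp_lengths_ok(const struct dccp_hdr_min *dh, size_t dccp_len)
    {
        size_t hdrlen = (size_t)dh->doff * 4;

        /* the claimed header length must fit inside the packet */
        if (hdrlen < sizeof(*dh) || hdrlen > dccp_len)
            return false;
        /* nonzero cscov encodes a coverage of (cscov - 1) * 4 bytes */
        if (dh->cscov && (size_t)(dh->cscov - 1) * 4 > dccp_len)
            return false;
        return true;
    }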
+
+static int dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
+                      unsigned int dataoff, enum ip_conntrack_info ctinfo,
+                      const struct nf_hook_state *state)
 {
        enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
        struct dccp_hdr _dh, *dh;
@@ -448,8 +488,15 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
        unsigned int *timeouts;
 
        dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
-       BUG_ON(dh == NULL);
+       if (!dh)
+               return NF_DROP;
+
+       if (dccp_error(dh, skb, dataoff, state))
+               return -NF_ACCEPT;
+
        type = dh->dccph_type;
+       if (!nf_ct_is_confirmed(ct) && !dccp_new(ct, skb, dh))
+               return -NF_ACCEPT;
 
        if (type == DCCP_PKT_RESET &&
            !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
@@ -527,55 +574,6 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
        return NF_ACCEPT;
 }
 
-static int dccp_error(struct net *net, struct nf_conn *tmpl,
-                     struct sk_buff *skb, unsigned int dataoff,
-                     u_int8_t pf, unsigned int hooknum)
-{
-       struct dccp_hdr _dh, *dh;
-       unsigned int dccp_len = skb->len - dataoff;
-       unsigned int cscov;
-       const char *msg;
-
-       dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
-       if (dh == NULL) {
-               msg = "nf_ct_dccp: short packet ";
-               goto out_invalid;
-       }
-
-       if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) ||
-           dh->dccph_doff * 4 > dccp_len) {
-               msg = "nf_ct_dccp: truncated/malformed packet ";
-               goto out_invalid;
-       }
-
-       cscov = dccp_len;
-       if (dh->dccph_cscov) {
-               cscov = (dh->dccph_cscov - 1) * 4;
-               if (cscov > dccp_len) {
-                       msg = "nf_ct_dccp: bad checksum coverage ";
-                       goto out_invalid;
-               }
-       }
-
-       if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
-           nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_DCCP,
-                               pf)) {
-               msg = "nf_ct_dccp: bad checksum ";
-               goto out_invalid;
-       }
-
-       if (dh->dccph_type >= DCCP_PKT_INVALID) {
-               msg = "nf_ct_dccp: reserved packet type ";
-               goto out_invalid;
-       }
-
-       return NF_ACCEPT;
-
-out_invalid:
-       nf_l4proto_log_invalid(skb, net, pf, IPPROTO_DCCP, "%s", msg);
-       return -NF_ACCEPT;
-}
-
 static bool dccp_can_early_drop(const struct nf_conn *ct)
 {
        switch (ct->proto.dccp.state) {
@@ -814,7 +812,7 @@ static int dccp_kmemdup_sysctl_table(struct net *net, struct nf_proto_net *pn,
        return 0;
 }
 
-static int dccp_init_net(struct net *net, u_int16_t proto)
+static int dccp_init_net(struct net *net)
 {
        struct nf_dccp_net *dn = dccp_pernet(net);
        struct nf_proto_net *pn = &dn->pn;
@@ -844,45 +842,9 @@ static struct nf_proto_net *dccp_get_net_proto(struct net *net)
        return &net->ct.nf_ct_proto.dccp.pn;
 }
 
-const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = {
-       .l3proto                = AF_INET,
-       .l4proto                = IPPROTO_DCCP,
-       .new                    = dccp_new,
-       .packet                 = dccp_packet,
-       .error                  = dccp_error,
-       .can_early_drop         = dccp_can_early_drop,
-#ifdef CONFIG_NF_CONNTRACK_PROCFS
-       .print_conntrack        = dccp_print_conntrack,
-#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
-       .nlattr_size            = DCCP_NLATTR_SIZE,
-       .to_nlattr              = dccp_to_nlattr,
-       .from_nlattr            = nlattr_to_dccp,
-       .tuple_to_nlattr        = nf_ct_port_tuple_to_nlattr,
-       .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
-       .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
-       .nla_policy             = nf_ct_port_nla_policy,
-#endif
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-       .ctnl_timeout           = {
-               .nlattr_to_obj  = dccp_timeout_nlattr_to_obj,
-               .obj_to_nlattr  = dccp_timeout_obj_to_nlattr,
-               .nlattr_max     = CTA_TIMEOUT_DCCP_MAX,
-               .obj_size       = sizeof(unsigned int) * CT_DCCP_MAX,
-               .nla_policy     = dccp_timeout_nla_policy,
-       },
-#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-       .init_net               = dccp_init_net,
-       .get_net_proto          = dccp_get_net_proto,
-};
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_dccp4);
-
-const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = {
-       .l3proto                = AF_INET6,
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp = {
        .l4proto                = IPPROTO_DCCP,
-       .new                    = dccp_new,
        .packet                 = dccp_packet,
-       .error                  = dccp_error,
        .can_early_drop         = dccp_can_early_drop,
 #ifdef CONFIG_NF_CONNTRACK_PROCFS
        .print_conntrack        = dccp_print_conntrack,
@@ -908,4 +870,3 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = {
        .init_net               = dccp_init_net,
        .get_net_proto          = dccp_get_net_proto,
 };
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_dccp6);
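The net effect on the DCCP tracker: the ->error and ->new callbacks are gone, and dccp_packet() alone pulls the header, validates it, seeds unconfirmed connections, and only then runs the state table. The control flow reduces to the following shape (stub predicates stand in for skb_header_pointer(), dccp_error() and dccp_new()):

    #include <stdbool.h>
    #include <stdio.h>

    enum verdict { V_ACCEPT, V_INVALID, V_DROP };

    /* stubs for illustration only */
    static bool pull_header(void)    { return true; }
    static bool header_invalid(void) { return false; }
    static bool conn_confirmed(void) { return false; }
    static bool seed_new_conn(void)  { return true; }

    static enum verdict packet_verdict(void)
    {
        if (!pull_header())
            return V_DROP;        /* header not even readable */
        if (header_invalid())
            return V_INVALID;     /* logged; packet bypasses tracking */
        if (!conn_confirmed() && !seed_new_conn())
            return V_INVALID;     /* first packet failed to seed state */
        /* ... per-protocol state machine runs here ... */
        return V_ACCEPT;
    }

    int main(void)
    {
        printf("verdict: %d\n", packet_verdict());
        return 0;
    }

The same three-step prologue recurs in the SCTP and TCP handlers below.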
index 1df3244ecd07fc573538cccb83da67efaa69a5be..e10e867e0b55f3203e8a50d4ac7c884201ac1186 100644
@@ -44,12 +44,19 @@ static bool generic_pkt_to_tuple(const struct sk_buff *skb,
 
 /* Returns verdict for packet, or -1 for invalid. */
 static int generic_packet(struct nf_conn *ct,
-                         const struct sk_buff *skb,
+                         struct sk_buff *skb,
                          unsigned int dataoff,
-                         enum ip_conntrack_info ctinfo)
+                         enum ip_conntrack_info ctinfo,
+                         const struct nf_hook_state *state)
 {
        const unsigned int *timeout = nf_ct_timeout_lookup(ct);
 
+       if (!nf_generic_should_process(nf_ct_protonum(ct))) {
+               pr_warn_once("conntrack: generic helper won't handle protocol %d. Please consider loading the specific helper module.\n",
+                            nf_ct_protonum(ct));
+               return -NF_ACCEPT;
+       }
+
        if (!timeout)
                timeout = &generic_pernet(nf_ct_net(ct))->timeout;
 
@@ -57,19 +64,6 @@ static int generic_packet(struct nf_conn *ct,
        return NF_ACCEPT;
 }
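Folding generic_new() into generic_packet() keeps the one-time warning; pr_warn_once() is kernel-only, so a userspace analogue needs its own latch. A sketch (not thread-safe as written):

    #include <stdbool.h>
    #include <stdio.h>

    static void warn_unhandled_proto(int protonum)
    {
        static bool warned;        /* kernel: pr_warn_once() handles this */

        if (!warned) {
            warned = true;
            fprintf(stderr,
                    "conntrack: generic helper won't handle protocol %d\n",
                    protonum);
        }
    }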
 
-/* Called when a new connection for this protocol found. */
-static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
-                       unsigned int dataoff)
-{
-       bool ret;
-
-       ret = nf_generic_should_process(nf_ct_protonum(ct));
-       if (!ret)
-               pr_warn_once("conntrack: generic helper won't handle protocol %d. Please consider loading the specific helper module.\n",
-                            nf_ct_protonum(ct));
-       return ret;
-}
-
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
@@ -142,7 +136,7 @@ static int generic_kmemdup_sysctl_table(struct nf_proto_net *pn,
        return 0;
 }
 
-static int generic_init_net(struct net *net, u_int16_t proto)
+static int generic_init_net(struct net *net)
 {
        struct nf_generic_net *gn = generic_pernet(net);
        struct nf_proto_net *pn = &gn->pn;
@@ -159,11 +153,9 @@ static struct nf_proto_net *generic_get_net_proto(struct net *net)
 
 const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
 {
-       .l3proto                = PF_UNSPEC,
        .l4proto                = 255,
        .pkt_to_tuple           = generic_pkt_to_tuple,
        .packet                 = generic_packet,
-       .new                    = generic_new,
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = generic_timeout_nlattr_to_obj,
index 650eb4fba2c5418951b4d22e62325726a745af22..9b48dc8b4b885a00d8806038fc5fd0948e60cbca 100644
@@ -233,10 +233,26 @@ static unsigned int *gre_get_timeouts(struct net *net)
 
 /* Returns verdict for packet, and may modify conntrack */
 static int gre_packet(struct nf_conn *ct,
-                     const struct sk_buff *skb,
+                     struct sk_buff *skb,
                      unsigned int dataoff,
-                     enum ip_conntrack_info ctinfo)
+                     enum ip_conntrack_info ctinfo,
+                     const struct nf_hook_state *state)
 {
+       if (state->pf != NFPROTO_IPV4)
+               return -NF_ACCEPT;
+
+       if (!nf_ct_is_confirmed(ct)) {
+               unsigned int *timeouts = nf_ct_timeout_lookup(ct);
+
+               if (!timeouts)
+                       timeouts = gre_get_timeouts(nf_ct_net(ct));
+
+               /* initialize to sane value.  Ideally a conntrack helper
+                * (e.g. in case of pptp) is increasing them */
+               ct->proto.gre.stream_timeout = timeouts[GRE_CT_REPLIED];
+               ct->proto.gre.timeout = timeouts[GRE_CT_UNREPLIED];
+       }
+
        /* If we've seen traffic both ways, this is a GRE connection.
         * Extend timeout. */
        if (ct->status & IPS_SEEN_REPLY) {
@@ -252,26 +268,6 @@ static int gre_packet(struct nf_conn *ct,
        return NF_ACCEPT;
 }
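For GRE the former ->new() work is now guarded by nf_ct_is_confirmed(): the first packet of an entry seeds both timeouts, and later packets skip the block. A reduced sketch (field and parameter names are illustrative):

    #include <stdbool.h>

    struct gre_conn {
        bool confirmed;                /* set by conntrack later in the hook path */
        unsigned int stream_timeout;   /* used once traffic is seen both ways */
        unsigned int timeout;          /* unreplied timeout */
    };

    static void gre_seed_timeouts(struct gre_conn *c,
                                  const unsigned int defaults[2])
    {
        if (c->confirmed)
            return;                    /* only the first packet initializes */
        c->stream_timeout = defaults[0];
        c->timeout = defaults[1];
    }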
 
-/* Called when a new connection for this protocol found. */
-static bool gre_new(struct nf_conn *ct, const struct sk_buff *skb,
-                   unsigned int dataoff)
-{
-       unsigned int *timeouts = nf_ct_timeout_lookup(ct);
-
-       if (!timeouts)
-               timeouts = gre_get_timeouts(nf_ct_net(ct));
-
-       pr_debug(": ");
-       nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-
-       /* initialize to sane value.  Ideally a conntrack helper
-        * (e.g. in case of pptp) is increasing them */
-       ct->proto.gre.stream_timeout = timeouts[GRE_CT_REPLIED];
-       ct->proto.gre.timeout = timeouts[GRE_CT_UNREPLIED];
-
-       return true;
-}
-
 /* Called when a conntrack entry has already been removed from the hashes
  * and is about to be deleted from memory */
 static void gre_destroy(struct nf_conn *ct)
@@ -336,7 +332,7 @@ gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = {
 };
 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
-static int gre_init_net(struct net *net, u_int16_t proto)
+static int gre_init_net(struct net *net)
 {
        struct netns_proto_gre *net_gre = gre_pernet(net);
        int i;
@@ -351,14 +347,12 @@ static int gre_init_net(struct net *net, u_int16_t proto)
 
 /* protocol helper struct */
 static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
-       .l3proto         = AF_INET,
        .l4proto         = IPPROTO_GRE,
        .pkt_to_tuple    = gre_pkt_to_tuple,
 #ifdef CONFIG_NF_CONNTRACK_PROCFS
        .print_conntrack = gre_print_conntrack,
 #endif
        .packet          = gre_packet,
-       .new             = gre_new,
        .destroy         = gre_destroy,
        .me              = THIS_MODULE,
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
index 43c7e1a217b98682a1abd5d3cdb6f63fa1febec2..3598520bd19b7b76dbd91bb42e4b8b91713abf2c 100644
@@ -72,34 +72,17 @@ static bool icmp_invert_tuple(struct nf_conntrack_tuple *tuple,
        return true;
 }
 
-static unsigned int *icmp_get_timeouts(struct net *net)
-{
-       return &icmp_pernet(net)->timeout;
-}
-
 /* Returns verdict for packet, or -1 for invalid. */
 static int icmp_packet(struct nf_conn *ct,
-                      const struct sk_buff *skb,
+                      struct sk_buff *skb,
                       unsigned int dataoff,
-                      enum ip_conntrack_info ctinfo)
+                      enum ip_conntrack_info ctinfo,
+                      const struct nf_hook_state *state)
 {
        /* Do not immediately delete the connection after the first
           successful reply to avoid excessive conntrackd traffic
           and also to handle correctly ICMP echo reply duplicates. */
        unsigned int *timeout = nf_ct_timeout_lookup(ct);
-
-       if (!timeout)
-               timeout = icmp_get_timeouts(nf_ct_net(ct));
-
-       nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
-
-       return NF_ACCEPT;
-}
-
-/* Called when a new connection for this protocol found. */
-static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb,
-                    unsigned int dataoff)
-{
        static const u_int8_t valid_new[] = {
                [ICMP_ECHO] = 1,
                [ICMP_TIMESTAMP] = 1,
@@ -107,21 +90,29 @@ static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb,
                [ICMP_ADDRESS] = 1
        };
 
+       if (state->pf != NFPROTO_IPV4)
+               return -NF_ACCEPT;
+
        if (ct->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new) ||
            !valid_new[ct->tuplehash[0].tuple.dst.u.icmp.type]) {
                /* Can't create a new ICMP `conn' with this. */
                pr_debug("icmp: can't create new conn with type %u\n",
                         ct->tuplehash[0].tuple.dst.u.icmp.type);
                nf_ct_dump_tuple_ip(&ct->tuplehash[0].tuple);
-               return false;
+               return -NF_ACCEPT;
        }
-       return true;
+
+       if (!timeout)
+               timeout = &icmp_pernet(nf_ct_net(ct))->timeout;
+
+       nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
+       return NF_ACCEPT;
 }
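Only four ICMP types may create a connection, and the check is a bounds-guarded table lookup that now runs inline for unconfirmed entries. A standalone version (type values per RFC 792; the kernel's table also lists ICMP_INFO_REQUEST, elided above by the hunk boundary):

    #include <stdbool.h>
    #include <stdint.h>

    #define ICMP_ECHO          8
    #define ICMP_TIMESTAMP     13
    #define ICMP_INFO_REQUEST  15
    #define ICMP_ADDRESS       17   /* address mask request */

    static bool icmp_type_may_create(uint8_t type)
    {
        static const uint8_t valid_new[] = {
            [ICMP_ECHO]         = 1,
            [ICMP_TIMESTAMP]    = 1,
            [ICMP_INFO_REQUEST] = 1,
            [ICMP_ADDRESS]      = 1,
        };

        return type < sizeof(valid_new) && valid_new[type];
    }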
 
 /* Returns conntrack if it dealt with ICMP, and filled in skb fields */
 static int
-icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
-                unsigned int hooknum)
+icmp_error_message(struct nf_conn *tmpl, struct sk_buff *skb,
+                  const struct nf_hook_state *state)
 {
        struct nf_conntrack_tuple innertuple, origtuple;
        const struct nf_conntrack_l4proto *innerproto;
@@ -137,13 +128,13 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
        if (!nf_ct_get_tuplepr(skb,
                               skb_network_offset(skb) + ip_hdrlen(skb)
                                                       + sizeof(struct icmphdr),
-                              PF_INET, net, &origtuple)) {
+                              PF_INET, state->net, &origtuple)) {
                pr_debug("icmp_error_message: failed to get tuple\n");
                return -NF_ACCEPT;
        }
 
        /* rcu_read_lock()ed by nf_hook_thresh */
-       innerproto = __nf_ct_l4proto_find(PF_INET, origtuple.dst.protonum);
+       innerproto = __nf_ct_l4proto_find(origtuple.dst.protonum);
 
        /* Ordinarily, we'd expect the inverted tupleproto, but it's
           been preserved inside the ICMP. */
@@ -154,7 +145,7 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
 
        ctinfo = IP_CT_RELATED;
 
-       h = nf_conntrack_find_get(net, zone, &innertuple);
+       h = nf_conntrack_find_get(state->net, zone, &innertuple);
        if (!h) {
                pr_debug("icmp_error_message: no match\n");
                return -NF_ACCEPT;
@@ -168,17 +159,18 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
        return NF_ACCEPT;
 }
 
-static void icmp_error_log(const struct sk_buff *skb, struct net *net,
-                          u8 pf, const char *msg)
+static void icmp_error_log(const struct sk_buff *skb,
+                          const struct nf_hook_state *state,
+                          const char *msg)
 {
-       nf_l4proto_log_invalid(skb, net, pf, IPPROTO_ICMP, "%s", msg);
+       nf_l4proto_log_invalid(skb, state->net, state->pf,
+                              IPPROTO_ICMP, "%s", msg);
 }
 
 /* Small and modified version of icmp_rcv */
-static int
-icmp_error(struct net *net, struct nf_conn *tmpl,
-          struct sk_buff *skb, unsigned int dataoff,
-          u8 pf, unsigned int hooknum)
+int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
+                             struct sk_buff *skb, unsigned int dataoff,
+                             const struct nf_hook_state *state)
 {
        const struct icmphdr *icmph;
        struct icmphdr _ih;
@@ -186,14 +178,15 @@ icmp_error(struct net *net, struct nf_conn *tmpl,
        /* Not enough header? */
        icmph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_ih), &_ih);
        if (icmph == NULL) {
-               icmp_error_log(skb, net, pf, "short packet");
+               icmp_error_log(skb, state, "short packet");
                return -NF_ACCEPT;
        }
 
        /* See ip_conntrack_proto_tcp.c */
-       if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
-           nf_ip_checksum(skb, hooknum, dataoff, 0)) {
-               icmp_error_log(skb, net, pf, "bad hw icmp checksum");
+       if (state->net->ct.sysctl_checksum &&
+           state->hook == NF_INET_PRE_ROUTING &&
+           nf_ip_checksum(skb, state->hook, dataoff, 0)) {
+               icmp_error_log(skb, state, "bad hw icmp checksum");
                return -NF_ACCEPT;
        }
 
@@ -204,7 +197,7 @@ icmp_error(struct net *net, struct nf_conn *tmpl,
         *                discarded.
         */
        if (icmph->type > NR_ICMP_TYPES) {
-               icmp_error_log(skb, net, pf, "invalid icmp type");
+               icmp_error_log(skb, state, "invalid icmp type");
                return -NF_ACCEPT;
        }
 
@@ -216,7 +209,7 @@ icmp_error(struct net *net, struct nf_conn *tmpl,
            icmph->type != ICMP_REDIRECT)
                return NF_ACCEPT;
 
-       return icmp_error_message(net, tmpl, skb, hooknum);
+       return icmp_error_message(tmpl, skb, state);
 }
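This is the signature change visible throughout the series: the loose net, pf and hooknum parameters collapse into a single const struct nf_hook_state *, so helpers shrink from four arguments to two and gain access to the full hook context. Schematically (types simplified):

    #include <stdint.h>
    #include <stdio.h>

    struct hook_state {            /* stand-in for struct nf_hook_state */
        unsigned int hook;         /* e.g. NF_INET_PRE_ROUTING */
        uint8_t pf;                /* e.g. NFPROTO_IPV4 */
        void *net;                 /* owning network namespace */
    };

    /* before: every caller threads the same three values through */
    static void log_invalid_old(void *net, uint8_t pf, const char *msg)
    {
        (void)net;
        fprintf(stderr, "pf=%u: %s\n", pf, msg);
    }

    /* after: one context pointer, trivially extensible later */
    static void log_invalid_new(const struct hook_state *state, const char *msg)
    {
        fprintf(stderr, "pf=%u hook=%u: %s\n", state->pf, state->hook, msg);
    }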
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -342,7 +335,7 @@ static int icmp_kmemdup_sysctl_table(struct nf_proto_net *pn,
        return 0;
 }
 
-static int icmp_init_net(struct net *net, u_int16_t proto)
+static int icmp_init_net(struct net *net)
 {
        struct nf_icmp_net *in = icmp_pernet(net);
        struct nf_proto_net *pn = &in->pn;
@@ -359,13 +352,10 @@ static struct nf_proto_net *icmp_get_net_proto(struct net *net)
 
 const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
 {
-       .l3proto                = PF_INET,
        .l4proto                = IPPROTO_ICMP,
        .pkt_to_tuple           = icmp_pkt_to_tuple,
        .invert_tuple           = icmp_invert_tuple,
        .packet                 = icmp_packet,
-       .new                    = icmp_new,
-       .error                  = icmp_error,
        .destroy                = NULL,
        .me                     = NULL,
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
index 97e40f77d678a64204cfdd660b92bb1960752305..378618feed5da7df50e09c8ec4f72618953306b0 100644
@@ -92,11 +92,31 @@ static unsigned int *icmpv6_get_timeouts(struct net *net)
 
 /* Returns verdict for packet, or -1 for invalid. */
 static int icmpv6_packet(struct nf_conn *ct,
-                      const struct sk_buff *skb,
-                      unsigned int dataoff,
-                      enum ip_conntrack_info ctinfo)
+                        struct sk_buff *skb,
+                        unsigned int dataoff,
+                        enum ip_conntrack_info ctinfo,
+                        const struct nf_hook_state *state)
 {
        unsigned int *timeout = nf_ct_timeout_lookup(ct);
+       static const u8 valid_new[] = {
+               [ICMPV6_ECHO_REQUEST - 128] = 1,
+               [ICMPV6_NI_QUERY - 128] = 1
+       };
+
+       if (state->pf != NFPROTO_IPV6)
+               return -NF_ACCEPT;
+
+       if (!nf_ct_is_confirmed(ct)) {
+               int type = ct->tuplehash[0].tuple.dst.u.icmp.type - 128;
+
+               if (type < 0 || type >= sizeof(valid_new) || !valid_new[type]) {
+                       /* Can't create a new ICMPv6 `conn' with this. */
+                       pr_debug("icmpv6: can't create new conn with type %u\n",
+                                type + 128);
+                       nf_ct_dump_tuple_ipv6(&ct->tuplehash[0].tuple);
+                       return -NF_ACCEPT;
+               }
+       }
 
        if (!timeout)
                timeout = icmpv6_get_timeouts(nf_ct_net(ct));
@@ -109,26 +129,6 @@ static int icmpv6_packet(struct nf_conn *ct,
        return NF_ACCEPT;
 }
 
-/* Called when a new connection for this protocol found. */
-static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb,
-                      unsigned int dataoff)
-{
-       static const u_int8_t valid_new[] = {
-               [ICMPV6_ECHO_REQUEST - 128] = 1,
-               [ICMPV6_NI_QUERY - 128] = 1
-       };
-       int type = ct->tuplehash[0].tuple.dst.u.icmp.type - 128;
-
-       if (type < 0 || type >= sizeof(valid_new) || !valid_new[type]) {
-               /* Can't create a new ICMPv6 `conn' with this. */
-               pr_debug("icmpv6: can't create new conn with type %u\n",
-                        type + 128);
-               nf_ct_dump_tuple_ipv6(&ct->tuplehash[0].tuple);
-               return false;
-       }
-       return true;
-}
-
 static int
 icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
                     struct sk_buff *skb,
@@ -153,7 +153,7 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
        }
 
        /* rcu_read_lock()ed by nf_hook_thresh */
-       inproto = __nf_ct_l4proto_find(PF_INET6, origtuple.dst.protonum);
+       inproto = __nf_ct_l4proto_find(origtuple.dst.protonum);
 
        /* Ordinarily, we'd expect the inverted tupleproto, but it's
           been preserved inside the ICMP. */
@@ -179,16 +179,18 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
        return NF_ACCEPT;
 }
 
-static void icmpv6_error_log(const struct sk_buff *skb, struct net *net,
-                            u8 pf, const char *msg)
+static void icmpv6_error_log(const struct sk_buff *skb,
+                            const struct nf_hook_state *state,
+                            const char *msg)
 {
-       nf_l4proto_log_invalid(skb, net, pf, IPPROTO_ICMPV6, "%s", msg);
+       nf_l4proto_log_invalid(skb, state->net, state->pf,
+                              IPPROTO_ICMPV6, "%s", msg);
 }
 
-static int
-icmpv6_error(struct net *net, struct nf_conn *tmpl,
-            struct sk_buff *skb, unsigned int dataoff,
-            u8 pf, unsigned int hooknum)
+int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
+                             struct sk_buff *skb,
+                             unsigned int dataoff,
+                             const struct nf_hook_state *state)
 {
        const struct icmp6hdr *icmp6h;
        struct icmp6hdr _ih;
@@ -196,13 +198,14 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
 
        icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih);
        if (icmp6h == NULL) {
-               icmpv6_error_log(skb, net, pf, "short packet");
+               icmpv6_error_log(skb, state, "short packet");
                return -NF_ACCEPT;
        }
 
-       if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
-           nf_ip6_checksum(skb, hooknum, dataoff, IPPROTO_ICMPV6)) {
-               icmpv6_error_log(skb, net, pf, "ICMPv6 checksum failed");
+       if (state->hook == NF_INET_PRE_ROUTING &&
+           state->net->ct.sysctl_checksum &&
+           nf_ip6_checksum(skb, state->hook, dataoff, IPPROTO_ICMPV6)) {
+               icmpv6_error_log(skb, state, "ICMPv6 checksum failed");
                return -NF_ACCEPT;
        }
 
@@ -217,7 +220,7 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
        if (icmp6h->icmp6_type >= 128)
                return NF_ACCEPT;
 
-       return icmpv6_error_message(net, tmpl, skb, dataoff);
+       return icmpv6_error_message(state->net, tmpl, skb, dataoff);
 }
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -343,7 +346,7 @@ static int icmpv6_kmemdup_sysctl_table(struct nf_proto_net *pn,
        return 0;
 }
 
-static int icmpv6_init_net(struct net *net, u_int16_t proto)
+static int icmpv6_init_net(struct net *net)
 {
        struct nf_icmp_net *in = icmpv6_pernet(net);
        struct nf_proto_net *pn = &in->pn;
@@ -360,13 +363,10 @@ static struct nf_proto_net *icmpv6_get_net_proto(struct net *net)
 
 const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
 {
-       .l3proto                = PF_INET6,
        .l4proto                = IPPROTO_ICMPV6,
        .pkt_to_tuple           = icmpv6_pkt_to_tuple,
        .invert_tuple           = icmpv6_invert_tuple,
        .packet                 = icmpv6_packet,
-       .new                    = icmpv6_new,
-       .error                  = icmpv6_error,
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .tuple_to_nlattr        = icmpv6_tuple_to_nlattr,
        .nlattr_tuple_size      = icmpv6_nlattr_tuple_size,
index e4d738d34cd030fe5b2cd1629d436e3fa0fa4557..3d719d3eb9a38c7709b8d224facdad8820ebded4 100644
@@ -273,11 +273,100 @@ static int sctp_new_state(enum ip_conntrack_dir dir,
        return sctp_conntracks[dir][i][cur_state];
 }
 
+/* Don't need lock here: this conntrack not in circulation yet */
+static noinline bool
+sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
+        const struct sctphdr *sh, unsigned int dataoff)
+{
+       enum sctp_conntrack new_state;
+       const struct sctp_chunkhdr *sch;
+       struct sctp_chunkhdr _sch;
+       u32 offset, count;
+
+       memset(&ct->proto.sctp, 0, sizeof(ct->proto.sctp));
+       new_state = SCTP_CONNTRACK_MAX;
+       for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count) {
+               new_state = sctp_new_state(IP_CT_DIR_ORIGINAL,
+                                          SCTP_CONNTRACK_NONE, sch->type);
+
+               /* Invalid: delete conntrack */
+               if (new_state == SCTP_CONNTRACK_NONE ||
+                   new_state == SCTP_CONNTRACK_MAX) {
+                       pr_debug("nf_conntrack_sctp: invalid new deleting.\n");
+                       return false;
+               }
+
+               /* Copy the vtag into the state info */
+               if (sch->type == SCTP_CID_INIT) {
+                       struct sctp_inithdr _inithdr, *ih;
+                       /* Sec 8.5.1 (A) */
+                       if (sh->vtag)
+                               return false;
+
+                       ih = skb_header_pointer(skb, offset + sizeof(_sch),
+                                               sizeof(_inithdr), &_inithdr);
+                       if (!ih)
+                               return false;
+
+                       pr_debug("Setting vtag %x for new conn\n",
+                                ih->init_tag);
+
+                       ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = ih->init_tag;
+               } else if (sch->type == SCTP_CID_HEARTBEAT) {
+                       pr_debug("Setting vtag %x for secondary conntrack\n",
+                                sh->vtag);
+                       ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;
+               } else {
+               /* If it is a shutdown ack OOTB packet, we expect a return
+                  shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */
+                       pr_debug("Setting vtag %x for new conn OOTB\n",
+                                sh->vtag);
+                       ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag;
+               }
+
+               ct->proto.sctp.state = new_state;
+       }
+
+       return true;
+}
+
+static bool sctp_error(struct sk_buff *skb,
+                      unsigned int dataoff,
+                      const struct nf_hook_state *state)
+{
+       const struct sctphdr *sh;
+       const char *logmsg;
+
+       if (skb->len < dataoff + sizeof(struct sctphdr)) {
+               logmsg = "nf_ct_sctp: short packet ";
+               goto out_invalid;
+       }
+       if (state->hook == NF_INET_PRE_ROUTING &&
+           state->net->ct.sysctl_checksum &&
+           skb->ip_summed == CHECKSUM_NONE) {
+               if (!skb_make_writable(skb, dataoff + sizeof(struct sctphdr))) {
+                       logmsg = "nf_ct_sctp: failed to read header ";
+                       goto out_invalid;
+               }
+               sh = (const struct sctphdr *)(skb->data + dataoff);
+               if (sh->checksum != sctp_compute_cksum(skb, dataoff)) {
+                       logmsg = "nf_ct_sctp: bad CRC ";
+                       goto out_invalid;
+               }
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       }
+       return false;
+out_invalid:
+       nf_l4proto_log_invalid(skb, state->net, state->pf, IPPROTO_SCTP, "%s", logmsg);
+       return true;
+}
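sctp_error() verifies the SCTP checksum, which is CRC-32C (Castagnoli) rather than the Internet one's-complement sum; sctp_compute_cksum() applies it over the packet with the checksum field zeroed. For reference, a bitwise CRC-32C with the published check value:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* CRC-32C, reflected form; 0x82F63B78 is the reversed 0x1EDC6F41 poly */
    static uint32_t crc32c(const uint8_t *buf, size_t len)
    {
        uint32_t crc = ~0u;

        while (len--) {
            crc ^= *buf++;
            for (int k = 0; k < 8; k++)
                crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1));
        }
        return ~crc;
    }

    int main(void)
    {
        /* prints e3069283, the standard CRC-32C check value */
        printf("%08x\n", crc32c((const uint8_t *)"123456789", 9));
        return 0;
    }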
+
 /* Returns verdict for packet, or -NF_ACCEPT for invalid. */
 static int sctp_packet(struct nf_conn *ct,
-                      const struct sk_buff *skb,
+                      struct sk_buff *skb,
                       unsigned int dataoff,
-                      enum ip_conntrack_info ctinfo)
+                      enum ip_conntrack_info ctinfo,
+                      const struct nf_hook_state *state)
 {
        enum sctp_conntrack new_state, old_state;
        enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
@@ -289,6 +378,9 @@ static int sctp_packet(struct nf_conn *ct,
        unsigned int *timeouts;
        unsigned long map[256 / sizeof(unsigned long)] = { 0 };
 
+       if (sctp_error(skb, dataoff, state))
+               return -NF_ACCEPT;
+
        sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
        if (sh == NULL)
                goto out;
@@ -296,6 +388,17 @@ static int sctp_packet(struct nf_conn *ct,
        if (do_basic_checks(ct, skb, dataoff, map) != 0)
                goto out;
 
+       if (!nf_ct_is_confirmed(ct)) {
+               /* If an OOTB packet has any of these chunks discard (Sec 8.4) */
+               if (test_bit(SCTP_CID_ABORT, map) ||
+                   test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) ||
+                   test_bit(SCTP_CID_COOKIE_ACK, map))
+                       return -NF_ACCEPT;
+
+               if (!sctp_new(ct, skb, sh, dataoff))
+                       return -NF_ACCEPT;
+       }
+
        /* Check the verification tag (Sec 8.5) */
        if (!test_bit(SCTP_CID_INIT, map) &&
            !test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) &&
@@ -397,110 +500,6 @@ out:
        return -NF_ACCEPT;
 }
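do_basic_checks() records every chunk type seen into a 256-bit map, and the OOTB test above is then three bit lookups (ABORT, SHUTDOWN_COMPLETE, COOKIE_ACK). A portable version of such a map (the kernel uses unsigned long words with set_bit()/test_bit()):

    #include <stdbool.h>
    #include <stdint.h>

    #define MAP_WORDS (256 / 64)   /* 256 possible chunk types */

    static void map_set(uint64_t map[MAP_WORDS], uint8_t type)
    {
        map[type / 64] |= UINT64_C(1) << (type % 64);
    }

    static bool map_test(const uint64_t map[MAP_WORDS], uint8_t type)
    {
        return map[type / 64] & (UINT64_C(1) << (type % 64));
    }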
 
-/* Called when a new connection for this protocol found. */
-static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
-                    unsigned int dataoff)
-{
-       enum sctp_conntrack new_state;
-       const struct sctphdr *sh;
-       struct sctphdr _sctph;
-       const struct sctp_chunkhdr *sch;
-       struct sctp_chunkhdr _sch;
-       u_int32_t offset, count;
-       unsigned long map[256 / sizeof(unsigned long)] = { 0 };
-
-       sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
-       if (sh == NULL)
-               return false;
-
-       if (do_basic_checks(ct, skb, dataoff, map) != 0)
-               return false;
-
-       /* If an OOTB packet has any of these chunks discard (Sec 8.4) */
-       if (test_bit(SCTP_CID_ABORT, map) ||
-           test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) ||
-           test_bit(SCTP_CID_COOKIE_ACK, map))
-               return false;
-
-       memset(&ct->proto.sctp, 0, sizeof(ct->proto.sctp));
-       new_state = SCTP_CONNTRACK_MAX;
-       for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
-               /* Don't need lock here: this conntrack not in circulation yet */
-               new_state = sctp_new_state(IP_CT_DIR_ORIGINAL,
-                                          SCTP_CONNTRACK_NONE, sch->type);
-
-               /* Invalid: delete conntrack */
-               if (new_state == SCTP_CONNTRACK_NONE ||
-                   new_state == SCTP_CONNTRACK_MAX) {
-                       pr_debug("nf_conntrack_sctp: invalid new deleting.\n");
-                       return false;
-               }
-
-               /* Copy the vtag into the state info */
-               if (sch->type == SCTP_CID_INIT) {
-                       struct sctp_inithdr _inithdr, *ih;
-                       /* Sec 8.5.1 (A) */
-                       if (sh->vtag)
-                               return false;
-
-                       ih = skb_header_pointer(skb, offset + sizeof(_sch),
-                                               sizeof(_inithdr), &_inithdr);
-                       if (!ih)
-                               return false;
-
-                       pr_debug("Setting vtag %x for new conn\n",
-                                ih->init_tag);
-
-                       ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = ih->init_tag;
-               } else if (sch->type == SCTP_CID_HEARTBEAT) {
-                       pr_debug("Setting vtag %x for secondary conntrack\n",
-                                sh->vtag);
-                       ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;
-               }
-               /* If it is a shutdown ack OOTB packet, we expect a return
-                  shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */
-               else {
-                       pr_debug("Setting vtag %x for new conn OOTB\n",
-                                sh->vtag);
-                       ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag;
-               }
-
-               ct->proto.sctp.state = new_state;
-       }
-
-       return true;
-}
-
-static int sctp_error(struct net *net, struct nf_conn *tpl, struct sk_buff *skb,
-                     unsigned int dataoff,
-                     u8 pf, unsigned int hooknum)
-{
-       const struct sctphdr *sh;
-       const char *logmsg;
-
-       if (skb->len < dataoff + sizeof(struct sctphdr)) {
-               logmsg = "nf_ct_sctp: short packet ";
-               goto out_invalid;
-       }
-       if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
-           skb->ip_summed == CHECKSUM_NONE) {
-               if (!skb_make_writable(skb, dataoff + sizeof(struct sctphdr))) {
-                       logmsg = "nf_ct_sctp: failed to read header ";
-                       goto out_invalid;
-               }
-               sh = (const struct sctphdr *)(skb->data + dataoff);
-               if (sh->checksum != sctp_compute_cksum(skb, dataoff)) {
-                       logmsg = "nf_ct_sctp: bad CRC ";
-                       goto out_invalid;
-               }
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-       }
-       return NF_ACCEPT;
-out_invalid:
-       nf_l4proto_log_invalid(skb, net, pf, IPPROTO_SCTP, "%s", logmsg);
-       return -NF_ACCEPT;
-}
-
 static bool sctp_can_early_drop(const struct nf_conn *ct)
 {
        switch (ct->proto.sctp.state) {
@@ -735,7 +734,7 @@ static int sctp_kmemdup_sysctl_table(struct nf_proto_net *pn,
        return 0;
 }
 
-static int sctp_init_net(struct net *net, u_int16_t proto)
+static int sctp_init_net(struct net *net)
 {
        struct nf_sctp_net *sn = sctp_pernet(net);
        struct nf_proto_net *pn = &sn->pn;
@@ -760,49 +759,12 @@ static struct nf_proto_net *sctp_get_net_proto(struct net *net)
        return &net->ct.nf_ct_proto.sctp.pn;
 }
 
-const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = {
-       .l3proto                = PF_INET,
-       .l4proto                = IPPROTO_SCTP,
-#ifdef CONFIG_NF_CONNTRACK_PROCFS
-       .print_conntrack        = sctp_print_conntrack,
-#endif
-       .packet                 = sctp_packet,
-       .new                    = sctp_new,
-       .error                  = sctp_error,
-       .can_early_drop         = sctp_can_early_drop,
-       .me                     = THIS_MODULE,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
-       .nlattr_size            = SCTP_NLATTR_SIZE,
-       .to_nlattr              = sctp_to_nlattr,
-       .from_nlattr            = nlattr_to_sctp,
-       .tuple_to_nlattr        = nf_ct_port_tuple_to_nlattr,
-       .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
-       .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
-       .nla_policy             = nf_ct_port_nla_policy,
-#endif
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-       .ctnl_timeout           = {
-               .nlattr_to_obj  = sctp_timeout_nlattr_to_obj,
-               .obj_to_nlattr  = sctp_timeout_obj_to_nlattr,
-               .nlattr_max     = CTA_TIMEOUT_SCTP_MAX,
-               .obj_size       = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
-               .nla_policy     = sctp_timeout_nla_policy,
-       },
-#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-       .init_net               = sctp_init_net,
-       .get_net_proto          = sctp_get_net_proto,
-};
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_sctp4);
-
-const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = {
-       .l3proto                = PF_INET6,
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp = {
        .l4proto                = IPPROTO_SCTP,
 #ifdef CONFIG_NF_CONNTRACK_PROCFS
        .print_conntrack        = sctp_print_conntrack,
 #endif
        .packet                 = sctp_packet,
-       .new                    = sctp_new,
-       .error                  = sctp_error,
        .can_early_drop         = sctp_can_early_drop,
        .me                     = THIS_MODULE,
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -826,4 +788,3 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = {
        .init_net               = sctp_init_net,
        .get_net_proto          = sctp_get_net_proto,
 };
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_sctp6);
index b4bdf9eda7b740dccb6501c5b0155c32d17d7e5b..1bcf9984d45e8601646cb2b99dc5f3113a5c8b0a 100644
@@ -717,35 +717,26 @@ static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
        [TCPHDR_ACK|TCPHDR_URG]                 = 1,
 };
 
-static void tcp_error_log(const struct sk_buff *skb, struct net *net,
-                         u8 pf, const char *msg)
+static void tcp_error_log(const struct sk_buff *skb,
+                         const struct nf_hook_state *state,
+                         const char *msg)
 {
-       nf_l4proto_log_invalid(skb, net, pf, IPPROTO_TCP, "%s", msg);
+       nf_l4proto_log_invalid(skb, state->net, state->pf, IPPROTO_TCP, "%s", msg);
 }
 
 /* Protect conntrack against broken packets. Code taken from ipt_unclean.c.  */
-static int tcp_error(struct net *net, struct nf_conn *tmpl,
-                    struct sk_buff *skb,
-                    unsigned int dataoff,
-                    u_int8_t pf,
-                    unsigned int hooknum)
+static bool tcp_error(const struct tcphdr *th,
+                     struct sk_buff *skb,
+                     unsigned int dataoff,
+                     const struct nf_hook_state *state)
 {
-       const struct tcphdr *th;
-       struct tcphdr _tcph;
        unsigned int tcplen = skb->len - dataoff;
-       u_int8_t tcpflags;
-
-       /* Smaller that minimal TCP header? */
-       th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
-       if (th == NULL) {
-               tcp_error_log(skb, net, pf, "short packet");
-               return -NF_ACCEPT;
-       }
+       u8 tcpflags;
 
        /* Not whole TCP header or malformed packet */
        if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
-               tcp_error_log(skb, net, pf, "truncated packet");
-               return -NF_ACCEPT;
+               tcp_error_log(skb, state, "truncated packet");
+               return true;
        }
 
        /* Checksum invalid? Ignore.
@@ -753,27 +744,101 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
         * because the checksum is assumed to be correct.
         */
        /* FIXME: Source route IP option packets --RR */
-       if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
-           nf_checksum(skb, hooknum, dataoff, IPPROTO_TCP, pf)) {
-               tcp_error_log(skb, net, pf, "bad checksum");
-               return -NF_ACCEPT;
+       if (state->net->ct.sysctl_checksum &&
+           state->hook == NF_INET_PRE_ROUTING &&
+           nf_checksum(skb, state->hook, dataoff, IPPROTO_TCP, state->pf)) {
+               tcp_error_log(skb, state, "bad checksum");
+               return true;
        }
 
        /* Check TCP flags. */
        tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
        if (!tcp_valid_flags[tcpflags]) {
-               tcp_error_log(skb, net, pf, "invalid tcp flag combination");
-               return -NF_ACCEPT;
+               tcp_error_log(skb, state, "invalid tcp flag combination");
+               return true;
        }
 
-       return NF_ACCEPT;
+       return false;
+}
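tcp_error() validates the flag byte against a whitelist table after masking off ECE, CWR and PSH, which never affect validity. A self-contained version of that check, listing the combinations the kernel's tcp_valid_flags accepts:

    #include <stdbool.h>
    #include <stdint.h>

    #define TH_FIN 0x01
    #define TH_SYN 0x02
    #define TH_RST 0x04
    #define TH_PSH 0x08
    #define TH_ACK 0x10
    #define TH_URG 0x20
    #define TH_ECE 0x40
    #define TH_CWR 0x80

    static bool tcp_flags_valid(uint8_t flags)
    {
        static const uint8_t valid[64] = {
            [TH_SYN]                   = 1,
            [TH_SYN | TH_URG]          = 1,
            [TH_SYN | TH_ACK]          = 1,
            [TH_RST]                   = 1,
            [TH_RST | TH_ACK]          = 1,
            [TH_FIN | TH_ACK]          = 1,
            [TH_FIN | TH_ACK | TH_URG] = 1,
            [TH_ACK]                   = 1,
            [TH_ACK | TH_URG]          = 1,
        };

        /* the masked value is at most 0x37, safely inside the table */
        return valid[flags & (uint8_t)~(TH_ECE | TH_CWR | TH_PSH)];
    }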
+
+static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
+                            unsigned int dataoff,
+                            const struct tcphdr *th)
+{
+       enum tcp_conntrack new_state;
+       struct net *net = nf_ct_net(ct);
+       const struct nf_tcp_net *tn = tcp_pernet(net);
+       const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
+       const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];
+
+       /* Don't need lock here: this conntrack not in circulation yet */
+       new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];
+
+       /* Invalid: delete conntrack */
+       if (new_state >= TCP_CONNTRACK_MAX) {
+               pr_debug("nf_ct_tcp: invalid new deleting.\n");
+               return false;
+       }
+
+       if (new_state == TCP_CONNTRACK_SYN_SENT) {
+               memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
+               /* SYN packet */
+               ct->proto.tcp.seen[0].td_end =
+                       segment_seq_plus_len(ntohl(th->seq), skb->len,
+                                            dataoff, th);
+               ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
+               if (ct->proto.tcp.seen[0].td_maxwin == 0)
+                       ct->proto.tcp.seen[0].td_maxwin = 1;
+               ct->proto.tcp.seen[0].td_maxend =
+                       ct->proto.tcp.seen[0].td_end;
+
+               tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
+       } else if (tn->tcp_loose == 0) {
+               /* Don't try to pick up connections. */
+               return false;
+       } else {
+               memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
+               /*
+                * We are in the middle of a connection,
+                * its history is lost for us.
+                * Let's try to use the data from the packet.
+                */
+               ct->proto.tcp.seen[0].td_end =
+                       segment_seq_plus_len(ntohl(th->seq), skb->len,
+                                            dataoff, th);
+               ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
+               if (ct->proto.tcp.seen[0].td_maxwin == 0)
+                       ct->proto.tcp.seen[0].td_maxwin = 1;
+               ct->proto.tcp.seen[0].td_maxend =
+                       ct->proto.tcp.seen[0].td_end +
+                       ct->proto.tcp.seen[0].td_maxwin;
+
+               /* We assume SACK and liberal window checking to handle
+                * window scaling */
+               ct->proto.tcp.seen[0].flags =
+               ct->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
+                                             IP_CT_TCP_FLAG_BE_LIBERAL;
+       }
+
+       /* tcp_packet will set them */
+       ct->proto.tcp.last_index = TCP_NONE_SET;
+
+       pr_debug("%s: sender end=%u maxend=%u maxwin=%u scale=%i "
+                "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
+                __func__,
+                sender->td_end, sender->td_maxend, sender->td_maxwin,
+                sender->td_scale,
+                receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
+                receiver->td_scale);
+       return true;
 }
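tcp_new(), now noinline and reached only for unconfirmed conntracks, has two admission paths: a SYN seeds precise window tracking, while a midstream packet is adopted only if tcp_loose allows, with SACK-permitted and liberal window checking assumed because the connection's history is lost. The policy in miniature:

    #include <stdbool.h>

    /* returns false when conntrack should refuse to create the entry */
    static bool tcp_may_create(bool is_syn, bool tcp_loose)
    {
        if (is_syn)
            return true;       /* normal handshake start, track precisely */
        return tcp_loose;      /* midstream pickup only if policy allows */
    }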
 
 /* Returns verdict for packet, or -1 for invalid. */
 static int tcp_packet(struct nf_conn *ct,
-                     const struct sk_buff *skb,
+                     struct sk_buff *skb,
                      unsigned int dataoff,
-                     enum ip_conntrack_info ctinfo)
+                     enum ip_conntrack_info ctinfo,
+                     const struct nf_hook_state *state)
 {
        struct net *net = nf_ct_net(ct);
        struct nf_tcp_net *tn = tcp_pernet(net);
@@ -786,7 +851,14 @@ static int tcp_packet(struct nf_conn *ct,
        unsigned long timeout;
 
        th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
-       BUG_ON(th == NULL);
+       if (th == NULL)
+               return -NF_ACCEPT;
+
+       if (tcp_error(th, skb, dataoff, state))
+               return -NF_ACCEPT;
+
+       if (!nf_ct_is_confirmed(ct) && !tcp_new(ct, skb, dataoff, th))
+               return -NF_ACCEPT;
 
        spin_lock_bh(&ct->lock);
        old_state = ct->proto.tcp.state;
@@ -1067,82 +1139,6 @@ static int tcp_packet(struct nf_conn *ct,
        return NF_ACCEPT;
 }
 
-/* Called when a new connection for this protocol found. */
-static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
-                   unsigned int dataoff)
-{
-       enum tcp_conntrack new_state;
-       const struct tcphdr *th;
-       struct tcphdr _tcph;
-       struct net *net = nf_ct_net(ct);
-       struct nf_tcp_net *tn = tcp_pernet(net);
-       const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
-       const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];
-
-       th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
-       BUG_ON(th == NULL);
-
-       /* Don't need lock here: this conntrack not in circulation yet */
-       new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];
-
-       /* Invalid: delete conntrack */
-       if (new_state >= TCP_CONNTRACK_MAX) {
-               pr_debug("nf_ct_tcp: invalid new deleting.\n");
-               return false;
-       }
-
-       if (new_state == TCP_CONNTRACK_SYN_SENT) {
-               memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
-               /* SYN packet */
-               ct->proto.tcp.seen[0].td_end =
-                       segment_seq_plus_len(ntohl(th->seq), skb->len,
-                                            dataoff, th);
-               ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
-               if (ct->proto.tcp.seen[0].td_maxwin == 0)
-                       ct->proto.tcp.seen[0].td_maxwin = 1;
-               ct->proto.tcp.seen[0].td_maxend =
-                       ct->proto.tcp.seen[0].td_end;
-
-               tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
-       } else if (tn->tcp_loose == 0) {
-               /* Don't try to pick up connections. */
-               return false;
-       } else {
-               memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
-               /*
-                * We are in the middle of a connection,
-                * its history is lost for us.
-                * Let's try to use the data from the packet.
-                */
-               ct->proto.tcp.seen[0].td_end =
-                       segment_seq_plus_len(ntohl(th->seq), skb->len,
-                                            dataoff, th);
-               ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
-               if (ct->proto.tcp.seen[0].td_maxwin == 0)
-                       ct->proto.tcp.seen[0].td_maxwin = 1;
-               ct->proto.tcp.seen[0].td_maxend =
-                       ct->proto.tcp.seen[0].td_end +
-                       ct->proto.tcp.seen[0].td_maxwin;
-
-               /* We assume SACK and liberal window checking to handle
-                * window scaling */
-               ct->proto.tcp.seen[0].flags =
-               ct->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
-                                             IP_CT_TCP_FLAG_BE_LIBERAL;
-       }
-
-       /* tcp_packet will set them */
-       ct->proto.tcp.last_index = TCP_NONE_SET;
-
-       pr_debug("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i "
-                "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
-                sender->td_end, sender->td_maxend, sender->td_maxwin,
-                sender->td_scale,
-                receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
-                receiver->td_scale);
-       return true;
-}
-
 static bool tcp_can_early_drop(const struct nf_conn *ct)
 {
        switch (ct->proto.tcp.state) {
@@ -1213,8 +1209,8 @@ static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
 #define TCP_NLATTR_SIZE        ( \
        NLA_ALIGN(NLA_HDRLEN + 1) + \
        NLA_ALIGN(NLA_HDRLEN + 1) + \
-       NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))) + \
-       NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))))
+       NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
+       NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
 
 static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
 {
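
Aside: the TCP_NLATTR_SIZE hunk above fixes a classic nesting bug. sizeof(sizeof(struct nf_ct_tcp_flags)) measures the size_t value produced by the inner sizeof, not the structure itself, so the macro reserved netlink attribute space for the wrong type. A minimal, self-contained illustration (the struct is re-declared here with its real two-byte layout just for the demo):

    #include <stdio.h>

    struct nf_ct_tcp_flags {            /* matches the uapi layout: 2 bytes */
            unsigned char flags;
            unsigned char mask;
    };

    int main(void)
    {
            /* the inner sizeof yields a size_t constant; the outer sizeof
             * then measures that constant, not the struct */
            printf("%zu\n", sizeof(struct nf_ct_tcp_flags));          /* 2 */
            printf("%zu\n", sizeof(sizeof(struct nf_ct_tcp_flags)));  /* 8 on LP64 */
            return 0;
    }

The old macro therefore over-reserved 8 bytes per attribute where 2 were meant; dropping the redundant outer sizeof restores the intended size.
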
@@ -1510,7 +1506,7 @@ static int tcp_kmemdup_sysctl_table(struct nf_proto_net *pn,
        return 0;
 }
 
-static int tcp_init_net(struct net *net, u_int16_t proto)
+static int tcp_init_net(struct net *net)
 {
        struct nf_tcp_net *tn = tcp_pernet(net);
        struct nf_proto_net *pn = &tn->pn;
@@ -1538,16 +1534,13 @@ static struct nf_proto_net *tcp_get_net_proto(struct net *net)
        return &net->ct.nf_ct_proto.tcp.pn;
 }
 
-const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp =
 {
-       .l3proto                = PF_INET,
        .l4proto                = IPPROTO_TCP,
 #ifdef CONFIG_NF_CONNTRACK_PROCFS
        .print_conntrack        = tcp_print_conntrack,
 #endif
        .packet                 = tcp_packet,
-       .new                    = tcp_new,
-       .error                  = tcp_error,
        .can_early_drop         = tcp_can_early_drop,
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .to_nlattr              = tcp_to_nlattr,
@@ -1571,39 +1564,3 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 =
        .init_net               = tcp_init_net,
        .get_net_proto          = tcp_get_net_proto,
 };
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp4);
-
-const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 =
-{
-       .l3proto                = PF_INET6,
-       .l4proto                = IPPROTO_TCP,
-#ifdef CONFIG_NF_CONNTRACK_PROCFS
-       .print_conntrack        = tcp_print_conntrack,
-#endif
-       .packet                 = tcp_packet,
-       .new                    = tcp_new,
-       .error                  = tcp_error,
-       .can_early_drop         = tcp_can_early_drop,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
-       .nlattr_size            = TCP_NLATTR_SIZE,
-       .to_nlattr              = tcp_to_nlattr,
-       .from_nlattr            = nlattr_to_tcp,
-       .tuple_to_nlattr        = nf_ct_port_tuple_to_nlattr,
-       .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
-       .nlattr_tuple_size      = tcp_nlattr_tuple_size,
-       .nla_policy             = nf_ct_port_nla_policy,
-#endif
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-       .ctnl_timeout           = {
-               .nlattr_to_obj  = tcp_timeout_nlattr_to_obj,
-               .obj_to_nlattr  = tcp_timeout_obj_to_nlattr,
-               .nlattr_max     = CTA_TIMEOUT_TCP_MAX,
-               .obj_size       = sizeof(unsigned int) *
-                                       TCP_CONNTRACK_TIMEOUT_MAX,
-               .nla_policy     = tcp_timeout_nla_policy,
-       },
-#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-       .init_net               = tcp_init_net,
-       .get_net_proto          = tcp_get_net_proto,
-};
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp6);
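
For orientation before the following files repeat the same surgery: this change folds the old ->error and ->new callbacks into ->packet itself, which is why both function pointers disappear from the ops structure and why the IPv4/IPv6 variants collapse into a single family-independent nf_conntrack_l4proto_tcp. Condensed from the hunks above (a sketch of the control flow, not the full function):

    static int tcp_packet(struct nf_conn *ct, struct sk_buff *skb,
                          unsigned int dataoff, enum ip_conntrack_info ctinfo,
                          const struct nf_hook_state *state)
    {
            const struct tcphdr *th;
            struct tcphdr _tcph;

            th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
            if (th == NULL)
                    return -NF_ACCEPT;              /* header checks: was ->error */
            if (tcp_error(th, skb, dataoff, state))
                    return -NF_ACCEPT;
            if (!nf_ct_is_confirmed(ct) &&
                !tcp_new(ct, skb, dataoff, th))
                    return -NF_ACCEPT;              /* first packet: was ->new */
            /* ... unchanged TCP state tracking ... */
            return NF_ACCEPT;
    }
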
index 3065fb8ef91b74348f95900a7b3b0cfc92970469..a7aa70370913ce7e8914343270152fb009eb2a63 100644 (file)
@@ -42,14 +42,65 @@ static unsigned int *udp_get_timeouts(struct net *net)
        return udp_pernet(net)->timeouts;
 }
 
+static void udp_error_log(const struct sk_buff *skb,
+                         const struct nf_hook_state *state,
+                         const char *msg)
+{
+       nf_l4proto_log_invalid(skb, state->net, state->pf,
+                              IPPROTO_UDP, "%s", msg);
+}
+
+static bool udp_error(struct sk_buff *skb,
+                     unsigned int dataoff,
+                     const struct nf_hook_state *state)
+{
+       unsigned int udplen = skb->len - dataoff;
+       const struct udphdr *hdr;
+       struct udphdr _hdr;
+
+       /* Header is too small? */
+       hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
+       if (!hdr) {
+               udp_error_log(skb, state, "short packet");
+               return true;
+       }
+
+       /* Truncated/malformed packets */
+       if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr)) {
+               udp_error_log(skb, state, "truncated/malformed packet");
+               return true;
+       }
+
+       /* Packet with no checksum */
+       if (!hdr->check)
+               return false;
+
+       /* Checksum invalid? Ignore.
+        * We skip checking packets on the outgoing path
+        * because the checksum is assumed to be correct.
+        * FIXME: Source route IP option packets --RR */
+       if (state->hook == NF_INET_PRE_ROUTING &&
+           state->net->ct.sysctl_checksum &&
+           nf_checksum(skb, state->hook, dataoff, IPPROTO_UDP, state->pf)) {
+               udp_error_log(skb, state, "bad checksum");
+               return true;
+       }
+
+       return false;
+}
+
 /* Returns verdict for packet, and may modify conntrack type */
 static int udp_packet(struct nf_conn *ct,
-                     const struct sk_buff *skb,
+                     struct sk_buff *skb,
                      unsigned int dataoff,
-                     enum ip_conntrack_info ctinfo)
+                     enum ip_conntrack_info ctinfo,
+                     const struct nf_hook_state *state)
 {
        unsigned int *timeouts;
 
+       if (udp_error(skb, dataoff, state))
+               return -NF_ACCEPT;
+
        timeouts = nf_ct_timeout_lookup(ct);
        if (!timeouts)
                timeouts = udp_get_timeouts(nf_ct_net(ct));
@@ -69,24 +120,18 @@ static int udp_packet(struct nf_conn *ct,
        return NF_ACCEPT;
 }
 
-/* Called when a new connection for this protocol found. */
-static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb,
-                   unsigned int dataoff)
-{
-       return true;
-}
-
 #ifdef CONFIG_NF_CT_PROTO_UDPLITE
-static void udplite_error_log(const struct sk_buff *skb, struct net *net,
-                             u8 pf, const char *msg)
+static void udplite_error_log(const struct sk_buff *skb,
+                             const struct nf_hook_state *state,
+                             const char *msg)
 {
-       nf_l4proto_log_invalid(skb, net, pf, IPPROTO_UDPLITE, "%s", msg);
+       nf_l4proto_log_invalid(skb, state->net, state->pf,
+                              IPPROTO_UDPLITE, "%s", msg);
 }
 
-static int udplite_error(struct net *net, struct nf_conn *tmpl,
-                        struct sk_buff *skb,
-                        unsigned int dataoff,
-                        u8 pf, unsigned int hooknum)
+static bool udplite_error(struct sk_buff *skb,
+                         unsigned int dataoff,
+                         const struct nf_hook_state *state)
 {
        unsigned int udplen = skb->len - dataoff;
        const struct udphdr *hdr;
@@ -96,80 +141,67 @@ static int udplite_error(struct net *net, struct nf_conn *tmpl,
        /* Header is too small? */
        hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
        if (!hdr) {
-               udplite_error_log(skb, net, pf, "short packet");
-               return -NF_ACCEPT;
+               udplite_error_log(skb, state, "short packet");
+               return true;
        }
 
        cscov = ntohs(hdr->len);
        if (cscov == 0) {
                cscov = udplen;
        } else if (cscov < sizeof(*hdr) || cscov > udplen) {
-               udplite_error_log(skb, net, pf, "invalid checksum coverage");
-               return -NF_ACCEPT;
+               udplite_error_log(skb, state, "invalid checksum coverage");
+               return true;
        }
 
        /* UDPLITE mandates checksums */
        if (!hdr->check) {
-               udplite_error_log(skb, net, pf, "checksum missing");
-               return -NF_ACCEPT;
+               udplite_error_log(skb, state, "checksum missing");
+               return true;
        }
 
        /* Checksum invalid? Ignore. */
-       if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
-           nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_UDP,
-                               pf)) {
-               udplite_error_log(skb, net, pf, "bad checksum");
-               return -NF_ACCEPT;
+       if (state->hook == NF_INET_PRE_ROUTING &&
+           state->net->ct.sysctl_checksum &&
+           nf_checksum_partial(skb, state->hook, dataoff, cscov, IPPROTO_UDP,
+                               state->pf)) {
+               udplite_error_log(skb, state, "bad checksum");
+               return true;
        }
 
-       return NF_ACCEPT;
-}
-#endif
-
-static void udp_error_log(const struct sk_buff *skb, struct net *net,
-                         u8 pf, const char *msg)
-{
-       nf_l4proto_log_invalid(skb, net, pf, IPPROTO_UDP, "%s", msg);
+       return false;
 }
 
-static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
-                    unsigned int dataoff,
-                    u_int8_t pf,
-                    unsigned int hooknum)
+/* Returns verdict for packet, and may modify conntrack type */
+static int udplite_packet(struct nf_conn *ct,
+                         struct sk_buff *skb,
+                         unsigned int dataoff,
+                         enum ip_conntrack_info ctinfo,
+                         const struct nf_hook_state *state)
 {
-       unsigned int udplen = skb->len - dataoff;
-       const struct udphdr *hdr;
-       struct udphdr _hdr;
-
-       /* Header is too small? */
-       hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
-       if (hdr == NULL) {
-               udp_error_log(skb, net, pf, "short packet");
-               return -NF_ACCEPT;
-       }
+       unsigned int *timeouts;
 
-       /* Truncated/malformed packets */
-       if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr)) {
-               udp_error_log(skb, net, pf, "truncated/malformed packet");
+       if (udplite_error(skb, dataoff, state))
                return -NF_ACCEPT;
-       }
 
-       /* Packet with no checksum */
-       if (!hdr->check)
-               return NF_ACCEPT;
+       timeouts = nf_ct_timeout_lookup(ct);
+       if (!timeouts)
+               timeouts = udp_get_timeouts(nf_ct_net(ct));
 
-       /* Checksum invalid? Ignore.
-        * We skip checking packets on the outgoing path
-        * because the checksum is assumed to be correct.
-        * FIXME: Source route IP option packets --RR */
-       if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
-           nf_checksum(skb, hooknum, dataoff, IPPROTO_UDP, pf)) {
-               udp_error_log(skb, net, pf, "bad checksum");
-               return -NF_ACCEPT;
+       /* If we've seen traffic both ways, this is some kind of UDP
+          stream.  Extend timeout. */
+       if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+               nf_ct_refresh_acct(ct, ctinfo, skb,
+                                  timeouts[UDP_CT_REPLIED]);
+               /* Also, more likely to be important, and not a probe */
+               if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
+                       nf_conntrack_event_cache(IPCT_ASSURED, ct);
+       } else {
+               nf_ct_refresh_acct(ct, ctinfo, skb,
+                                  timeouts[UDP_CT_UNREPLIED]);
        }
-
        return NF_ACCEPT;
 }
+#endif
 
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
@@ -258,7 +290,7 @@ static int udp_kmemdup_sysctl_table(struct nf_proto_net *pn,
        return 0;
 }
 
-static int udp_init_net(struct net *net, u_int16_t proto)
+static int udp_init_net(struct net *net)
 {
        struct nf_udp_net *un = udp_pernet(net);
        struct nf_proto_net *pn = &un->pn;
@@ -278,72 +310,11 @@ static struct nf_proto_net *udp_get_net_proto(struct net *net)
        return &net->ct.nf_ct_proto.udp.pn;
 }
 
-const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 =
-{
-       .l3proto                = PF_INET,
-       .l4proto                = IPPROTO_UDP,
-       .allow_clash            = true,
-       .packet                 = udp_packet,
-       .new                    = udp_new,
-       .error                  = udp_error,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
-       .tuple_to_nlattr        = nf_ct_port_tuple_to_nlattr,
-       .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
-       .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
-       .nla_policy             = nf_ct_port_nla_policy,
-#endif
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-       .ctnl_timeout           = {
-               .nlattr_to_obj  = udp_timeout_nlattr_to_obj,
-               .obj_to_nlattr  = udp_timeout_obj_to_nlattr,
-               .nlattr_max     = CTA_TIMEOUT_UDP_MAX,
-               .obj_size       = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
-               .nla_policy     = udp_timeout_nla_policy,
-       },
-#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-       .init_net               = udp_init_net,
-       .get_net_proto          = udp_get_net_proto,
-};
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp4);
-
-#ifdef CONFIG_NF_CT_PROTO_UDPLITE
-const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 =
-{
-       .l3proto                = PF_INET,
-       .l4proto                = IPPROTO_UDPLITE,
-       .allow_clash            = true,
-       .packet                 = udp_packet,
-       .new                    = udp_new,
-       .error                  = udplite_error,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
-       .tuple_to_nlattr        = nf_ct_port_tuple_to_nlattr,
-       .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
-       .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
-       .nla_policy             = nf_ct_port_nla_policy,
-#endif
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-       .ctnl_timeout           = {
-               .nlattr_to_obj  = udp_timeout_nlattr_to_obj,
-               .obj_to_nlattr  = udp_timeout_obj_to_nlattr,
-               .nlattr_max     = CTA_TIMEOUT_UDP_MAX,
-               .obj_size       = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
-               .nla_policy     = udp_timeout_nla_policy,
-       },
-#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-       .init_net               = udp_init_net,
-       .get_net_proto          = udp_get_net_proto,
-};
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite4);
-#endif
-
-const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp =
 {
-       .l3proto                = PF_INET6,
        .l4proto                = IPPROTO_UDP,
        .allow_clash            = true,
        .packet                 = udp_packet,
-       .new                    = udp_new,
-       .error                  = udp_error,
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .tuple_to_nlattr        = nf_ct_port_tuple_to_nlattr,
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
@@ -362,17 +333,13 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
        .init_net               = udp_init_net,
        .get_net_proto          = udp_get_net_proto,
 };
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp6);
 
 #ifdef CONFIG_NF_CT_PROTO_UDPLITE
-const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite =
 {
-       .l3proto                = PF_INET6,
        .l4proto                = IPPROTO_UDPLITE,
        .allow_clash            = true,
-       .packet                 = udp_packet,
-       .new                    = udp_new,
-       .error                  = udplite_error,
+       .packet                 = udplite_packet,
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .tuple_to_nlattr        = nf_ct_port_tuple_to_nlattr,
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
@@ -391,5 +358,4 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 =
        .init_net               = udp_init_net,
        .get_net_proto          = udp_get_net_proto,
 };
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6);
 #endif
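
A note on the verdict convention these handlers rely on: returning -NF_ACCEPT means "invalid for tracking, but let the packet pass". On the caller side this is roughly handled as follows (paraphrased from nf_conntrack_in(); statistics and debug output omitted, so treat the details as an approximation):

    ret = l4proto->packet(ct, skb, dataoff, ctinfo, state);
    if (ret <= 0) {
            /* untrack the skb, then return the negated (positive) verdict:
             * -NF_ACCEPT becomes NF_ACCEPT, -NF_DROP becomes NF_DROP */
            nf_conntrack_put(skb_nfct(skb));
            skb->_nfct = 0;
            ret = -ret;
    }
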
index 13279f683da9786f3b6fea88b96ba96bc07f62c5..463d17d349c1bca02361fbb625059f93b34e3bc0 100644 (file)
@@ -292,7 +292,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
        if (!net_eq(nf_ct_net(ct), net))
                goto release;
 
-       l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
+       l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
        WARN_ON(!l4proto);
 
        ret = -ENOSPC;
@@ -720,10 +720,3 @@ static void __exit nf_conntrack_standalone_fini(void)
 
 module_init(nf_conntrack_standalone_init);
 module_exit(nf_conntrack_standalone_fini);
-
-/* Some modules need us, but don't depend directly on any symbol.
-   They should call this. */
-void need_conntrack(void)
-{
-}
-EXPORT_SYMBOL_GPL(need_conntrack);
index d8125616edc79dd311c12dd1806c97dd4e24f4e4..185c633b6872b1cf9b04b76f09b14ca2b4a3c5ea 100644 (file)
@@ -120,7 +120,7 @@ static void flow_offload_fixup_ct_state(struct nf_conn *ct)
        if (l4num == IPPROTO_TCP)
                flow_offload_fixup_tcp(&ct->proto.tcp);
 
-       l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), l4num);
+       l4proto = __nf_ct_l4proto_find(l4num);
        if (!l4proto)
                return;
 
@@ -233,8 +233,8 @@ flow_offload_lookup(struct nf_flowtable *flow_table,
        struct flow_offload *flow;
        int dir;
 
-       tuplehash = rhashtable_lookup_fast(&flow_table->rhashtable, tuple,
-                                          nf_flow_offload_rhash_params);
+       tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
+                                     nf_flow_offload_rhash_params);
        if (!tuplehash)
                return NULL;
 
@@ -254,20 +254,17 @@ int nf_flow_table_iterate(struct nf_flowtable *flow_table,
        struct flow_offload_tuple_rhash *tuplehash;
        struct rhashtable_iter hti;
        struct flow_offload *flow;
-       int err;
-
-       err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL);
-       if (err)
-               return err;
+       int err = 0;
 
+       rhashtable_walk_enter(&flow_table->rhashtable, &hti);
        rhashtable_walk_start(&hti);
 
        while ((tuplehash = rhashtable_walk_next(&hti))) {
                if (IS_ERR(tuplehash)) {
-                       err = PTR_ERR(tuplehash);
-                       if (err != -EAGAIN)
-                               goto out;
-
+                       if (PTR_ERR(tuplehash) != -EAGAIN) {
+                               err = PTR_ERR(tuplehash);
+                               break;
+                       }
                        continue;
                }
                if (tuplehash->tuple.dir)
@@ -277,7 +274,6 @@ int nf_flow_table_iterate(struct nf_flowtable *flow_table,
 
                iter(flow, data);
        }
-out:
        rhashtable_walk_stop(&hti);
        rhashtable_walk_exit(&hti);
 
@@ -290,25 +286,19 @@ static inline bool nf_flow_has_expired(const struct flow_offload *flow)
        return (__s32)(flow->timeout - (u32)jiffies) <= 0;
 }
 
-static int nf_flow_offload_gc_step(struct nf_flowtable *flow_table)
+static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table)
 {
        struct flow_offload_tuple_rhash *tuplehash;
        struct rhashtable_iter hti;
        struct flow_offload *flow;
-       int err;
-
-       err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL);
-       if (err)
-               return 0;
 
+       rhashtable_walk_enter(&flow_table->rhashtable, &hti);
        rhashtable_walk_start(&hti);
 
        while ((tuplehash = rhashtable_walk_next(&hti))) {
                if (IS_ERR(tuplehash)) {
-                       err = PTR_ERR(tuplehash);
-                       if (err != -EAGAIN)
-                               goto out;
-
+                       if (PTR_ERR(tuplehash) != -EAGAIN)
+                               break;
                        continue;
                }
                if (tuplehash->tuple.dir)
@@ -321,11 +311,8 @@ static int nf_flow_offload_gc_step(struct nf_flowtable *flow_table)
                                    FLOW_OFFLOAD_TEARDOWN)))
                        flow_offload_del(flow_table, flow);
        }
-out:
        rhashtable_walk_stop(&hti);
        rhashtable_walk_exit(&hti);
-
-       return 1;
 }
 
 static void nf_flow_offload_work_gc(struct work_struct *work)
@@ -514,7 +501,7 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
        mutex_unlock(&flowtable_lock);
        cancel_delayed_work_sync(&flow_table->gc_work);
        nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
-       WARN_ON(!nf_flow_offload_gc_step(flow_table));
+       nf_flow_offload_gc_step(flow_table);
        rhashtable_destroy(&flow_table->rhashtable);
 }
 EXPORT_SYMBOL_GPL(nf_flow_table_free);
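
The recurring conversion in this file (and again in nft_set_hash.c below) swaps rhashtable_walk_init(), which allocates and can fail, for rhashtable_walk_enter(), which returns void and cannot fail; that is why every walker loses its error-handling prologue and its out: label. The resulting canonical iteration pattern, sketched with a generic element pointer:

    struct rhashtable_iter hti;
    void *pos;

    rhashtable_walk_enter(&ht, &hti);       /* cannot fail */
    rhashtable_walk_start(&hti);

    while ((pos = rhashtable_walk_next(&hti))) {
            if (IS_ERR(pos)) {
                    if (PTR_ERR(pos) != -EAGAIN)
                            break;
                    continue;       /* -EAGAIN: resize in progress, keep walking */
            }
            /* ... visit pos ... */
    }

    rhashtable_walk_stop(&hti);
    rhashtable_walk_exit(&hti);
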
index 15ed91309992e85121f0eb4c3ad01d5be2bdd194..1d291a51cd45b74e5f70b34c304e519f2c41875b 100644 (file)
@@ -254,8 +254,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
        if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
                return NF_ACCEPT;
 
-       if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
-           nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
+       if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
                return NF_DROP;
 
        flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
@@ -471,8 +470,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
        if (skb_try_make_writable(skb, sizeof(*ip6h)))
                return NF_DROP;
 
-       if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
-           nf_flow_nat_ipv6(flow, skb, dir) < 0)
+       if (nf_flow_nat_ipv6(flow, skb, dir) < 0)
                return NF_DROP;
 
        flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
index 99606baedda4903dc4fa360ac63d28fad1109b7e..38793b95d9bca6b6aa884bda1d2590b2824e1060 100644 (file)
@@ -37,7 +37,7 @@ static void mangle_contents(struct sk_buff *skb,
 {
        unsigned char *data;
 
-       BUG_ON(skb_is_nonlinear(skb));
+       SKB_LINEAR_ASSERT(skb);
        data = skb_network_header(skb) + dataoff;
 
        /* move post-replacement */
@@ -110,8 +110,6 @@ bool __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
            !enlarge_skb(skb, rep_len - match_len))
                return false;
 
-       SKB_LINEAR_ASSERT(skb);
-
        tcph = (void *)skb->data + protoff;
 
        oldlen = skb->len - protoff;
index adee04af8d43f519402c20b4f1a8bd11929a2159..78a9e6454ff3d712926397beb904b478b8fab0f1 100644 (file)
@@ -52,13 +52,11 @@ nf_nat_redirect_ipv4(struct sk_buff *skb,
 
                newdst = 0;
 
-               rcu_read_lock();
                indev = __in_dev_get_rcu(skb->dev);
                if (indev && indev->ifa_list) {
                        ifa = indev->ifa_list;
                        newdst = ifa->ifa_local;
                }
-               rcu_read_unlock();
 
                if (!newdst)
                        return NF_DROP;
@@ -97,7 +95,6 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
                struct inet6_ifaddr *ifa;
                bool addr = false;
 
-               rcu_read_lock();
                idev = __in6_dev_get(skb->dev);
                if (idev != NULL) {
                        read_lock_bh(&idev->lock);
@@ -108,7 +105,6 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
                        }
                        read_unlock_bh(&idev->lock);
                }
-               rcu_read_unlock();
 
                if (!addr)
                        return NF_DROP;
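
The rcu_read_lock()/rcu_read_unlock() pairs deleted above were redundant rather than protective: these functions are invoked from netfilter hook context, and the hook infrastructure already runs each hook inside an RCU read-side section (an assumption about the caller, consistent with the bare __in_dev_get_rcu()/__in6_dev_get() accesses that remain):

    /* caller (hook path) is already inside rcu_read_lock(), so the
     * RCU-protected accessor may be used without nesting another lock */
    indev = __in_dev_get_rcu(skb->dev);
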
index 2cfb173cd0b2d8a5e99e5165edcecffcd610af33..f0159eea29780ed93419bce343ec05094691c2a6 100644 (file)
@@ -27,6 +27,8 @@
 static LIST_HEAD(nf_tables_expressions);
 static LIST_HEAD(nf_tables_objects);
 static LIST_HEAD(nf_tables_flowtables);
+static LIST_HEAD(nf_tables_destroy_list);
+static DEFINE_SPINLOCK(nf_tables_destroy_list_lock);
 static u64 table_handle;
 
 enum {
@@ -64,6 +66,8 @@ static void nft_validate_state_update(struct net *net, u8 new_validate_state)
 
        net->nft.validate_state = new_validate_state;
 }
+static void nf_tables_trans_destroy_work(struct work_struct *w);
+static DECLARE_WORK(trans_destroy_work, nf_tables_trans_destroy_work);
 
 static void nft_ctx_init(struct nft_ctx *ctx,
                         struct net *net,
@@ -207,6 +211,18 @@ static int nft_delchain(struct nft_ctx *ctx)
        return err;
 }
 
+/* either expr ops provide both activate/deactivate, or neither */
+static bool nft_expr_check_ops(const struct nft_expr_ops *ops)
+{
+       if (!ops)
+               return true;
+
+       if (WARN_ON_ONCE((!ops->activate ^ !ops->deactivate)))
+               return false;
+
+       return true;
+}
+
 static void nft_rule_expr_activate(const struct nft_ctx *ctx,
                                   struct nft_rule *rule)
 {
@@ -298,7 +314,7 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
        return 0;
 }
 
-static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type,
+static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
                             struct nft_set *set)
 {
        struct nft_trans *trans;
@@ -318,7 +334,7 @@ static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type,
        return 0;
 }
 
-static int nft_delset(struct nft_ctx *ctx, struct nft_set *set)
+static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set)
 {
        int err;
 
@@ -1005,7 +1021,8 @@ static int nf_tables_deltable(struct net *net, struct sock *nlsk,
 
 static void nf_tables_table_destroy(struct nft_ctx *ctx)
 {
-       BUG_ON(ctx->table->use > 0);
+       if (WARN_ON(ctx->table->use > 0))
+               return;
 
        rhltable_destroy(&ctx->table->chains_ht);
        kfree(ctx->table->name);
@@ -1412,7 +1429,8 @@ static void nf_tables_chain_destroy(struct nft_ctx *ctx)
 {
        struct nft_chain *chain = ctx->chain;
 
-       BUG_ON(chain->use > 0);
+       if (WARN_ON(chain->use > 0))
+               return;
 
        /* no concurrent access possible anymore */
        nf_tables_chain_free_chain_rules(chain);
@@ -1907,6 +1925,9 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk,
  */
 int nft_register_expr(struct nft_expr_type *type)
 {
+       if (!nft_expr_check_ops(type->ops))
+               return -EINVAL;
+
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
        if (type->family == NFPROTO_UNSPEC)
                list_add_tail_rcu(&type->list, &nf_tables_expressions);
@@ -2054,6 +2075,10 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
                        err = PTR_ERR(ops);
                        goto err1;
                }
+               if (!nft_expr_check_ops(ops)) {
+                       err = -EINVAL;
+                       goto err1;
+               }
        } else
                ops = type->ops;
 
@@ -2434,7 +2459,6 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
 {
        struct nft_expr *expr;
 
-       lockdep_assert_held(&ctx->net->nft.commit_mutex);
        /*
         * Careful: some expressions might not be initialized in case this
         * is called on error from nf_tables_newrule().
@@ -3567,13 +3591,6 @@ static void nft_set_destroy(struct nft_set *set)
        kvfree(set);
 }
 
-static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
-{
-       list_del_rcu(&set->list);
-       nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
-       nft_set_destroy(set);
-}
-
 static int nf_tables_delset(struct net *net, struct sock *nlsk,
                            struct sk_buff *skb, const struct nlmsghdr *nlh,
                            const struct nlattr * const nla[],
@@ -3668,17 +3685,38 @@ bind:
 }
 EXPORT_SYMBOL_GPL(nf_tables_bind_set);
 
-void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set,
                          struct nft_set_binding *binding)
+{
+       if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
+           nft_is_active(ctx->net, set))
+               list_add_tail_rcu(&set->list, &ctx->table->sets);
+
+       list_add_tail_rcu(&binding->list, &set->bindings);
+}
+EXPORT_SYMBOL_GPL(nf_tables_rebind_set);
+
+void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+                         struct nft_set_binding *binding)
 {
        list_del_rcu(&binding->list);
 
        if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
            nft_is_active(ctx->net, set))
-               nf_tables_set_destroy(ctx, set);
+               list_del_rcu(&set->list);
 }
 EXPORT_SYMBOL_GPL(nf_tables_unbind_set);
 
+void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set)
+{
+       if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
+           nft_is_active(ctx->net, set)) {
+               nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
+               nft_set_destroy(set);
+       }
+}
+EXPORT_SYMBOL_GPL(nf_tables_destroy_set);
+
 const struct nft_set_ext_type nft_set_ext_types[] = {
        [NFT_SET_EXT_KEY]               = {
                .align  = __alignof__(u32),
@@ -6191,19 +6229,28 @@ static void nft_commit_release(struct nft_trans *trans)
                nf_tables_flowtable_destroy(nft_trans_flowtable(trans));
                break;
        }
+
+       if (trans->put_net)
+               put_net(trans->ctx.net);
+
        kfree(trans);
 }
 
-static void nf_tables_commit_release(struct net *net)
+static void nf_tables_trans_destroy_work(struct work_struct *w)
 {
        struct nft_trans *trans, *next;
+       LIST_HEAD(head);
 
-       if (list_empty(&net->nft.commit_list))
+       spin_lock(&nf_tables_destroy_list_lock);
+       list_splice_init(&nf_tables_destroy_list, &head);
+       spin_unlock(&nf_tables_destroy_list_lock);
+
+       if (list_empty(&head))
                return;
 
        synchronize_rcu();
 
-       list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
+       list_for_each_entry_safe(trans, next, &head, list) {
                list_del(&trans->list);
                nft_commit_release(trans);
        }
@@ -6334,6 +6381,37 @@ static void nft_chain_del(struct nft_chain *chain)
        list_del_rcu(&chain->list);
 }
 
+static void nf_tables_commit_release(struct net *net)
+{
+       struct nft_trans *trans;
+
+       /* all side effects have to be made visible.
+        * For example, if a chain named 'foo' has been deleted, a
+        * new transaction must not find it anymore.
+        *
+        * Memory reclaim happens asynchronously from work queue
+        * to prevent expensive synchronize_rcu() in commit phase.
+        */
+       if (list_empty(&net->nft.commit_list)) {
+               mutex_unlock(&net->nft.commit_mutex);
+               return;
+       }
+
+       trans = list_last_entry(&net->nft.commit_list,
+                               struct nft_trans, list);
+       get_net(trans->ctx.net);
+       WARN_ON_ONCE(trans->put_net);
+
+       trans->put_net = true;
+       spin_lock(&nf_tables_destroy_list_lock);
+       list_splice_tail_init(&net->nft.commit_list, &nf_tables_destroy_list);
+       spin_unlock(&nf_tables_destroy_list_lock);
+
+       mutex_unlock(&net->nft.commit_mutex);
+
+       schedule_work(&trans_destroy_work);
+}
+
 static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 {
        struct nft_trans *trans, *next;
@@ -6495,9 +6573,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
                }
        }
 
-       nf_tables_commit_release(net);
        nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
-       mutex_unlock(&net->nft.commit_mutex);
+       nf_tables_commit_release(net);
 
        return 0;
 }
@@ -7168,7 +7245,8 @@ int __nft_release_basechain(struct nft_ctx *ctx)
 {
        struct nft_rule *rule, *nr;
 
-       BUG_ON(!nft_is_base_chain(ctx->chain));
+       if (WARN_ON(!nft_is_base_chain(ctx->chain)))
+               return 0;
 
        nf_tables_unregister_hook(ctx->net, ctx->chain->table, ctx->chain);
        list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) {
@@ -7271,6 +7349,7 @@ static int __init nf_tables_module_init(void)
 {
        int err;
 
+       spin_lock_init(&nf_tables_destroy_list_lock);
        err = register_pernet_subsys(&nf_tables_net_ops);
        if (err < 0)
                return err;
@@ -7310,6 +7389,7 @@ static void __exit nf_tables_module_exit(void)
        unregister_netdevice_notifier(&nf_tables_flowtable_notifier);
        nft_chain_filter_fini();
        unregister_pernet_subsys(&nf_tables_net_ops);
+       cancel_work_sync(&trans_destroy_work);
        rcu_barrier();
        nf_tables_core_module_exit();
 }
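
The commit-path restructuring in this file is easiest to see as two halves: the commit side splices finished transactions onto a global list and returns immediately, while a work item pays for one synchronize_rcu() per batch before freeing; the get_net()/put_net() pairing on the last transaction keeps the namespace alive until that deferred release runs. Condensed from the hunks above:

    /* commit side: publish changes, then hand the log to the worker */
    spin_lock(&nf_tables_destroy_list_lock);
    list_splice_tail_init(&net->nft.commit_list, &nf_tables_destroy_list);
    spin_unlock(&nf_tables_destroy_list_lock);
    schedule_work(&trans_destroy_work);     /* no grace-period wait here */

    /* worker side: one grace period for the whole batch, then free */
    synchronize_rcu();
    list_for_each_entry_safe(trans, next, &head, list) {
            list_del(&trans->list);
            nft_commit_release(trans);
    }
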
index ffd5c0f9412b4dab673b3294292a2fa6a7da404b..3fbce3b9c5ec0f51c9841aca36c0e5fc48b233c6 100644 (file)
@@ -249,12 +249,24 @@ static struct nft_expr_type *nft_basic_types[] = {
        &nft_exthdr_type,
 };
 
+static struct nft_object_type *nft_basic_objects[] = {
+#ifdef CONFIG_NETWORK_SECMARK
+       &nft_secmark_obj_type,
+#endif
+};
+
 int __init nf_tables_core_module_init(void)
 {
-       int err, i;
+       int err, i, j = 0;
+
+       for (i = 0; i < ARRAY_SIZE(nft_basic_objects); i++) {
+               err = nft_register_obj(nft_basic_objects[i]);
+               if (err)
+                       goto err;
+       }
 
-       for (i = 0; i < ARRAY_SIZE(nft_basic_types); i++) {
-               err = nft_register_expr(nft_basic_types[i]);
+       for (j = 0; j < ARRAY_SIZE(nft_basic_types); j++) {
+               err = nft_register_expr(nft_basic_types[j]);
                if (err)
                        goto err;
        }
@@ -262,8 +274,12 @@ int __init nf_tables_core_module_init(void)
        return 0;
 
 err:
+       while (j-- > 0)
+               nft_unregister_expr(nft_basic_types[j]);
+
        while (i-- > 0)
-               nft_unregister_expr(nft_basic_types[i]);
+               nft_unregister_obj(nft_basic_objects[i]);
+
        return err;
 }
 
@@ -274,4 +290,8 @@ void nf_tables_core_module_exit(void)
        i = ARRAY_SIZE(nft_basic_types);
        while (i-- > 0)
                nft_unregister_expr(nft_basic_types[i]);
+
+       i = ARRAY_SIZE(nft_basic_objects);
+       while (i-- > 0)
+               nft_unregister_obj(nft_basic_objects[i]);
 }
index a30f8ba4b89ac427053281936ad3e70750e3a2f0..b48545b84ce80de61e7c5b2d356804ed75d928ae 100644 (file)
@@ -53,9 +53,6 @@ ctnl_timeout_parse_policy(void *timeout,
        struct nlattr **tb;
        int ret = 0;
 
-       if (!l4proto->ctnl_timeout.nlattr_to_obj)
-               return 0;
-
        tb = kcalloc(l4proto->ctnl_timeout.nlattr_max + 1, sizeof(*tb),
                     GFP_KERNEL);
 
@@ -125,7 +122,7 @@ static int cttimeout_new_timeout(struct net *net, struct sock *ctnl,
                return -EBUSY;
        }
 
-       l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+       l4proto = nf_ct_l4proto_find_get(l4num);
 
        /* This protocol is not supported, skip. */
        if (l4proto->l4proto != l4num) {
@@ -167,6 +164,8 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
        struct nfgenmsg *nfmsg;
        unsigned int flags = portid ? NLM_F_MULTI : 0;
        const struct nf_conntrack_l4proto *l4proto = timeout->timeout.l4proto;
+       struct nlattr *nest_parms;
+       int ret;
 
        event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_TIMEOUT, event);
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
@@ -186,22 +185,15 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
                         htonl(refcount_read(&timeout->refcnt))))
                goto nla_put_failure;
 
-       if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) {
-               struct nlattr *nest_parms;
-               int ret;
-
-               nest_parms = nla_nest_start(skb,
-                                           CTA_TIMEOUT_DATA | NLA_F_NESTED);
-               if (!nest_parms)
-                       goto nla_put_failure;
+       nest_parms = nla_nest_start(skb, CTA_TIMEOUT_DATA | NLA_F_NESTED);
+       if (!nest_parms)
+               goto nla_put_failure;
 
-               ret = l4proto->ctnl_timeout.obj_to_nlattr(skb,
-                                                       &timeout->timeout.data);
-               if (ret < 0)
-                       goto nla_put_failure;
+       ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, &timeout->timeout.data);
+       if (ret < 0)
+               goto nla_put_failure;
 
-               nla_nest_end(skb, nest_parms);
-       }
+       nla_nest_end(skb, nest_parms);
 
        nlmsg_end(skb, nlh);
        return skb->len;
@@ -369,7 +361,7 @@ static int cttimeout_default_set(struct net *net, struct sock *ctnl,
 
        l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO]));
        l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
-       l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+       l4proto = nf_ct_l4proto_find_get(l4num);
 
        /* This protocol is not supported, skip. */
        if (l4proto->l4proto != l4num) {
@@ -391,12 +383,14 @@ err:
 
 static int
 cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
-                           u32 seq, u32 type, int event,
+                           u32 seq, u32 type, int event, u16 l3num,
                            const struct nf_conntrack_l4proto *l4proto)
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        unsigned int flags = portid ? NLM_F_MULTI : 0;
+       struct nlattr *nest_parms;
+       int ret;
 
        event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_TIMEOUT, event);
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
@@ -408,25 +402,19 @@ cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = 0;
 
-       if (nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(l4proto->l3proto)) ||
+       if (nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(l3num)) ||
            nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, l4proto->l4proto))
                goto nla_put_failure;
 
-       if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) {
-               struct nlattr *nest_parms;
-               int ret;
-
-               nest_parms = nla_nest_start(skb,
-                                           CTA_TIMEOUT_DATA | NLA_F_NESTED);
-               if (!nest_parms)
-                       goto nla_put_failure;
+       nest_parms = nla_nest_start(skb, CTA_TIMEOUT_DATA | NLA_F_NESTED);
+       if (!nest_parms)
+               goto nla_put_failure;
 
-               ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, NULL);
-               if (ret < 0)
-                       goto nla_put_failure;
+       ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, NULL);
+       if (ret < 0)
+               goto nla_put_failure;
 
-               nla_nest_end(skb, nest_parms);
-       }
+       nla_nest_end(skb, nest_parms);
 
        nlmsg_end(skb, nlh);
        return skb->len;
@@ -454,7 +442,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
 
        l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO]));
        l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
-       l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+       l4proto = nf_ct_l4proto_find_get(l4num);
 
        /* This protocol is not supported, skip. */
        if (l4proto->l4proto != l4num) {
@@ -472,6 +460,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
                                          nlh->nlmsg_seq,
                                          NFNL_MSG_TYPE(nlh->nlmsg_type),
                                          IPCTNL_MSG_TIMEOUT_DEFAULT_SET,
+                                         l3num,
                                          l4proto);
        if (ret <= 0) {
                kfree_skb(skb2);
index fa90a8402845d1768fce3741e3173a8493268558..79d48c1d06f4dc192e8b8fd9ba68b0dbe8d7864b 100644 (file)
@@ -79,7 +79,8 @@ static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 
        err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &desc,
                            tb[NFTA_CMP_DATA]);
-       BUG_ON(err < 0);
+       if (err < 0)
+               return err;
 
        priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);
        err = nft_validate_register_load(priv->sreg, desc.len);
@@ -129,7 +130,8 @@ static int nft_cmp_fast_init(const struct nft_ctx *ctx,
 
        err = nft_data_init(NULL, &data, sizeof(data), &desc,
                            tb[NFTA_CMP_DATA]);
-       BUG_ON(err < 0);
+       if (err < 0)
+               return err;
 
        priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);
        err = nft_validate_register_load(priv->sreg, desc.len);
index 5dd87748afa8a9185de95c439881f72d3f96e798..586627c361dfcf8026505d1bff3b5287b2e3e96f 100644 (file)
@@ -279,7 +279,7 @@ static void nft_ct_set_eval(const struct nft_expr *expr,
 {
        const struct nft_ct *priv = nft_expr_priv(expr);
        struct sk_buff *skb = pkt->skb;
-#ifdef CONFIG_NF_CONNTRACK_MARK
+#if defined(CONFIG_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_SECMARK)
        u32 value = regs->data[priv->sreg];
 #endif
        enum ip_conntrack_info ctinfo;
@@ -298,6 +298,14 @@ static void nft_ct_set_eval(const struct nft_expr *expr,
                }
                break;
 #endif
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+       case NFT_CT_SECMARK:
+               if (ct->secmark != value) {
+                       ct->secmark = value;
+                       nf_conntrack_event_cache(IPCT_SECMARK, ct);
+               }
+               break;
+#endif
 #ifdef CONFIG_NF_CONNTRACK_LABELS
        case NFT_CT_LABELS:
                nf_connlabels_replace(ct,
@@ -564,6 +572,13 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
                        return -EINVAL;
                len = sizeof(u32);
                break;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+       case NFT_CT_SECMARK:
+               if (tb[NFTA_CT_DIRECTION])
+                       return -EINVAL;
+               len = sizeof(u32);
+               break;
 #endif
        default:
                return -EOPNOTSUPP;
@@ -776,9 +791,6 @@ nft_ct_timeout_parse_policy(void *timeouts,
        struct nlattr **tb;
        int ret = 0;
 
-       if (!l4proto->ctnl_timeout.nlattr_to_obj)
-               return 0;
-
        tb = kcalloc(l4proto->ctnl_timeout.nlattr_max + 1, sizeof(*tb),
                     GFP_KERNEL);
 
@@ -858,7 +870,7 @@ static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
        l4num = nla_get_u8(tb[NFTA_CT_TIMEOUT_L4PROTO]);
        priv->l4proto = l4num;
 
-       l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+       l4proto = nf_ct_l4proto_find_get(l4num);
 
        if (l4proto->l4proto != l4num) {
                ret = -EOPNOTSUPP;
index 6e91a37d57f2736a128cefe886422a15d1cee851..07d4efd3d85182997edb4ae0a1fd74d88221a07f 100644 (file)
@@ -235,14 +235,31 @@ err1:
        return err;
 }
 
+static void nft_dynset_activate(const struct nft_ctx *ctx,
+                               const struct nft_expr *expr)
+{
+       struct nft_dynset *priv = nft_expr_priv(expr);
+
+       nf_tables_rebind_set(ctx, priv->set, &priv->binding);
+}
+
+static void nft_dynset_deactivate(const struct nft_ctx *ctx,
+                                 const struct nft_expr *expr)
+{
+       struct nft_dynset *priv = nft_expr_priv(expr);
+
+       nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+}
+
 static void nft_dynset_destroy(const struct nft_ctx *ctx,
                               const struct nft_expr *expr)
 {
        struct nft_dynset *priv = nft_expr_priv(expr);
 
-       nf_tables_unbind_set(ctx, priv->set, &priv->binding);
        if (priv->expr != NULL)
                nft_expr_destroy(ctx, priv->expr);
+
+       nf_tables_destroy_set(ctx, priv->set);
 }
 
 static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
@@ -279,6 +296,8 @@ static const struct nft_expr_ops nft_dynset_ops = {
        .eval           = nft_dynset_eval,
        .init           = nft_dynset_init,
        .destroy        = nft_dynset_destroy,
+       .activate       = nft_dynset_activate,
+       .deactivate     = nft_dynset_deactivate,
        .dump           = nft_dynset_dump,
 };
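
nft_dynset here, and nft_lookup and nft_objref below, all gain the same pair of hooks, so the lifecycle is worth spelling out once: deactivate() unbinds the expression from its set while a rule removal is being prepared or a new rule is unwound, activate() rebinds it if an aborted transaction resurrects the rule, and destroy() now only releases the (anonymous) set once removal is final, via nf_tables_destroy_set(). Schematically (roughly; the exact call sites live in the transaction machinery):

    .activate       = expr_activate,     /* abort path:  nf_tables_rebind_set()  */
    .deactivate     = expr_deactivate,   /* removal:     nf_tables_unbind_set()  */
    .destroy        = expr_destroy,      /* final free:  nf_tables_destroy_set() */
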
 
index ad13e8643599722a2eb0376957e126f5b192385d..227b2b15a19cbd979df780b3660e2395b689c5db 100644 (file)
@@ -121,12 +121,28 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
        return 0;
 }
 
+static void nft_lookup_activate(const struct nft_ctx *ctx,
+                               const struct nft_expr *expr)
+{
+       struct nft_lookup *priv = nft_expr_priv(expr);
+
+       nf_tables_rebind_set(ctx, priv->set, &priv->binding);
+}
+
+static void nft_lookup_deactivate(const struct nft_ctx *ctx,
+                                 const struct nft_expr *expr)
+{
+       struct nft_lookup *priv = nft_expr_priv(expr);
+
+       nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+}
+
 static void nft_lookup_destroy(const struct nft_ctx *ctx,
                               const struct nft_expr *expr)
 {
        struct nft_lookup *priv = nft_expr_priv(expr);
 
-       nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+       nf_tables_destroy_set(ctx, priv->set);
 }
 
 static int nft_lookup_dump(struct sk_buff *skb, const struct nft_expr *expr)
@@ -209,6 +225,8 @@ static const struct nft_expr_ops nft_lookup_ops = {
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
        .eval           = nft_lookup_eval,
        .init           = nft_lookup_init,
+       .activate       = nft_lookup_activate,
+       .deactivate     = nft_lookup_deactivate,
        .destroy        = nft_lookup_destroy,
        .dump           = nft_lookup_dump,
        .validate       = nft_lookup_validate,
index 297fe7d97c182ffbcbfb15c94f10bdec633f1149..6180626c3f80b9069e7af752f568a282e157cc3e 100644 (file)
@@ -284,6 +284,11 @@ static void nft_meta_set_eval(const struct nft_expr *expr,
 
                skb->nf_trace = !!value8;
                break;
+#ifdef CONFIG_NETWORK_SECMARK
+       case NFT_META_SECMARK:
+               skb->secmark = value;
+               break;
+#endif
        default:
                WARN_ON(1);
        }
@@ -436,6 +441,9 @@ static int nft_meta_set_init(const struct nft_ctx *ctx,
        switch (priv->key) {
        case NFT_META_MARK:
        case NFT_META_PRIORITY:
+#ifdef CONFIG_NETWORK_SECMARK
+       case NFT_META_SECMARK:
+#endif
                len = sizeof(u32);
                break;
        case NFT_META_NFTRACE:
@@ -543,3 +551,111 @@ struct nft_expr_type nft_meta_type __read_mostly = {
        .maxattr        = NFTA_META_MAX,
        .owner          = THIS_MODULE,
 };
+
+#ifdef CONFIG_NETWORK_SECMARK
+struct nft_secmark {
+       u32 secid;
+       char *ctx;
+};
+
+static const struct nla_policy nft_secmark_policy[NFTA_SECMARK_MAX + 1] = {
+       [NFTA_SECMARK_CTX]     = { .type = NLA_STRING, .len = NFT_SECMARK_CTX_MAXLEN },
+};
+
+static int nft_secmark_compute_secid(struct nft_secmark *priv)
+{
+       u32 tmp_secid = 0;
+       int err;
+
+       err = security_secctx_to_secid(priv->ctx, strlen(priv->ctx), &tmp_secid);
+       if (err)
+               return err;
+
+       if (!tmp_secid)
+               return -ENOENT;
+
+       err = security_secmark_relabel_packet(tmp_secid);
+       if (err)
+               return err;
+
+       priv->secid = tmp_secid;
+       return 0;
+}
+
+static void nft_secmark_obj_eval(struct nft_object *obj, struct nft_regs *regs,
+                                const struct nft_pktinfo *pkt)
+{
+       const struct nft_secmark *priv = nft_obj_data(obj);
+       struct sk_buff *skb = pkt->skb;
+
+       skb->secmark = priv->secid;
+}
+
+static int nft_secmark_obj_init(const struct nft_ctx *ctx,
+                               const struct nlattr * const tb[],
+                               struct nft_object *obj)
+{
+       struct nft_secmark *priv = nft_obj_data(obj);
+       int err;
+
+       if (tb[NFTA_SECMARK_CTX] == NULL)
+               return -EINVAL;
+
+       priv->ctx = nla_strdup(tb[NFTA_SECMARK_CTX], GFP_KERNEL);
+       if (!priv->ctx)
+               return -ENOMEM;
+
+       err = nft_secmark_compute_secid(priv);
+       if (err) {
+               kfree(priv->ctx);
+               return err;
+       }
+
+       security_secmark_refcount_inc();
+
+       return 0;
+}
+
+static int nft_secmark_obj_dump(struct sk_buff *skb, struct nft_object *obj,
+                               bool reset)
+{
+       struct nft_secmark *priv = nft_obj_data(obj);
+       int err;
+
+       if (nla_put_string(skb, NFTA_SECMARK_CTX, priv->ctx))
+               return -1;
+
+       if (reset) {
+               err = nft_secmark_compute_secid(priv);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+static void nft_secmark_obj_destroy(const struct nft_ctx *ctx, struct nft_object *obj)
+{
+       struct nft_secmark *priv = nft_obj_data(obj);
+
+       security_secmark_refcount_dec();
+
+       kfree(priv->ctx);
+}
+
+static const struct nft_object_ops nft_secmark_obj_ops = {
+       .type           = &nft_secmark_obj_type,
+       .size           = sizeof(struct nft_secmark),
+       .init           = nft_secmark_obj_init,
+       .eval           = nft_secmark_obj_eval,
+       .dump           = nft_secmark_obj_dump,
+       .destroy        = nft_secmark_obj_destroy,
+};
+struct nft_object_type nft_secmark_obj_type __read_mostly = {
+       .type           = NFT_OBJECT_SECMARK,
+       .ops            = &nft_secmark_obj_ops,
+       .maxattr        = NFTA_SECMARK_MAX,
+       .policy         = nft_secmark_policy,
+       .owner          = THIS_MODULE,
+};
+#endif /* CONFIG_NETWORK_SECMARK */
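
Summarizing the new object's lifecycle, since it is spread over several callbacks above (a condensed restatement, not new behavior):

    /* init:    security_secctx_to_secid()        - resolve the context string
     *          security_secmark_relabel_packet() - permission check on the secid
     *          security_secmark_refcount_inc()   - pin the LSM secmark hooks
     * eval:    skb->secmark = priv->secid        - per packet, no LSM call
     * dump:    with "reset", recompute the secid so an LSM policy reload
     *          is picked up
     * destroy: security_secmark_refcount_dec() and kfree() of the string */

The expensive string-to-secid translation happens once at object creation; the per-packet path is a single store.
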
index cdf348f751eca0c22018d99954d98aac66499d8d..a3185ca2a3a985712f5b2262df3f9e28af6fab4e 100644 (file)
@@ -155,12 +155,28 @@ nla_put_failure:
        return -1;
 }
 
+static void nft_objref_map_activate(const struct nft_ctx *ctx,
+                                   const struct nft_expr *expr)
+{
+       struct nft_objref_map *priv = nft_expr_priv(expr);
+
+       nf_tables_rebind_set(ctx, priv->set, &priv->binding);
+}
+
+static void nft_objref_map_deactivate(const struct nft_ctx *ctx,
+                                     const struct nft_expr *expr)
+{
+       struct nft_objref_map *priv = nft_expr_priv(expr);
+
+       nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+}
+
 static void nft_objref_map_destroy(const struct nft_ctx *ctx,
                                   const struct nft_expr *expr)
 {
        struct nft_objref_map *priv = nft_expr_priv(expr);
 
-       nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+       nf_tables_destroy_set(ctx, priv->set);
 }
 
 static struct nft_expr_type nft_objref_type;
@@ -169,6 +185,8 @@ static const struct nft_expr_ops nft_objref_map_ops = {
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
        .eval           = nft_objref_map_eval,
        .init           = nft_objref_map_init,
+       .activate       = nft_objref_map_activate,
+       .deactivate     = nft_objref_map_deactivate,
        .destroy        = nft_objref_map_destroy,
        .dump           = nft_objref_map_dump,
 };
index 5af74b37f4236ec0402ac41360f04a5000ae3126..a35fb59ace7326324811a21704eb2932e2ca5a4c 100644 (file)
@@ -49,7 +49,7 @@ static int nft_osf_init(const struct nft_ctx *ctx,
 
        priv->dreg = nft_parse_register(tb[NFTA_OSF_DREG]);
        err = nft_validate_register_store(ctx, priv->dreg, NULL,
-                                         NFTA_DATA_VALUE, NFT_OSF_MAXGENRELEN);
+                                         NFT_DATA_VALUE, NFT_OSF_MAXGENRELEN);
        if (err < 0)
                return err;
 
index 29f5bd2377b0deaf7ede8ec0573bf71cfeef7478..b48e58cceeb72f9635263ac6985da98af48cbbf7 100644 (file)
@@ -94,7 +94,8 @@ static u8 icmp_code_v4[NFT_REJECT_ICMPX_MAX + 1] = {
 
 int nft_reject_icmp_code(u8 code)
 {
-       BUG_ON(code > NFT_REJECT_ICMPX_MAX);
+       if (WARN_ON_ONCE(code > NFT_REJECT_ICMPX_MAX))
+               return ICMP_NET_UNREACH;
 
        return icmp_code_v4[code];
 }
@@ -111,7 +112,8 @@ static u8 icmp_code_v6[NFT_REJECT_ICMPX_MAX + 1] = {
 
 int nft_reject_icmpv6_code(u8 code)
 {
-       BUG_ON(code > NFT_REJECT_ICMPX_MAX);
+       if (WARN_ON_ONCE(code > NFT_REJECT_ICMPX_MAX))
+               return ICMPV6_NOROUTE;
 
        return icmp_code_v6[code];
 }
index 76dba9f6b6f627de7de1ada08320cc2ed5a12b24..f35fa33913ae4d085c0d63fc7602f8f873d516f8 100644 (file)
@@ -90,6 +90,11 @@ static void nft_rt_get_eval(const struct nft_expr *expr,
        case NFT_RT_TCPMSS:
                nft_reg_store16(dest, get_tcpmss(pkt, dst));
                break;
+#ifdef CONFIG_XFRM
+       case NFT_RT_XFRM:
+               nft_reg_store8(dest, !!dst->xfrm);
+               break;
+#endif
        default:
                WARN_ON(1);
                goto err;
@@ -130,6 +135,11 @@ static int nft_rt_get_init(const struct nft_ctx *ctx,
        case NFT_RT_TCPMSS:
                len = sizeof(u16);
                break;
+#ifdef CONFIG_XFRM
+       case NFT_RT_XFRM:
+               len = sizeof(u8);
+               break;
+#endif
        default:
                return -EOPNOTSUPP;
        }
@@ -164,6 +174,7 @@ static int nft_rt_validate(const struct nft_ctx *ctx, const struct nft_expr *exp
        case NFT_RT_NEXTHOP4:
        case NFT_RT_NEXTHOP6:
        case NFT_RT_CLASSID:
+       case NFT_RT_XFRM:
                return 0;
        case NFT_RT_TCPMSS:
                hooks = (1 << NF_INET_FORWARD) |
index 015124e649cbdf0fb658515cf7c7e2dbf550a972..339a9dd1c83210ec800f043c59eb68e4d6659d95 100644 (file)
@@ -88,7 +88,7 @@ static bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
                .key     = key,
        };
 
-       he = rhashtable_lookup_fast(&priv->ht, &arg, nft_rhash_params);
+       he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
        if (he != NULL)
                *ext = &he->ext;
 
@@ -106,7 +106,7 @@ static void *nft_rhash_get(const struct net *net, const struct nft_set *set,
                .key     = elem->key.val.data,
        };
 
-       he = rhashtable_lookup_fast(&priv->ht, &arg, nft_rhash_params);
+       he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
        if (he != NULL)
                return he;
 
@@ -129,7 +129,7 @@ static bool nft_rhash_update(struct nft_set *set, const u32 *key,
                .key     = key,
        };
 
-       he = rhashtable_lookup_fast(&priv->ht, &arg, nft_rhash_params);
+       he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
        if (he != NULL)
                goto out;
 
@@ -217,7 +217,7 @@ static void *nft_rhash_deactivate(const struct net *net,
        };
 
        rcu_read_lock();
-       he = rhashtable_lookup_fast(&priv->ht, &arg, nft_rhash_params);
+       he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
        if (he != NULL &&
            !nft_rhash_flush(net, set, he))
                he = NULL;
@@ -244,21 +244,15 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
        struct nft_rhash_elem *he;
        struct rhashtable_iter hti;
        struct nft_set_elem elem;
-       int err;
-
-       err = rhashtable_walk_init(&priv->ht, &hti, GFP_ATOMIC);
-       iter->err = err;
-       if (err)
-               return;
 
+       rhashtable_walk_enter(&priv->ht, &hti);
        rhashtable_walk_start(&hti);
 
        while ((he = rhashtable_walk_next(&hti))) {
                if (IS_ERR(he)) {
-                       err = PTR_ERR(he);
-                       if (err != -EAGAIN) {
-                               iter->err = err;
-                               goto out;
+                       if (PTR_ERR(he) != -EAGAIN) {
+                               iter->err = PTR_ERR(he);
+                               break;
                        }
 
                        continue;
@@ -275,13 +269,11 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
 
                iter->err = iter->fn(ctx, set, iter, &elem);
                if (iter->err < 0)
-                       goto out;
+                       break;
 
 cont:
                iter->count++;
        }
-
-out:
        rhashtable_walk_stop(&hti);
        rhashtable_walk_exit(&hti);
 }
@@ -293,21 +285,17 @@ static void nft_rhash_gc(struct work_struct *work)
        struct nft_rhash *priv;
        struct nft_set_gc_batch *gcb = NULL;
        struct rhashtable_iter hti;
-       int err;
 
        priv = container_of(work, struct nft_rhash, gc_work.work);
        set  = nft_set_container_of(priv);
 
-       err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
-       if (err)
-               goto schedule;
-
+       rhashtable_walk_enter(&priv->ht, &hti);
        rhashtable_walk_start(&hti);
 
        while ((he = rhashtable_walk_next(&hti))) {
                if (IS_ERR(he)) {
                        if (PTR_ERR(he) != -EAGAIN)
-                               goto out;
+                               break;
                        continue;
                }
 
@@ -326,17 +314,15 @@ gc:
 
                gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
                if (gcb == NULL)
-                       goto out;
+                       break;
                rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params);
                atomic_dec(&set->nelems);
                nft_set_gc_batch_add(gcb, he);
        }
-out:
        rhashtable_walk_stop(&hti);
        rhashtable_walk_exit(&hti);
 
        nft_set_gc_batch_complete(gcb);
-schedule:
        queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
                           nft_set_gc_interval(set));
 }
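
Two related cleanups in the hunks above: rhashtable_lookup() replaces rhashtable_lookup_fast() because these set operations already run under rcu_read_lock(), so the _fast variant's internal RCU locking was redundant; and rhashtable_walk_init(), which allocates and can fail, is replaced by rhashtable_walk_enter(), which cannot, letting the error plumbing (the iter->err seeding and the out:/schedule: labels) go away. The resulting walker pattern, as a minimal sketch (process() is an illustrative stand-in):

    struct rhashtable_iter hti;
    void *obj;

    rhashtable_walk_enter(&ht, &hti);   /* no allocation, cannot fail */
    rhashtable_walk_start(&hti);        /* enters an RCU read-side section */

    while ((obj = rhashtable_walk_next(&hti))) {
            if (IS_ERR(obj)) {
                    if (PTR_ERR(obj) == -EAGAIN)
                            continue;   /* table resized underneath us; keep going */
                    break;              /* any other error ends the walk */
            }
            process(obj);               /* illustrative per-element work */
    }

    rhashtable_walk_stop(&hti);
    rhashtable_walk_exit(&hti);
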
index 55e2d9215c0d4fe488c0d78c5ce41979e9098852..0e5ec126f6ad0516acf0576f01c4430dec43aec8 100644 (file)
@@ -355,12 +355,11 @@ cont:
 
 static void nft_rbtree_gc(struct work_struct *work)
 {
+       struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
        struct nft_set_gc_batch *gcb = NULL;
-       struct rb_node *node, *prev = NULL;
-       struct nft_rbtree_elem *rbe;
        struct nft_rbtree *priv;
+       struct rb_node *node;
        struct nft_set *set;
-       int i;
 
        priv = container_of(work, struct nft_rbtree, gc_work.work);
        set  = nft_set_container_of(priv);
@@ -371,7 +370,7 @@ static void nft_rbtree_gc(struct work_struct *work)
                rbe = rb_entry(node, struct nft_rbtree_elem, node);
 
                if (nft_rbtree_interval_end(rbe)) {
-                       prev = node;
+                       rbe_end = rbe;
                        continue;
                }
                if (!nft_set_elem_expired(&rbe->ext))
@@ -379,29 +378,30 @@ static void nft_rbtree_gc(struct work_struct *work)
                if (nft_set_elem_mark_busy(&rbe->ext))
                        continue;
 
+               if (rbe_prev) {
+                       rb_erase(&rbe_prev->node, &priv->root);
+                       rbe_prev = NULL;
+               }
                gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
                if (!gcb)
                        break;
 
                atomic_dec(&set->nelems);
                nft_set_gc_batch_add(gcb, rbe);
+               rbe_prev = rbe;
 
-               if (prev) {
-                       rbe = rb_entry(prev, struct nft_rbtree_elem, node);
+               if (rbe_end) {
                        atomic_dec(&set->nelems);
-                       nft_set_gc_batch_add(gcb, rbe);
-                       prev = NULL;
+                       nft_set_gc_batch_add(gcb, rbe_end);
+                       rb_erase(&rbe_end->node, &priv->root);
+                       rbe_end = NULL;
                }
                node = rb_next(node);
                if (!node)
                        break;
        }
-       if (gcb) {
-               for (i = 0; i < gcb->head.cnt; i++) {
-                       rbe = gcb->elems[i];
-                       rb_erase(&rbe->node, &priv->root);
-               }
-       }
+       if (rbe_prev)
+               rb_erase(&rbe_prev->node, &priv->root);
        write_seqcount_end(&priv->count);
        write_unlock_bh(&priv->lock);
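
The reworked GC above keeps element pointers (rbe_prev, rbe_end) instead of a second fixup pass over the batch, and only calls rb_erase() on an element after rb_next() has already stepped past it, so the iteration never walks an unlinked node. The underlying safe-removal pattern, sketched with an illustrative should_remove() predicate:

    struct rb_node *node, *next;

    for (node = rb_first(&root); node; node = next) {
            next = rb_next(node);           /* advance before erasing */
            if (should_remove(node))        /* illustrative predicate */
                    rb_erase(node, &root);
    }
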
 
diff --git a/net/netfilter/nft_xfrm.c b/net/netfilter/nft_xfrm.c
new file mode 100644 (file)
index 0000000..3cf71a2
--- /dev/null
@@ -0,0 +1,293 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Generic part shared by ipv4 and ipv6 backends.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <linux/in.h>
+#include <net/xfrm.h>
+
+static const struct nla_policy nft_xfrm_policy[NFTA_XFRM_MAX + 1] = {
+       [NFTA_XFRM_KEY]         = { .type = NLA_U32 },
+       [NFTA_XFRM_DIR]         = { .type = NLA_U8 },
+       [NFTA_XFRM_SPNUM]       = { .type = NLA_U32 },
+       [NFTA_XFRM_DREG]        = { .type = NLA_U32 },
+};
+
+struct nft_xfrm {
+       enum nft_xfrm_keys      key:8;
+       enum nft_registers      dreg:8;
+       u8                      dir;
+       u8                      spnum;
+};
+
+static int nft_xfrm_get_init(const struct nft_ctx *ctx,
+                            const struct nft_expr *expr,
+                            const struct nlattr * const tb[])
+{
+       struct nft_xfrm *priv = nft_expr_priv(expr);
+       unsigned int len = 0;
+       u32 spnum = 0;
+       u8 dir;
+
+       if (!tb[NFTA_XFRM_KEY] || !tb[NFTA_XFRM_DIR] || !tb[NFTA_XFRM_DREG])
+               return -EINVAL;
+
+       switch (ctx->family) {
+       case NFPROTO_IPV4:
+       case NFPROTO_IPV6:
+       case NFPROTO_INET:
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       priv->key = ntohl(nla_get_u32(tb[NFTA_XFRM_KEY]));
+       switch (priv->key) {
+       case NFT_XFRM_KEY_REQID:
+       case NFT_XFRM_KEY_SPI:
+               len = sizeof(u32);
+               break;
+       case NFT_XFRM_KEY_DADDR_IP4:
+       case NFT_XFRM_KEY_SADDR_IP4:
+               len = sizeof(struct in_addr);
+               break;
+       case NFT_XFRM_KEY_DADDR_IP6:
+       case NFT_XFRM_KEY_SADDR_IP6:
+               len = sizeof(struct in6_addr);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       dir = nla_get_u8(tb[NFTA_XFRM_DIR]);
+       switch (dir) {
+       case XFRM_POLICY_IN:
+       case XFRM_POLICY_OUT:
+               priv->dir = dir;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (tb[NFTA_XFRM_SPNUM])
+               spnum = ntohl(nla_get_be32(tb[NFTA_XFRM_SPNUM]));
+
+       if (spnum >= XFRM_MAX_DEPTH)
+               return -ERANGE;
+
+       priv->spnum = spnum;
+
+       priv->dreg = nft_parse_register(tb[NFTA_XFRM_DREG]);
+       return nft_validate_register_store(ctx, priv->dreg, NULL,
+                                          NFT_DATA_VALUE, len);
+}
+
+/* Return true if key asks for daddr/saddr and current
+ * state does have a valid address (BEET, TUNNEL).
+ */
+static bool xfrm_state_addr_ok(enum nft_xfrm_keys k, u8 family, u8 mode)
+{
+       switch (k) {
+       case NFT_XFRM_KEY_DADDR_IP4:
+       case NFT_XFRM_KEY_SADDR_IP4:
+               if (family == NFPROTO_IPV4)
+                       break;
+               return false;
+       case NFT_XFRM_KEY_DADDR_IP6:
+       case NFT_XFRM_KEY_SADDR_IP6:
+               if (family == NFPROTO_IPV6)
+                       break;
+               return false;
+       default:
+               return true;
+       }
+
+       return mode == XFRM_MODE_BEET || mode == XFRM_MODE_TUNNEL;
+}
+
+static void nft_xfrm_state_get_key(const struct nft_xfrm *priv,
+                                  struct nft_regs *regs,
+                                  const struct xfrm_state *state,
+                                  u8 family)
+{
+       u32 *dest = &regs->data[priv->dreg];
+
+       if (!xfrm_state_addr_ok(priv->key, family, state->props.mode)) {
+               regs->verdict.code = NFT_BREAK;
+               return;
+       }
+
+       switch (priv->key) {
+       case NFT_XFRM_KEY_UNSPEC:
+       case __NFT_XFRM_KEY_MAX:
+               WARN_ON_ONCE(1);
+               break;
+       case NFT_XFRM_KEY_DADDR_IP4:
+               *dest = state->id.daddr.a4;
+               return;
+       case NFT_XFRM_KEY_DADDR_IP6:
+               memcpy(dest, &state->id.daddr.in6, sizeof(struct in6_addr));
+               return;
+       case NFT_XFRM_KEY_SADDR_IP4:
+               *dest = state->props.saddr.a4;
+               return;
+       case NFT_XFRM_KEY_SADDR_IP6:
+               memcpy(dest, &state->props.saddr.in6, sizeof(struct in6_addr));
+               return;
+       case NFT_XFRM_KEY_REQID:
+               *dest = state->props.reqid;
+               return;
+       case NFT_XFRM_KEY_SPI:
+               *dest = state->id.spi;
+               return;
+       }
+
+       regs->verdict.code = NFT_BREAK;
+}
+
+static void nft_xfrm_get_eval_in(const struct nft_xfrm *priv,
+                                   struct nft_regs *regs,
+                                   const struct nft_pktinfo *pkt)
+{
+       const struct sec_path *sp = pkt->skb->sp;
+       const struct xfrm_state *state;
+
+       if (sp == NULL || sp->len <= priv->spnum) {
+               regs->verdict.code = NFT_BREAK;
+               return;
+       }
+
+       state = sp->xvec[priv->spnum];
+       nft_xfrm_state_get_key(priv, regs, state, nft_pf(pkt));
+}
+
+static void nft_xfrm_get_eval_out(const struct nft_xfrm *priv,
+                                 struct nft_regs *regs,
+                                 const struct nft_pktinfo *pkt)
+{
+       const struct dst_entry *dst = skb_dst(pkt->skb);
+       int i;
+
+       for (i = 0; dst && dst->xfrm;
+            dst = ((const struct xfrm_dst *)dst)->child, i++) {
+               if (i < priv->spnum)
+                       continue;
+
+               nft_xfrm_state_get_key(priv, regs, dst->xfrm, nft_pf(pkt));
+               return;
+       }
+
+       regs->verdict.code = NFT_BREAK;
+}
+
+static void nft_xfrm_get_eval(const struct nft_expr *expr,
+                             struct nft_regs *regs,
+                             const struct nft_pktinfo *pkt)
+{
+       const struct nft_xfrm *priv = nft_expr_priv(expr);
+
+       switch (priv->dir) {
+       case XFRM_POLICY_IN:
+               nft_xfrm_get_eval_in(priv, regs, pkt);
+               break;
+       case XFRM_POLICY_OUT:
+               nft_xfrm_get_eval_out(priv, regs, pkt);
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               regs->verdict.code = NFT_BREAK;
+               break;
+       }
+}
+
+static int nft_xfrm_get_dump(struct sk_buff *skb,
+                            const struct nft_expr *expr)
+{
+       const struct nft_xfrm *priv = nft_expr_priv(expr);
+
+       if (nft_dump_register(skb, NFTA_XFRM_DREG, priv->dreg))
+               return -1;
+
+       if (nla_put_be32(skb, NFTA_XFRM_KEY, htonl(priv->key)))
+               return -1;
+       if (nla_put_u8(skb, NFTA_XFRM_DIR, priv->dir))
+               return -1;
+       if (nla_put_be32(skb, NFTA_XFRM_SPNUM, htonl(priv->spnum)))
+               return -1;
+
+       return 0;
+}
+
+static int nft_xfrm_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
+                            const struct nft_data **data)
+{
+       const struct nft_xfrm *priv = nft_expr_priv(expr);
+       unsigned int hooks;
+
+       switch (priv->dir) {
+       case XFRM_POLICY_IN:
+               hooks = (1 << NF_INET_FORWARD) |
+                       (1 << NF_INET_LOCAL_IN) |
+                       (1 << NF_INET_PRE_ROUTING);
+               break;
+       case XFRM_POLICY_OUT:
+               hooks = (1 << NF_INET_FORWARD) |
+                       (1 << NF_INET_LOCAL_OUT) |
+                       (1 << NF_INET_POST_ROUTING);
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               return -EINVAL;
+       }
+
+       return nft_chain_validate_hooks(ctx->chain, hooks);
+}
+
+
+static struct nft_expr_type nft_xfrm_type;
+static const struct nft_expr_ops nft_xfrm_get_ops = {
+       .type           = &nft_xfrm_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_xfrm)),
+       .eval           = nft_xfrm_get_eval,
+       .init           = nft_xfrm_get_init,
+       .dump           = nft_xfrm_get_dump,
+       .validate       = nft_xfrm_validate,
+};
+
+static struct nft_expr_type nft_xfrm_type __read_mostly = {
+       .name           = "xfrm",
+       .ops            = &nft_xfrm_get_ops,
+       .policy         = nft_xfrm_policy,
+       .maxattr        = NFTA_XFRM_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_xfrm_module_init(void)
+{
+       return nft_register_expr(&nft_xfrm_type);
+}
+
+static void __exit nft_xfrm_module_exit(void)
+{
+       nft_unregister_expr(&nft_xfrm_type);
+}
+
+module_init(nft_xfrm_module_init);
+module_exit(nft_xfrm_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("nf_tables: xfrm/IPSec matching");
+MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
+MODULE_AUTHOR("Máté Eckl <ecklm94@gmail.com>");
+MODULE_ALIAS_NFT_EXPR("xfrm");
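
The new expression matches on the xfrm state a packet was received through (XFRM_POLICY_IN walks skb->sp) or will be sent through (XFRM_POLICY_OUT walks the dst->xfrm chain): reqid, SPI, or the outer tunnel/BEET addresses, optionally at a chosen depth in the state stack via spnum. Assuming the nftables frontend that exposes this as the "ipsec" expression, rules might look like the following (illustrative sketches, not part of this commit):

    # match decapsulated input traffic that came through the SA with reqid 23
    nft add rule inet filter input ipsec in reqid 23 accept

    # match output that will be tunnelled toward a given endpoint
    nft add rule inet filter output ipsec out ip daddr 192.0.2.1 accept
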
index 89457efd2e008261b549dff8fe73d0ec8abaad02..2c7a4b80206f50cfca179f3c5a731bf70091938b 100644 (file)
@@ -159,7 +159,7 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
        /* Make sure the timeout policy matches any existing protocol tracker,
         * otherwise default to generic.
         */
-       l4proto = __nf_ct_l4proto_find(par->family, proto);
+       l4proto = __nf_ct_l4proto_find(proto);
        if (timeout->l4proto->l4proto != l4proto->l4proto) {
                ret = -EINVAL;
                pr_info_ratelimited("Timeout policy `%s' can only be used by L%d protocol number %d\n",
index 5ee85919378348367a3b32b710b4e3b6e5590574..c6acfc2d9c8414d36173e3cf09f94ea64f0d7515 100644 (file)
@@ -68,8 +68,6 @@ struct idletimer_tg *__idletimer_tg_find_by_label(const char *label)
 {
        struct idletimer_tg *entry;
 
-       BUG_ON(!label);
-
        list_for_each_entry(entry, &idletimer_tg_list, entry) {
                if (!strcmp(label, entry->attr.attr.name))
                        return entry;
@@ -172,8 +170,6 @@ static unsigned int idletimer_tg_target(struct sk_buff *skb,
        pr_debug("resetting timer %s, timeout period %u\n",
                 info->label, info->timeout);
 
-       BUG_ON(!info->timer);
-
        mod_timer(&info->timer->timer,
                  msecs_to_jiffies(info->timeout * 1000) + jiffies);
 
index 4ad5fe27e08bcc6732f8f8a9977530b908430578..f16202d26c205a37eb5d456f697152903aa23343 100644 (file)
@@ -35,8 +35,6 @@ secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
        u32 secmark = 0;
        const struct xt_secmark_target_info *info = par->targinfo;
 
-       BUG_ON(info->mode != mode);
-
        switch (mode) {
        case SECMARK_MODE_SEL:
                secmark = info->secid;
index 5d92e178198088b85d040473f909aa9eab78c18e..5cb1ecb29ea4d5c4f5df9fc325aac8e81d0bde58 100644 (file)
@@ -68,6 +68,38 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par)
        return 0;
 }
 
+static int cgroup_mt_check_v2(const struct xt_mtchk_param *par)
+{
+       struct xt_cgroup_info_v2 *info = par->matchinfo;
+       struct cgroup *cgrp;
+
+       if ((info->invert_path & ~1) || (info->invert_classid & ~1))
+               return -EINVAL;
+
+       if (!info->has_path && !info->has_classid) {
+               pr_info("xt_cgroup: no path or classid specified\n");
+               return -EINVAL;
+       }
+
+       if (info->has_path && info->has_classid) {
+               pr_info_ratelimited("path and classid specified\n");
+               return -EINVAL;
+       }
+
+       info->priv = NULL;
+       if (info->has_path) {
+               cgrp = cgroup_get_from_path(info->path);
+               if (IS_ERR(cgrp)) {
+                       pr_info_ratelimited("invalid path, errno=%ld\n",
+                                           PTR_ERR(cgrp));
+                       return -EINVAL;
+               }
+               info->priv = cgrp;
+       }
+
+       return 0;
+}
+
 static bool
 cgroup_mt_v0(const struct sk_buff *skb, struct xt_action_param *par)
 {
@@ -99,6 +131,24 @@ static bool cgroup_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
                        info->invert_classid;
 }
 
+static bool cgroup_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_cgroup_info_v2 *info = par->matchinfo;
+       struct sock_cgroup_data *skcd = &skb->sk->sk_cgrp_data;
+       struct cgroup *ancestor = info->priv;
+       struct sock *sk = skb->sk;
+
+       if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk)))
+               return false;
+
+       if (ancestor)
+               return cgroup_is_descendant(sock_cgroup_ptr(skcd), ancestor) ^
+                       info->invert_path;
+       else
+               return (info->classid == sock_cgroup_classid(skcd)) ^
+                       info->invert_classid;
+}
+
 static void cgroup_mt_destroy_v1(const struct xt_mtdtor_param *par)
 {
        struct xt_cgroup_info_v1 *info = par->matchinfo;
@@ -107,6 +157,14 @@ static void cgroup_mt_destroy_v1(const struct xt_mtdtor_param *par)
                cgroup_put(info->priv);
 }
 
+static void cgroup_mt_destroy_v2(const struct xt_mtdtor_param *par)
+{
+       struct xt_cgroup_info_v2 *info = par->matchinfo;
+
+       if (info->priv)
+               cgroup_put(info->priv);
+}
+
 static struct xt_match cgroup_mt_reg[] __read_mostly = {
        {
                .name           = "cgroup",
@@ -134,6 +192,20 @@ static struct xt_match cgroup_mt_reg[] __read_mostly = {
                                  (1 << NF_INET_POST_ROUTING) |
                                  (1 << NF_INET_LOCAL_IN),
        },
+       {
+               .name           = "cgroup",
+               .revision       = 2,
+               .family         = NFPROTO_UNSPEC,
+               .checkentry     = cgroup_mt_check_v2,
+               .match          = cgroup_mt_v2,
+               .matchsize      = sizeof(struct xt_cgroup_info_v2),
+               .usersize       = offsetof(struct xt_cgroup_info_v2, priv),
+               .destroy        = cgroup_mt_destroy_v2,
+               .me             = THIS_MODULE,
+               .hooks          = (1 << NF_INET_LOCAL_OUT) |
+                                 (1 << NF_INET_POST_ROUTING) |
+                                 (1 << NF_INET_LOCAL_IN),
+       },
 };
 
 static int __init cgroup_mt_init(void)
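
Revision 2 keeps the v1 semantics, matching either by cgroup2 path ancestry or by net_cls classid but never both, with a revised xt_cgroup_info_v2 layout. With matching iptables userspace, usage might look like this (illustrative):

    # match sockets in the given cgroup2 path (or any descendant of it)
    iptables -A OUTPUT -m cgroup --path service/http -j ACCEPT

    # or match by net_cls classid instead
    iptables -A OUTPUT -m cgroup --cgroup 0x100001 -j DROP
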
index 10d61a6eed712442c14cc1011341c2dff5890c2d..fceae245eb0367f2b950a3117851c7c7a162eb7e 100644 (file)
 #include <linux/netfilter/xt_quota.h>
 #include <linux/module.h>
 
-struct xt_quota_priv {
-       spinlock_t      lock;
-       uint64_t        quota;
-};
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
 MODULE_DESCRIPTION("Xtables: countdown quota match");
@@ -26,54 +21,48 @@ static bool
 quota_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
        struct xt_quota_info *q = (void *)par->matchinfo;
-       struct xt_quota_priv *priv = q->master;
+       u64 current_count = atomic64_read(&q->counter);
        bool ret = q->flags & XT_QUOTA_INVERT;
-
-       spin_lock_bh(&priv->lock);
-       if (priv->quota >= skb->len) {
-               priv->quota -= skb->len;
-               ret = !ret;
-       } else {
-               /* we do not allow even small packets from now on */
-               priv->quota = 0;
-       }
-       spin_unlock_bh(&priv->lock);
-
-       return ret;
+       u64 old_count, new_count;
+
+       do {
+               if (current_count == 1)
+                       return ret;
+               if (current_count <= skb->len) {
+                       atomic64_set(&q->counter, 1);
+                       return ret;
+               }
+               old_count = current_count;
+               new_count = current_count - skb->len;
+               current_count = atomic64_cmpxchg(&q->counter, old_count,
+                                                new_count);
+       } while (current_count != old_count);
+       return !ret;
 }
 
 static int quota_mt_check(const struct xt_mtchk_param *par)
 {
        struct xt_quota_info *q = par->matchinfo;
 
+       BUILD_BUG_ON(sizeof(atomic64_t) != sizeof(__u64));
+
        if (q->flags & ~XT_QUOTA_MASK)
                return -EINVAL;
+       if (atomic64_read(&q->counter) > q->quota + 1)
+               return -ERANGE;
 
-       q->master = kmalloc(sizeof(*q->master), GFP_KERNEL);
-       if (q->master == NULL)
-               return -ENOMEM;
-
-       spin_lock_init(&q->master->lock);
-       q->master->quota = q->quota;
+       if (atomic64_read(&q->counter) == 0)
+               atomic64_set(&q->counter, q->quota + 1);
        return 0;
 }
 
-static void quota_mt_destroy(const struct xt_mtdtor_param *par)
-{
-       const struct xt_quota_info *q = par->matchinfo;
-
-       kfree(q->master);
-}
-
 static struct xt_match quota_mt_reg __read_mostly = {
        .name       = "quota",
        .revision   = 0,
        .family     = NFPROTO_UNSPEC,
        .match      = quota_mt,
        .checkentry = quota_mt_check,
-       .destroy    = quota_mt_destroy,
        .matchsize  = sizeof(struct xt_quota_info),
-       .usersize   = offsetof(struct xt_quota_info, master),
        .me         = THIS_MODULE,
 };
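
The rewrite drops the kmalloc'ed, spinlocked per-rule state and keeps the remaining byte count directly in the match info as an atomic64_t, reserving the value 1 as an "exhausted" sentinel (hence the initial charge of quota + 1). The decrement is a standard cmpxchg retry loop; the same logic as a standalone sketch:

    /* Sketch: returns true while quota remains; 1 means "spent". */
    static bool quota_charge(atomic64_t *counter, u64 len)
    {
            u64 cur = atomic64_read(counter);
            u64 old;

            do {
                    if (cur == 1)                   /* already exhausted */
                            return false;
                    if (cur <= len) {               /* this packet drains it */
                            atomic64_set(counter, 1);
                            return false;
                    }
                    old = cur;
                    cur = atomic64_cmpxchg(counter, old, old - len);
            } while (cur != old);                   /* lost a race: retry */

            return true;
    }
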
 
index 0472f34728423ac1a3ba839a72e4aab167df1091..ada144e5645bb3075b36b5c4fd23a1bb9020c874 100644 (file)
@@ -56,7 +56,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
        struct sk_buff *pskb = (struct sk_buff *)skb;
        struct sock *sk = skb->sk;
 
-       if (!net_eq(xt_net(par), sock_net(sk)))
+       if (sk && !net_eq(xt_net(par), sock_net(sk)))
                sk = NULL;
 
        if (!sk)
@@ -117,7 +117,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
        struct sk_buff *pskb = (struct sk_buff *)skb;
        struct sock *sk = skb->sk;
 
-       if (!net_eq(xt_net(par), sock_net(sk)))
+       if (sk && !net_eq(xt_net(par), sock_net(sk)))
                sk = NULL;
 
        if (!sk)
index e3a0538ec0bed789a245a1f50e121f9fb2f2a836..e613a9f896004ec1c4ca7e1b6b047259b622ebd1 100644 (file)
@@ -1706,6 +1706,13 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
                        nlk->flags &= ~NETLINK_F_EXT_ACK;
                err = 0;
                break;
+       case NETLINK_DUMP_STRICT_CHK:
+               if (val)
+                       nlk->flags |= NETLINK_F_STRICT_CHK;
+               else
+                       nlk->flags &= ~NETLINK_F_STRICT_CHK;
+               err = 0;
+               break;
        default:
                err = -ENOPROTOOPT;
        }
@@ -1799,6 +1806,15 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
                        return -EFAULT;
                err = 0;
                break;
+       case NETLINK_DUMP_STRICT_CHK:
+               if (len < sizeof(int))
+                       return -EINVAL;
+               len = sizeof(int);
+               val = nlk->flags & NETLINK_F_STRICT_CHK ? 1 : 0;
+               if (put_user(len, optlen) || put_user(val, optval))
+                       return -EFAULT;
+               err = 0;
+               break;
        default:
                err = -ENOPROTOOPT;
        }
@@ -2171,6 +2187,7 @@ EXPORT_SYMBOL(__nlmsg_put);
 static int netlink_dump(struct sock *sk)
 {
        struct netlink_sock *nlk = nlk_sk(sk);
+       struct netlink_ext_ack extack = {};
        struct netlink_callback *cb;
        struct sk_buff *skb = NULL;
        struct nlmsghdr *nlh;
@@ -2222,8 +2239,11 @@ static int netlink_dump(struct sock *sk)
        skb_reserve(skb, skb_tailroom(skb) - alloc_size);
        netlink_skb_set_owner_r(skb, sk);
 
-       if (nlk->dump_done_errno > 0)
+       if (nlk->dump_done_errno > 0) {
+               cb->extack = &extack;
                nlk->dump_done_errno = cb->dump(skb, cb);
+               cb->extack = NULL;
+       }
 
        if (nlk->dump_done_errno > 0 ||
            skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) {
@@ -2246,6 +2266,12 @@ static int netlink_dump(struct sock *sk)
        memcpy(nlmsg_data(nlh), &nlk->dump_done_errno,
               sizeof(nlk->dump_done_errno));
 
+       if (extack._msg && nlk->flags & NETLINK_F_EXT_ACK) {
+               nlh->nlmsg_flags |= NLM_F_ACK_TLVS;
+               if (!nla_put_string(skb, NLMSGERR_ATTR_MSG, extack._msg))
+                       nlmsg_end(skb, nlh);
+       }
+
        if (sk_filter(sk, skb))
                kfree_skb(skb);
        else
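
Wiring cb->extack up for the duration of the ->dump() call means a dump implementation can attach a human-readable message, which the code above appends as an NLMSGERR_ATTR_MSG TLV to the final DONE message when the socket has extended ACKs enabled. Inside a dump callback this is the usual extack idiom, e.g. (illustrative callback and condition):

    static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
            if (!supported_request(cb)) {           /* illustrative check */
                    NL_SET_ERR_MSG(cb->extack, "reason reported to userspace");
                    return -EINVAL;
            }
            return 0;       /* nothing (more) to dump: netlink sends DONE */
    }
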
@@ -2272,9 +2298,9 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
                         const struct nlmsghdr *nlh,
                         struct netlink_dump_control *control)
 {
+       struct netlink_sock *nlk, *nlk2;
        struct netlink_callback *cb;
        struct sock *sk;
-       struct netlink_sock *nlk;
        int ret;
 
        refcount_inc(&skb->users);
@@ -2308,6 +2334,9 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
        cb->min_dump_alloc = control->min_dump_alloc;
        cb->skb = skb;
 
+       nlk2 = nlk_sk(NETLINK_CB(skb).sk);
+       cb->strict_check = !!(nlk2->flags & NETLINK_F_STRICT_CHK);
+
        if (control->start) {
                ret = control->start(cb);
                if (ret)
index 962de7b3c023d44e5ab5bc5a62544c181963d9cd..5f454c8de6a4de07996578538d98bfd8ad45b950 100644 (file)
@@ -15,6 +15,7 @@
 #define NETLINK_F_LISTEN_ALL_NSID      0x10
 #define NETLINK_F_CAP_ACK              0x20
 #define NETLINK_F_EXT_ACK              0x40
+#define NETLINK_F_STRICT_CHK           0x80
 
 #define NLGRPSZ(x)     (ALIGN(x, sizeof(unsigned long) * 8) / 8)
 #define NLGRPLONGS(x)  (NLGRPSZ(x)/sizeof(unsigned long))
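
NETLINK_F_STRICT_CHK is the per-socket flag behind the new NETLINK_DUMP_STRICT_CHK socket option; userspace opts in per socket, and older kernels reject the setsockopt with ENOPROTOOPT, so callers can fall back gracefully. A minimal sketch:

    #include <errno.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>

    int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
    int one = 1;

    /* Ask the kernel to strictly validate our dump request headers
     * and attributes; harmless to skip on kernels without support. */
    if (setsockopt(fd, SOL_NETLINK, NETLINK_DUMP_STRICT_CHK,
                   &one, sizeof(one)) < 0 && errno == ENOPROTOOPT) {
            /* older kernel: continue without strict checking */
    }
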
index 86a75105af1a2726bc52e44e6c3ac691d719999f..6bec37ab4472796ecd1f453966b27bb911bf8fa8 100644 (file)
@@ -933,6 +933,11 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
        struct nf_conn *ct;
 
        if (!cached) {
+               struct nf_hook_state state = {
+                       .hook = NF_INET_PRE_ROUTING,
+                       .pf = info->family,
+                       .net = net,
+               };
                struct nf_conn *tmpl = info->ct;
                int err;
 
@@ -944,8 +949,7 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
                        nf_ct_set(skb, tmpl, IP_CT_NEW);
                }
 
-               err = nf_conntrack_in(net, info->family,
-                                     NF_INET_PRE_ROUTING, skb);
+               err = nf_conntrack_in(skb, &state);
                if (err != NF_ACCEPT)
                        return -ENOENT;
 
@@ -1312,6 +1316,10 @@ static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
 
        rcu_assign_pointer(help->helper, helper);
        info->helper = helper;
+
+       if (info->nat)
+               request_module("ip_nat_%s", name);
+
        return 0;
 }
 
@@ -1624,10 +1632,6 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
                OVS_NLERR(log, "Failed to allocate conntrack template");
                return -ENOMEM;
        }
-
-       __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
-       nf_conntrack_get(&ct_info.ct->ct_general);
-
        if (helper) {
                err = ovs_ct_add_helper(&ct_info, helper, key, log);
                if (err)
@@ -1639,6 +1643,8 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
        if (err)
                goto err_free_ct;
 
+       __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
+       nf_conntrack_get(&ct_info.ct->ct_general);
        return 0;
 err_free_ct:
        __ovs_ct_free_action(&ct_info);
index 0f5ce77460d44099277e142e37d11620e426e9cb..6679e96ab1dcdf8761845b863c39e1b6aac20d2e 100644 (file)
@@ -1182,14 +1182,14 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
                                                       ovs_header->dp_ifindex,
                                                       reply, info->snd_portid,
                                                       info->snd_seq, 0,
-                                                      OVS_FLOW_CMD_NEW,
+                                                      OVS_FLOW_CMD_SET,
                                                       ufid_flags);
                        BUG_ON(error < 0);
                }
        } else {
                /* Could not alloc without acts before locking. */
                reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
-                                               info, OVS_FLOW_CMD_NEW, false,
+                                               info, OVS_FLOW_CMD_SET, false,
                                                ufid_flags);
 
                if (IS_ERR(reply)) {
@@ -1265,7 +1265,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
        }
 
        reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
-                                       OVS_FLOW_CMD_NEW, true, ufid_flags);
+                                       OVS_FLOW_CMD_GET, true, ufid_flags);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
                goto unlock;
@@ -1389,7 +1389,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
                if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
                                           NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
-                                          OVS_FLOW_CMD_NEW, ufid_flags) < 0)
+                                          OVS_FLOW_CMD_GET, ufid_flags) < 0)
                        break;
 
                cb->args[0] = bucket;
@@ -1730,7 +1730,7 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
        ovs_dp_change(dp, info->attrs);
 
        err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
-                                  info->snd_seq, 0, OVS_DP_CMD_NEW);
+                                  info->snd_seq, 0, OVS_DP_CMD_SET);
        BUG_ON(err < 0);
 
        ovs_unlock();
@@ -1761,7 +1761,7 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
                goto err_unlock_free;
        }
        err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
-                                  info->snd_seq, 0, OVS_DP_CMD_NEW);
+                                  info->snd_seq, 0, OVS_DP_CMD_GET);
        BUG_ON(err < 0);
        ovs_unlock();
 
@@ -1785,7 +1785,7 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
                if (i >= skip &&
                    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
                                         cb->nlh->nlmsg_seq, NLM_F_MULTI,
-                                        OVS_DP_CMD_NEW) < 0)
+                                        OVS_DP_CMD_GET) < 0)
                        break;
                i++;
        }
@@ -2101,7 +2101,7 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
 
        err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
                                      info->snd_portid, info->snd_seq, 0,
-                                     OVS_VPORT_CMD_NEW);
+                                     OVS_VPORT_CMD_SET);
        BUG_ON(err < 0);
 
        ovs_unlock();
@@ -2182,7 +2182,7 @@ static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
                goto exit_unlock_free;
        err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
                                      info->snd_portid, info->snd_seq, 0,
-                                     OVS_VPORT_CMD_NEW);
+                                     OVS_VPORT_CMD_GET);
        BUG_ON(err < 0);
        rcu_read_unlock();
 
@@ -2218,7 +2218,7 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
                                                    NETLINK_CB(cb->skb).portid,
                                                    cb->nlh->nlmsg_seq,
                                                    NLM_F_MULTI,
-                                                   OVS_VPORT_CMD_NEW) < 0)
+                                                   OVS_VPORT_CMD_GET) < 0)
                                goto out;
 
                        j++;
index bb95c43aae76276bac5db2b4e7765b102718de16..26f71cbf7527692af0d66fdd379ee99d38c30019 100644 (file)
@@ -43,7 +43,8 @@ static struct internal_dev *internal_dev_priv(struct net_device *netdev)
 }
 
 /* Called with rcu_read_lock_bh. */
-static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t
+internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
        int len, err;
 
@@ -62,7 +63,7 @@ static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
        } else {
                netdev->stats.tx_errors++;
        }
-       return 0;
+       return NETDEV_TX_OK;
 }
 
 static int internal_dev_open(struct net_device *netdev)
index f85f67b5c1f41e7cd938ee6ffac8d1d2e6b2f46b..ec3095f13aaee114476a8f06706bfeb7f7739002 100644 (file)
@@ -2715,10 +2715,12 @@ tpacket_error:
                        }
                }
 
-               if (po->has_vnet_hdr && virtio_net_hdr_to_skb(skb, vnet_hdr,
-                                                             vio_le())) {
-                       tp_len = -EINVAL;
-                       goto tpacket_error;
+               if (po->has_vnet_hdr) {
+                       if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
+                               tp_len = -EINVAL;
+                               goto tpacket_error;
+                       }
+                       virtio_net_hdr_set_proto(skb, vnet_hdr);
                }
 
                skb->destructor = tpacket_destruct_skb;
@@ -2915,6 +2917,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
                if (err)
                        goto out_free;
                len += sizeof(vnet_hdr);
+               virtio_net_hdr_set_proto(skb, &vnet_hdr);
        }
 
        skb_probe_transport_header(skb, reserve);
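
Both the tpacket path and packet_snd() now call virtio_net_hdr_set_proto() so GSO packets handed in through a virtio_net header no longer enter the stack with a zero skb->protocol. That helper (include/linux/virtio_net.h) derives the protocol from the GSO type, approximately as follows (paraphrased sketch; the header file is authoritative):

    static inline void virtio_net_hdr_set_proto(struct sk_buff *skb,
                                                const struct virtio_net_hdr *hdr)
    {
            switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
            case VIRTIO_NET_HDR_GSO_TCPV4:
            case VIRTIO_NET_HDR_GSO_UDP:
                    skb->protocol = cpu_to_be16(ETH_P_IP);
                    break;
            case VIRTIO_NET_HDR_GSO_TCPV6:
                    skb->protocol = cpu_to_be16(ETH_P_IPV6);
                    break;
            }
    }
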
index ac44d8afffb118101459426f4f8c2edbba4d8401..013dbcb052e5a1c7900fe22f76d1e4a9cbd7f534 100644 (file)
@@ -97,7 +97,8 @@ static int rxrpc_validate_address(struct rxrpc_sock *rx,
            srx->transport_len > len)
                return -EINVAL;
 
-       if (srx->transport.family != rx->family)
+       if (srx->transport.family != rx->family &&
+           srx->transport.family == AF_INET && rx->family != AF_INET6)
                return -EAFNOSUPPORT;
 
        switch (srx->transport.family) {
@@ -384,6 +385,20 @@ u32 rxrpc_kernel_check_life(struct socket *sock, struct rxrpc_call *call)
 }
 EXPORT_SYMBOL(rxrpc_kernel_check_life);
 
+/**
+ * rxrpc_kernel_get_epoch - Retrieve the epoch value from a call.
+ * @sock: The socket the call is on
+ * @call: The call to query
+ *
+ * Allow a kernel service to retrieve the epoch value from a service call to
+ * see if the client at the other end rebooted.
+ */
+u32 rxrpc_kernel_get_epoch(struct socket *sock, struct rxrpc_call *call)
+{
+       return call->conn->proto.epoch;
+}
+EXPORT_SYMBOL(rxrpc_kernel_get_epoch);
+
 /**
  * rxrpc_kernel_check_call - Check a call's state
  * @sock: The socket the call is on
index c9755871042159bdf32a7bd980d8ee4d3a9a51bf..76569c1789152db8fae921e807b9a868ee39c0f1 100644 (file)
@@ -40,17 +40,12 @@ struct rxrpc_crypt {
 struct rxrpc_connection;
 
 /*
- * Mark applied to socket buffers.
+ * Mark applied to socket buffers in skb->mark.  skb->priority is used
+ * to pass supplementary information.
  */
 enum rxrpc_skb_mark {
-       RXRPC_SKB_MARK_DATA,            /* data message */
-       RXRPC_SKB_MARK_FINAL_ACK,       /* final ACK received message */
-       RXRPC_SKB_MARK_BUSY,            /* server busy message */
-       RXRPC_SKB_MARK_REMOTE_ABORT,    /* remote abort message */
-       RXRPC_SKB_MARK_LOCAL_ABORT,     /* local abort message */
-       RXRPC_SKB_MARK_NET_ERROR,       /* network error message */
-       RXRPC_SKB_MARK_LOCAL_ERROR,     /* local error message */
-       RXRPC_SKB_MARK_NEW_CALL,        /* local error message */
+       RXRPC_SKB_MARK_REJECT_BUSY,     /* Reject with BUSY */
+       RXRPC_SKB_MARK_REJECT_ABORT,    /* Reject with ABORT (code in skb->priority) */
 };
 
 /*
@@ -293,7 +288,6 @@ struct rxrpc_peer {
        struct hlist_node       hash_link;
        struct rxrpc_local      *local;
        struct hlist_head       error_targets;  /* targets for net error distribution */
-       struct work_struct      error_distributor;
        struct rb_root          service_conns;  /* Service connections */
        struct list_head        keepalive_link; /* Link in net->peer_keepalive[] */
        time64_t                last_tx_at;     /* Last time packet sent here */
@@ -304,8 +298,6 @@ struct rxrpc_peer {
        unsigned int            maxdata;        /* data size (MTU - hdrsize) */
        unsigned short          hdrsize;        /* header size (IP + UDP + RxRPC) */
        int                     debug_id;       /* debug ID for printks */
-       int                     error_report;   /* Net (+0) or local (+1000000) to distribute */
-#define RXRPC_LOCAL_ERROR_OFFSET 1000000
        struct sockaddr_rxrpc   srx;            /* remote address */
 
        /* calculated RTT cache */
@@ -463,6 +455,16 @@ struct rxrpc_connection {
        u8                      out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
 };
 
+static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
+{
+       return sp->hdr.flags & RXRPC_CLIENT_INITIATED;
+}
+
+static inline bool rxrpc_to_client(const struct rxrpc_skb_priv *sp)
+{
+       return !rxrpc_to_server(sp);
+}
+
 /*
  * Flags in call->flags.
  */
@@ -717,6 +719,8 @@ extern struct workqueue_struct *rxrpc_workqueue;
 int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
 void rxrpc_discard_prealloc(struct rxrpc_sock *);
 struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
+                                          struct rxrpc_sock *,
+                                          struct rxrpc_peer *,
                                           struct rxrpc_connection *,
                                           struct sk_buff *);
 void rxrpc_accept_incoming_calls(struct rxrpc_local *);
@@ -908,7 +912,8 @@ extern unsigned int rxrpc_closed_conn_expiry;
 
 struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
 struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
-                                                  struct sk_buff *);
+                                                  struct sk_buff *,
+                                                  struct rxrpc_peer **);
 void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
 void rxrpc_disconnect_call(struct rxrpc_call *);
 void rxrpc_kill_connection(struct rxrpc_connection *);
@@ -1031,7 +1036,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *);
  * peer_event.c
  */
 void rxrpc_error_report(struct sock *);
-void rxrpc_peer_error_distributor(struct work_struct *);
 void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
                        rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
 void rxrpc_peer_keepalive_worker(struct work_struct *);
@@ -1044,13 +1048,11 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
 struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
                                     struct sockaddr_rxrpc *, gfp_t);
 struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
-struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *,
-                                             struct rxrpc_peer *);
+void rxrpc_new_incoming_peer(struct rxrpc_local *, struct rxrpc_peer *);
 void rxrpc_destroy_all_peers(struct rxrpc_net *);
 struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
 void rxrpc_put_peer(struct rxrpc_peer *);
-void __rxrpc_queue_peer_error(struct rxrpc_peer *);
 
 /*
  * proc.c
@@ -1093,7 +1095,6 @@ void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
 void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
 void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
 void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
-void rxrpc_lose_skb(struct sk_buff *, enum rxrpc_skb_trace);
 void rxrpc_purge_queue(struct sk_buff_head *);
 
 /*
@@ -1110,8 +1111,7 @@ static inline void rxrpc_sysctl_exit(void) {}
 /*
  * utils.c
  */
-int rxrpc_extract_addr_from_skb(struct rxrpc_local *, struct sockaddr_rxrpc *,
-                               struct sk_buff *);
+int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);
 
 static inline bool before(u32 seq1, u32 seq2)
 {
index 9d1e298b784c8b595626ec0b8f5af0f14e7e03a4..8354cadbb8392e9d5ea655fe3728df4e8d4e65dc 100644 (file)
@@ -249,11 +249,11 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
  */
 static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
                                                    struct rxrpc_local *local,
+                                                   struct rxrpc_peer *peer,
                                                    struct rxrpc_connection *conn,
                                                    struct sk_buff *skb)
 {
        struct rxrpc_backlog *b = rx->backlog;
-       struct rxrpc_peer *peer, *xpeer;
        struct rxrpc_call *call;
        unsigned short call_head, conn_head, peer_head;
        unsigned short call_tail, conn_tail, peer_tail;
@@ -276,21 +276,18 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
                return NULL;
 
        if (!conn) {
-               /* No connection.  We're going to need a peer to start off
-                * with.  If one doesn't yet exist, use a spare from the
-                * preallocation set.  We dump the address into the spare in
-                * anticipation - and to save on stack space.
-                */
-               xpeer = b->peer_backlog[peer_tail];
-               if (rxrpc_extract_addr_from_skb(local, &xpeer->srx, skb) < 0)
-                       return NULL;
-
-               peer = rxrpc_lookup_incoming_peer(local, xpeer);
-               if (peer == xpeer) {
+               if (peer && !rxrpc_get_peer_maybe(peer))
+                       peer = NULL;
+               if (!peer) {
+                       peer = b->peer_backlog[peer_tail];
+                       if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0)
+                               return NULL;
                        b->peer_backlog[peer_tail] = NULL;
                        smp_store_release(&b->peer_backlog_tail,
                                          (peer_tail + 1) &
                                          (RXRPC_BACKLOG_MAX - 1));
+
+                       rxrpc_new_incoming_peer(local, peer);
                }
 
                /* Now allocate and set up the connection */
@@ -335,45 +332,31 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
  * The call is returned with the user access mutex held.
  */
 struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
+                                          struct rxrpc_sock *rx,
+                                          struct rxrpc_peer *peer,
                                           struct rxrpc_connection *conn,
                                           struct sk_buff *skb)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-       struct rxrpc_sock *rx;
        struct rxrpc_call *call;
-       u16 service_id = sp->hdr.serviceId;
 
        _enter("");
 
-       /* Get the socket providing the service */
-       rx = rcu_dereference(local->service);
-       if (rx && (service_id == rx->srx.srx_service ||
-                  service_id == rx->second_service))
-               goto found_service;
-
-       trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
-                         RX_INVALID_OPERATION, EOPNOTSUPP);
-       skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
-       skb->priority = RX_INVALID_OPERATION;
-       _leave(" = NULL [service]");
-       return NULL;
-
-found_service:
        spin_lock(&rx->incoming_lock);
        if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
            rx->sk.sk_state == RXRPC_CLOSE) {
                trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
                                  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
-               skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
+               skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
                skb->priority = RX_INVALID_OPERATION;
                _leave(" = NULL [close]");
                call = NULL;
                goto out;
        }
 
-       call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
+       call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
        if (!call) {
-               skb->mark = RXRPC_SKB_MARK_BUSY;
+               skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
                _leave(" = NULL [busy]");
                call = NULL;
                goto out;
index 9486293fef5c6f98c96397fc90eb14eecf332196..799f75b6900ddc4a7a5aecf87325b355ebbbcecc 100644 (file)
@@ -400,7 +400,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
        rcu_assign_pointer(conn->channels[chan].call, call);
 
        spin_lock(&conn->params.peer->lock);
-       hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
+       hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
        spin_unlock(&conn->params.peer->lock);
 
        _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);
index f8f37188a9322829b8f4277c09b7329d2f4c1da0..8acf74fe24c03646916c1b69cddf8c7be3f79d43 100644 (file)
@@ -710,8 +710,8 @@ int rxrpc_connect_call(struct rxrpc_call *call,
        }
 
        spin_lock_bh(&call->conn->params.peer->lock);
-       hlist_add_head(&call->error_link,
-                      &call->conn->params.peer->error_targets);
+       hlist_add_head_rcu(&call->error_link,
+                          &call->conn->params.peer->error_targets);
        spin_unlock_bh(&call->conn->params.peer->lock);
 
 out:
index 77440a356b14ae60e875fcd94a2613227fd899cf..c332722820c2bbcaa0d8d98464d535ffc36627d9 100644 (file)
@@ -69,10 +69,14 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
  * If successful, a pointer to the connection is returned, but no ref is taken.
  * NULL is returned if there is no match.
  *
+ * When searching for a service call, if we find a peer but no connection, we
+ * return that through *_peer in case we need to create a new service call.
+ *
  * The caller must be holding the RCU read lock.
  */
 struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
-                                                  struct sk_buff *skb)
+                                                  struct sk_buff *skb,
+                                                  struct rxrpc_peer **_peer)
 {
        struct rxrpc_connection *conn;
        struct rxrpc_conn_proto k;
@@ -82,14 +86,12 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
 
        _enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);
 
-       if (rxrpc_extract_addr_from_skb(local, &srx, skb) < 0)
+       if (rxrpc_extract_addr_from_skb(&srx, skb) < 0)
                goto not_found;
 
-       k.epoch = sp->hdr.epoch;
-       k.cid   = sp->hdr.cid & RXRPC_CIDMASK;
-
-       /* We may have to handle mixing IPv4 and IPv6 */
-       if (srx.transport.family != local->srx.transport.family) {
+       if (srx.transport.family != local->srx.transport.family &&
+           (srx.transport.family == AF_INET &&
+            local->srx.transport.family != AF_INET6)) {
                pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
                                    srx.transport.family,
                                    local->srx.transport.family);
@@ -99,7 +101,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
        k.epoch = sp->hdr.epoch;
        k.cid   = sp->hdr.cid & RXRPC_CIDMASK;
 
-       if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
+       if (rxrpc_to_server(sp)) {
                /* We need to look up service connections by the full protocol
                 * parameter set.  We look up the peer first as an intermediate
                 * step and then the connection from the peer's tree.
@@ -107,6 +109,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
                peer = rxrpc_lookup_peer_rcu(local, &srx);
                if (!peer)
                        goto not_found;
+               *_peer = peer;
                conn = rxrpc_find_service_conn_rcu(peer, skb);
                if (!conn || atomic_read(&conn->usage) == 0)
                        goto not_found;
@@ -214,7 +217,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
        call->peer->cong_cwnd = call->cong_cwnd;
 
        spin_lock_bh(&conn->params.peer->lock);
-       hlist_del_init(&call->error_link);
+       hlist_del_rcu(&call->error_link);
        spin_unlock_bh(&conn->params.peer->lock);
 
        if (rxrpc_is_client_call(call))
index ee8e7e1d5c0fffd0f14a0c78afdf9a7ff479665b..5b2626929822b6d9daba55c7abbd79195834f7ba 100644 (file)
@@ -622,13 +622,14 @@ static void rxrpc_input_requested_ack(struct rxrpc_call *call,
                if (!skb)
                        continue;
 
+               sent_at = skb->tstamp;
+               smp_rmb(); /* Read timestamp before serial. */
                sp = rxrpc_skb(skb);
                if (sp->hdr.serial != orig_serial)
                        continue;
-               smp_rmb();
-               sent_at = skb->tstamp;
                goto found;
        }
+
        return;
 
 found:
@@ -1124,12 +1125,14 @@ void rxrpc_data_ready(struct sock *udp_sk)
 {
        struct rxrpc_connection *conn;
        struct rxrpc_channel *chan;
-       struct rxrpc_call *call;
+       struct rxrpc_call *call = NULL;
        struct rxrpc_skb_priv *sp;
        struct rxrpc_local *local = udp_sk->sk_user_data;
+       struct rxrpc_peer *peer = NULL;
+       struct rxrpc_sock *rx = NULL;
        struct sk_buff *skb;
        unsigned int channel;
-       int ret, skew;
+       int ret, skew = 0;
 
        _enter("%p", udp_sk);
 
@@ -1143,6 +1146,9 @@ void rxrpc_data_ready(struct sock *udp_sk)
                return;
        }
 
+       if (skb->tstamp == 0)
+               skb->tstamp = ktime_get_real();
+
        rxrpc_new_skb(skb, rxrpc_skb_rx_received);
 
        _net("recv skb %p", skb);
@@ -1170,53 +1176,82 @@ void rxrpc_data_ready(struct sock *udp_sk)
                static int lose;
                if ((lose++ & 7) == 7) {
                        trace_rxrpc_rx_lose(sp);
-                       rxrpc_lose_skb(skb, rxrpc_skb_rx_lost);
+                       rxrpc_free_skb(skb, rxrpc_skb_rx_lost);
                        return;
                }
        }
 
        trace_rxrpc_rx_packet(sp);
 
-       _net("Rx RxRPC %s ep=%x call=%x:%x",
-            sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
-            sp->hdr.epoch, sp->hdr.cid, sp->hdr.callNumber);
-
-       if (sp->hdr.type >= RXRPC_N_PACKET_TYPES ||
-           !((RXRPC_SUPPORTED_PACKET_TYPES >> sp->hdr.type) & 1)) {
-               _proto("Rx Bad Packet Type %u", sp->hdr.type);
-               goto bad_message;
-       }
-
        switch (sp->hdr.type) {
        case RXRPC_PACKET_TYPE_VERSION:
-               if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED))
+               if (rxrpc_to_client(sp))
                        goto discard;
                rxrpc_post_packet_to_local(local, skb);
                goto out;
 
        case RXRPC_PACKET_TYPE_BUSY:
-               if (sp->hdr.flags & RXRPC_CLIENT_INITIATED)
+               if (rxrpc_to_server(sp))
                        goto discard;
                /* Fall through */
+       case RXRPC_PACKET_TYPE_ACK:
+       case RXRPC_PACKET_TYPE_ACKALL:
+               if (sp->hdr.callNumber == 0)
+                       goto bad_message;
+               /* Fall through */
+       case RXRPC_PACKET_TYPE_ABORT:
+               break;
 
        case RXRPC_PACKET_TYPE_DATA:
-               if (sp->hdr.callNumber == 0)
+               if (sp->hdr.callNumber == 0 ||
+                   sp->hdr.seq == 0)
                        goto bad_message;
                if (sp->hdr.flags & RXRPC_JUMBO_PACKET &&
                    !rxrpc_validate_jumbo(skb))
                        goto bad_message;
                break;
 
+       case RXRPC_PACKET_TYPE_CHALLENGE:
+               if (rxrpc_to_server(sp))
+                       goto discard;
+               break;
+       case RXRPC_PACKET_TYPE_RESPONSE:
+               if (rxrpc_to_client(sp))
+                       goto discard;
+               break;
+
                /* Packet types 9-11 should just be ignored. */
        case RXRPC_PACKET_TYPE_PARAMS:
        case RXRPC_PACKET_TYPE_10:
        case RXRPC_PACKET_TYPE_11:
                goto discard;
+
+       default:
+               _proto("Rx Bad Packet Type %u", sp->hdr.type);
+               goto bad_message;
        }
 
+       if (sp->hdr.serviceId == 0)
+               goto bad_message;
+
        rcu_read_lock();
 
-       conn = rxrpc_find_connection_rcu(local, skb);
+       if (rxrpc_to_server(sp)) {
+               /* Weed out packets to services we're not offering.  Packets
+                * that would begin a call are explicitly rejected and the rest
+                * are just discarded.
+                */
+               rx = rcu_dereference(local->service);
+               if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
+                           sp->hdr.serviceId != rx->second_service)) {
+                       if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
+                           sp->hdr.seq == 1)
+                               goto unsupported_service;
+                       goto discard_unlock;
+               }
+       }
+
+       conn = rxrpc_find_connection_rcu(local, skb, &peer);
        if (conn) {
                if (sp->hdr.securityIndex != conn->security_ix)
                        goto wrong_security;
@@ -1280,7 +1315,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
                call = rcu_dereference(chan->call);
 
                if (sp->hdr.callNumber > chan->call_id) {
-                       if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED)) {
+                       if (rxrpc_to_client(sp)) {
                                rcu_read_unlock();
                                goto reject_packet;
                        }
@@ -1297,19 +1332,15 @@ void rxrpc_data_ready(struct sock *udp_sk)
                        if (!test_bit(RXRPC_CALL_RX_HEARD, &call->flags))
                                set_bit(RXRPC_CALL_RX_HEARD, &call->flags);
                }
-       } else {
-               skew = 0;
-               call = NULL;
        }
 
        if (!call || atomic_read(&call->usage) == 0) {
-               if (!(sp->hdr.type & RXRPC_CLIENT_INITIATED) ||
-                   sp->hdr.callNumber == 0 ||
+               if (rxrpc_to_client(sp) ||
                    sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
                        goto bad_message_unlock;
                if (sp->hdr.seq != 1)
                        goto discard_unlock;
-               call = rxrpc_new_incoming_call(local, conn, skb);
+               call = rxrpc_new_incoming_call(local, rx, peer, conn, skb);
                if (!call) {
                        rcu_read_unlock();
                        goto reject_packet;
@@ -1340,6 +1371,13 @@ wrong_security:
        skb->priority = RXKADINCONSISTENCY;
        goto post_abort;
 
+unsupported_service:
+       rcu_read_unlock();
+       trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+                         RX_INVALID_OPERATION, EOPNOTSUPP);
+       skb->priority = RX_INVALID_OPERATION;
+       goto post_abort;
+
 reupgrade:
        rcu_read_unlock();
        trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
@@ -1354,7 +1392,7 @@ bad_message:
 protocol_error:
        skb->priority = RX_PROTOCOL_ERROR;
 post_abort:
-       skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
+       skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
 reject_packet:
        trace_rxrpc_rx_done(skb->mark, skb->priority);
        rxrpc_reject_packet(local, skb);
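The direction checks in this hunk replace open-coded tests on RXRPC_CLIENT_INITIATED; note that the removed line tested sp->hdr.type rather than sp->hdr.flags, which this change also fixes. A minimal sketch of what such helpers look like, assuming the direction is still derived from the flags byte (the names match the diff; the bodies are inferred):

	/* Sketch: a client-initiated packet travels towards a server;
	 * everything else travels towards a client.
	 */
	static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
	{
		return sp->hdr.flags & RXRPC_CLIENT_INITIATED;
	}

	static inline bool rxrpc_to_client(const struct rxrpc_skb_priv *sp)
	{
		return !rxrpc_to_server(sp);
	}
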
index 13bd8a4dfac7126effa56e4a5766634e8278e0cc..927ead43df42592f3029051f5362016f6a636f05 100644 (file)
@@ -39,7 +39,7 @@ static void rxrpc_send_version_request(struct rxrpc_local *local,
 
        _enter("");
 
-       if (rxrpc_extract_addr_from_skb(local, &srx, skb) < 0)
+       if (rxrpc_extract_addr_from_skb(&srx, skb) < 0)
                return;
 
        msg.msg_name    = &srx.transport;
index 777c3ed4cfc03d3923e052d95597926a1893a163..94d234e9c685fbe4324726df73800ed0f873e01b 100644 (file)
@@ -135,10 +135,10 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
        }
 
        switch (local->srx.transport.family) {
-       case AF_INET:
-               /* we want to receive ICMP errors */
+       case AF_INET6:
+               /* we want to receive ICMPv6 errors */
                opt = 1;
-               ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
+               ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
                                        (char *) &opt, sizeof(opt));
                if (ret < 0) {
                        _debug("setsockopt failed");
@@ -146,19 +146,22 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
                }
 
                /* we want to set the don't fragment bit */
-               opt = IP_PMTUDISC_DO;
-               ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
+               opt = IPV6_PMTUDISC_DO;
+               ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
                                        (char *) &opt, sizeof(opt));
                if (ret < 0) {
                        _debug("setsockopt failed");
                        goto error;
                }
-               break;
 
-       case AF_INET6:
+               /* Fall through and set IPv4 options too; otherwise we don't
+                * get errors from IPv4 packets sent through the IPv6 socket.
+                */
+
+       case AF_INET:
                /* we want to receive ICMP errors */
                opt = 1;
-               ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
+               ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
                                        (char *) &opt, sizeof(opt));
                if (ret < 0) {
                        _debug("setsockopt failed");
@@ -166,13 +169,22 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
                }
 
                /* we want to set the don't fragment bit */
-               opt = IPV6_PMTUDISC_DO;
-               ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
+               opt = IP_PMTUDISC_DO;
+               ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
                                        (char *) &opt, sizeof(opt));
                if (ret < 0) {
                        _debug("setsockopt failed");
                        goto error;
                }
+
+               /* We want receive timestamps. */
+               opt = 1;
+               ret = kernel_setsockopt(local->socket, SOL_SOCKET, SO_TIMESTAMPNS,
+                                       (char *)&opt, sizeof(opt));
+               if (ret < 0) {
+                       _debug("setsockopt failed");
+                       goto error;
+               }
                break;
 
        default:
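The AF_INET6 case above now deliberately falls through into AF_INET so that a single IPv6 UDP socket also gets the IPv4 error-queue and PMTU options; without that, errors for IPv4 packets sent through the IPv6 socket are never delivered. A condensed sketch of the resulting shape, with error handling omitted and sock standing in for local->socket:

	switch (local->srx.transport.family) {
	case AF_INET6:
		opt = 1;
		kernel_setsockopt(sock, SOL_IPV6, IPV6_RECVERR, (char *)&opt, sizeof(opt));
		opt = IPV6_PMTUDISC_DO;
		kernel_setsockopt(sock, SOL_IPV6, IPV6_MTU_DISCOVER, (char *)&opt, sizeof(opt));
		/* fall through: v4 traffic on the v6 socket needs the v4 options too */
	case AF_INET:
		opt = 1;
		kernel_setsockopt(sock, SOL_IP, IP_RECVERR, (char *)&opt, sizeof(opt));
		opt = IP_PMTUDISC_DO;
		kernel_setsockopt(sock, SOL_IP, IP_MTU_DISCOVER, (char *)&opt, sizeof(opt));
		opt = 1;	/* receive timestamps */
		kernel_setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPNS, (char *)&opt, sizeof(opt));
		break;
	}
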
index ccf5de160444f4f08fa44310dbf4db29bfb8846d..0f0b499d12027cff4ecee7c0cd224fc71e617878 100644 (file)
@@ -124,7 +124,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
        struct kvec iov[2];
        rxrpc_serial_t serial;
        rxrpc_seq_t hard_ack, top;
-       ktime_t now;
        size_t len, n;
        int ret;
        u8 reason;
@@ -196,9 +195,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
                /* We need to stick a time in before we send the packet in case
                 * the reply gets back before kernel_sendmsg() completes - but
                 * asking UDP to send the packet can take a relatively long
-                * time, so we update the time after, on the assumption that
-                * the packet transmission is more likely to happen towards the
-                * end of the kernel_sendmsg() call.
+                * time.
                 */
                call->ping_time = ktime_get_real();
                set_bit(RXRPC_CALL_PINGING, &call->flags);
@@ -206,9 +203,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
        }
 
        ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
-       now = ktime_get_real();
-       if (ping)
-               call->ping_time = now;
        conn->params.peer->last_tx_at = ktime_get_seconds();
        if (ret < 0)
                trace_rxrpc_tx_fail(call->debug_id, serial, ret,
@@ -363,8 +357,14 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
 
        /* If our RTT cache needs working on, request an ACK.  Also request
         * ACKs if a DATA packet appears to have been lost.
+        *
+        * However, we mustn't request an ACK on the last reply packet of a
+        * service call, lest OpenAFS incorrectly send us an ACK with some
+        * soft-ACKs in it and then never follow up with a proper hard ACK.
         */
-       if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
+       if ((!(sp->hdr.flags & RXRPC_LAST_PACKET) ||
+            rxrpc_to_server(sp)
+            ) &&
            (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
             retrans ||
             call->cong_mode == RXRPC_CALL_SLOW_START ||
@@ -378,11 +378,13 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
                if ((lose++ & 7) == 7) {
                        ret = 0;
                        lost = true;
-                       goto done;
                }
        }
 
-       _proto("Tx DATA %%%u { #%u }", serial, sp->hdr.seq);
+       trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags,
+                           retrans, lost);
+       if (lost)
+               goto done;
 
        /* send the packet with the don't fragment bit set if we currently
         * think it's small enough */
@@ -390,6 +392,11 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
                goto send_fragmentable;
 
        down_read(&conn->params.local->defrag_sem);
+
+       sp->hdr.serial = serial;
+       smp_wmb(); /* Set serial before timestamp */
+       skb->tstamp = ktime_get_real();
+
        /* send the packet by UDP
         * - returns -EMSGSIZE if UDP would have to fragment the packet
         *   to go out of the interface
@@ -410,15 +417,9 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
                goto send_fragmentable;
 
 done:
-       trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags,
-                           retrans, lost);
        if (ret >= 0) {
-               ktime_t now = ktime_get_real();
-               skb->tstamp = now;
-               smp_wmb();
-               sp->hdr.serial = serial;
                if (whdr.flags & RXRPC_REQUEST_ACK) {
-                       call->peer->rtt_last_req = now;
+                       call->peer->rtt_last_req = skb->tstamp;
                        trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
                        if (call->peer->rtt_usage > 1) {
                                unsigned long nowj = jiffies, ack_lost_at;
@@ -457,6 +458,10 @@ send_fragmentable:
 
        down_write(&conn->params.local->defrag_sem);
 
+       sp->hdr.serial = serial;
+       smp_wmb(); /* Set serial before timestamp */
+       skb->tstamp = ktime_get_real();
+
        switch (conn->params.local->srx.transport.family) {
        case AF_INET:
                opt = IP_PMTUDISC_DONT;
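Both transmit paths now publish sp->hdr.serial before timestamping the skb, with smp_wmb() ordering the two stores. A reader that keys off the timestamp would pair that with a read barrier; a sketch of the assumed pairing:

	/* Writer (as in the patch): serial is visible before the timestamp. */
	sp->hdr.serial = serial;
	smp_wmb();			/* Set serial before timestamp */
	skb->tstamp = ktime_get_real();

	/* Reader (sketch, assumption): once a timestamp is observed, the
	 * matching serial may be relied upon after a read barrier.
	 */
	ktime_t ts = skb->tstamp;
	smp_rmb();			/* Read timestamp before serial */
	serial = sp->hdr.serial;
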
@@ -519,7 +524,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
        struct kvec iov[2];
        size_t size;
        __be32 code;
-       int ret;
+       int ret, ioc;
 
        _enter("%d", local->debug_id);
 
@@ -527,7 +532,6 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
        iov[0].iov_len = sizeof(whdr);
        iov[1].iov_base = &code;
        iov[1].iov_len = sizeof(code);
-       size = sizeof(whdr) + sizeof(code);
 
        msg.msg_name = &srx.transport;
        msg.msg_control = NULL;
@@ -535,16 +539,30 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
        msg.msg_flags = 0;
 
        memset(&whdr, 0, sizeof(whdr));
-       whdr.type = RXRPC_PACKET_TYPE_ABORT;
 
        while ((skb = skb_dequeue(&local->reject_queue))) {
                rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
                sp = rxrpc_skb(skb);
 
-               if (rxrpc_extract_addr_from_skb(local, &srx, skb) == 0) {
-                       msg.msg_namelen = srx.transport_len;
-
+               switch (skb->mark) {
+               case RXRPC_SKB_MARK_REJECT_BUSY:
+                       whdr.type = RXRPC_PACKET_TYPE_BUSY;
+                       size = sizeof(whdr);
+                       ioc = 1;
+                       break;
+               case RXRPC_SKB_MARK_REJECT_ABORT:
+                       whdr.type = RXRPC_PACKET_TYPE_ABORT;
                        code = htonl(skb->priority);
+                       size = sizeof(whdr) + sizeof(code);
+                       ioc = 2;
+                       break;
+               default:
+                       rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+                       continue;
+               }
+
+               if (rxrpc_extract_addr_from_skb(&srx, skb) == 0) {
+                       msg.msg_namelen = srx.transport_len;
 
                        whdr.epoch      = htonl(sp->hdr.epoch);
                        whdr.cid        = htonl(sp->hdr.cid);
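rxrpc_reject_packets() now emits either a BUSY packet (header only, one iovec) or an ABORT (header plus abort code, two iovecs), selected by skb->mark; unrecognised marks are freed and skipped. The iovec count and byte count chosen in the switch must stay in step when the message is sent; a sketch of the send under that assumption:

	/* Sketch: ioc and size were set together in the switch above. */
	ret = kernel_sendmsg(local->socket, &msg, iov, ioc, size);
	if (ret < 0)
		_debug("reject sendmsg failed: %d", ret);
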
index 4f9da2f51c694c3f93d3883476057377664b80e7..81a7869325a625c9c1ba45fed77e25ee23fe738a 100644 (file)
@@ -23,6 +23,8 @@
 #include "ar-internal.h"
 
 static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
+static void rxrpc_distribute_error(struct rxrpc_peer *, int,
+                                  enum rxrpc_call_completion);
 
 /*
  * Find the peer associated with an ICMP packet.
@@ -45,6 +47,8 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
         */
        switch (srx->transport.family) {
        case AF_INET:
+               srx->transport_len = sizeof(srx->transport.sin);
+               srx->transport.family = AF_INET;
                srx->transport.sin.sin_port = serr->port;
                switch (serr->ee.ee_origin) {
                case SO_EE_ORIGIN_ICMP:
@@ -68,20 +72,20 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
 
 #ifdef CONFIG_AF_RXRPC_IPV6
        case AF_INET6:
-               srx->transport.sin6.sin6_port = serr->port;
                switch (serr->ee.ee_origin) {
                case SO_EE_ORIGIN_ICMP6:
                        _net("Rx ICMP6");
+                       srx->transport.sin6.sin6_port = serr->port;
                        memcpy(&srx->transport.sin6.sin6_addr,
                               skb_network_header(skb) + serr->addr_offset,
                               sizeof(struct in6_addr));
                        break;
                case SO_EE_ORIGIN_ICMP:
                        _net("Rx ICMP on v6 sock");
-                       srx->transport.sin6.sin6_addr.s6_addr32[0] = 0;
-                       srx->transport.sin6.sin6_addr.s6_addr32[1] = 0;
-                       srx->transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
-                       memcpy(srx->transport.sin6.sin6_addr.s6_addr + 12,
+                       srx->transport_len = sizeof(srx->transport.sin);
+                       srx->transport.family = AF_INET;
+                       srx->transport.sin.sin_port = serr->port;
+                       memcpy(&srx->transport.sin.sin_addr,
                               skb_network_header(skb) + serr->addr_offset,
                               sizeof(struct in_addr));
                        break;
@@ -194,8 +198,6 @@ void rxrpc_error_report(struct sock *sk)
        rcu_read_unlock();
        rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
 
-       /* The ref we obtained is passed off to the work item */
-       __rxrpc_queue_peer_error(peer);
        _leave("");
 }
 
@@ -205,6 +207,7 @@ void rxrpc_error_report(struct sock *sk)
 static void rxrpc_store_error(struct rxrpc_peer *peer,
                              struct sock_exterr_skb *serr)
 {
+       enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
        struct sock_extended_err *ee;
        int err;
 
@@ -255,7 +258,7 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
        case SO_EE_ORIGIN_NONE:
        case SO_EE_ORIGIN_LOCAL:
                _proto("Rx Received local error { error=%d }", err);
-               err += RXRPC_LOCAL_ERROR_OFFSET;
+               compl = RXRPC_CALL_LOCAL_ERROR;
                break;
 
        case SO_EE_ORIGIN_ICMP6:
@@ -264,48 +267,23 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
                break;
        }
 
-       peer->error_report = err;
+       rxrpc_distribute_error(peer, err, compl);
 }
 
 /*
- * Distribute an error that occurred on a peer
+ * Distribute an error that occurred on a peer.
  */
-void rxrpc_peer_error_distributor(struct work_struct *work)
+static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
+                                  enum rxrpc_call_completion compl)
 {
-       struct rxrpc_peer *peer =
-               container_of(work, struct rxrpc_peer, error_distributor);
        struct rxrpc_call *call;
-       enum rxrpc_call_completion compl;
-       int error;
-
-       _enter("");
-
-       error = READ_ONCE(peer->error_report);
-       if (error < RXRPC_LOCAL_ERROR_OFFSET) {
-               compl = RXRPC_CALL_NETWORK_ERROR;
-       } else {
-               compl = RXRPC_CALL_LOCAL_ERROR;
-               error -= RXRPC_LOCAL_ERROR_OFFSET;
-       }
-
-       _debug("ISSUE ERROR %s %d", rxrpc_call_completions[compl], error);
 
-       spin_lock_bh(&peer->lock);
-
-       while (!hlist_empty(&peer->error_targets)) {
-               call = hlist_entry(peer->error_targets.first,
-                                  struct rxrpc_call, error_link);
-               hlist_del_init(&call->error_link);
+       hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
                rxrpc_see_call(call);
-
-               if (rxrpc_set_call_completion(call, compl, 0, -error))
+               if (call->state < RXRPC_CALL_COMPLETE &&
+                   rxrpc_set_call_completion(call, compl, 0, -error))
                        rxrpc_notify_socket(call);
        }
-
-       spin_unlock_bh(&peer->lock);
-
-       rxrpc_put_peer(peer);
-       _leave("");
 }
 
 /*
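With the error distributor workqueue gone, rxrpc_store_error() maps the error origin to a completion type and rxrpc_distribute_error() walks peer->error_targets directly. The hlist_for_each_entry_rcu() walk assumes the caller is inside an RCU read-side critical section, which rxrpc_error_report() provides; a sketch of the assumed calling pattern:

	rcu_read_lock();
	rxrpc_store_error(peer, serr);	/* ends in rxrpc_distribute_error() */
	rcu_read_unlock();
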
index 1dc7648e3eff34f25ceea7b0edbd74b5f8cd02b3..01a9febfa36714da7293c1b9b5a5235d0947f8d0 100644 (file)
@@ -124,11 +124,9 @@ static struct rxrpc_peer *__rxrpc_lookup_peer_rcu(
        struct rxrpc_net *rxnet = local->rxnet;
 
        hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
-               if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0) {
-                       if (atomic_read(&peer->usage) == 0)
-                               return NULL;
+               if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 &&
+                   atomic_read(&peer->usage) > 0)
                        return peer;
-               }
        }
 
        return NULL;
@@ -222,8 +220,6 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
                atomic_set(&peer->usage, 1);
                peer->local = local;
                INIT_HLIST_HEAD(&peer->error_targets);
-               INIT_WORK(&peer->error_distributor,
-                         &rxrpc_peer_error_distributor);
                peer->service_conns = RB_ROOT;
                seqlock_init(&peer->service_conn_lock);
                spin_lock_init(&peer->lock);
@@ -299,34 +295,23 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
 }
 
 /*
- * Set up a new incoming peer.  The address is prestored in the preallocated
- * peer.
+ * Set up a new incoming peer.  There shouldn't be any other matching peers
+ * since we've already searched the list from the non-reentrant context (the
+ * data_ready handler), which is the only place new peers can be added.
  */
-struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
-                                             struct rxrpc_peer *prealloc)
+void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
 {
-       struct rxrpc_peer *peer;
        struct rxrpc_net *rxnet = local->rxnet;
        unsigned long hash_key;
 
-       hash_key = rxrpc_peer_hash_key(local, &prealloc->srx);
-       prealloc->local = local;
-       rxrpc_init_peer(prealloc, hash_key);
+       hash_key = rxrpc_peer_hash_key(local, &peer->srx);
+       peer->local = local;
+       rxrpc_init_peer(peer, hash_key);
 
        spin_lock(&rxnet->peer_hash_lock);
-
-       /* Need to check that we aren't racing with someone else */
-       peer = __rxrpc_lookup_peer_rcu(local, &prealloc->srx, hash_key);
-       if (peer && !rxrpc_get_peer_maybe(peer))
-               peer = NULL;
-       if (!peer) {
-               peer = prealloc;
-               hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
-               list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
-       }
-
+       hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
+       list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
        spin_unlock(&rxnet->peer_hash_lock);
-       return peer;
 }
 
 /*
@@ -415,21 +400,6 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
        return peer;
 }
 
-/*
- * Queue a peer record.  This passes the caller's ref to the workqueue.
- */
-void __rxrpc_queue_peer_error(struct rxrpc_peer *peer)
-{
-       const void *here = __builtin_return_address(0);
-       int n;
-
-       n = atomic_read(&peer->usage);
-       if (rxrpc_queue_work(&peer->error_distributor))
-               trace_rxrpc_peer(peer, rxrpc_peer_queued_error, n, here);
-       else
-               rxrpc_put_peer(peer);
-}
-
 /*
  * Discard a peer record.
  */
index 93da73bf709857bbd48b2859092175bf43df8dfd..f9cb83c938f35d4ad8e381658da53e8f2494ff6a 100644 (file)
@@ -50,7 +50,6 @@ struct rxrpc_wire_header {
 #define RXRPC_PACKET_TYPE_10           10      /* Ignored */
 #define RXRPC_PACKET_TYPE_11           11      /* Ignored */
 #define RXRPC_PACKET_TYPE_VERSION      13      /* version string request */
-#define RXRPC_N_PACKET_TYPES           14      /* number of packet types (incl type 0) */
 
        uint8_t         flags;          /* packet flags */
 #define RXRPC_CLIENT_INITIATED 0x01            /* signifies a packet generated by a client */
@@ -72,20 +71,6 @@ struct rxrpc_wire_header {
 
 } __packed;
 
-#define RXRPC_SUPPORTED_PACKET_TYPES (                 \
-               (1 << RXRPC_PACKET_TYPE_DATA) |         \
-               (1 << RXRPC_PACKET_TYPE_ACK) |          \
-               (1 << RXRPC_PACKET_TYPE_BUSY) |         \
-               (1 << RXRPC_PACKET_TYPE_ABORT) |        \
-               (1 << RXRPC_PACKET_TYPE_ACKALL) |       \
-               (1 << RXRPC_PACKET_TYPE_CHALLENGE) |    \
-               (1 << RXRPC_PACKET_TYPE_RESPONSE) |     \
-               /*(1 << RXRPC_PACKET_TYPE_DEBUG) | */   \
-               (1 << RXRPC_PACKET_TYPE_PARAMS) |       \
-               (1 << RXRPC_PACKET_TYPE_10) |           \
-               (1 << RXRPC_PACKET_TYPE_11) |           \
-               (1 << RXRPC_PACKET_TYPE_VERSION))
-
 /*****************************************************************************/
 /*
  * jumbo packet secondary header
index 816b19a78809349984f3fee0fbccbc7a32cda031..eaf19ebaa964e6442cfa4117f862fb4aca9b2345 100644 (file)
@@ -715,3 +715,46 @@ call_complete:
        goto out;
 }
 EXPORT_SYMBOL(rxrpc_kernel_recv_data);
+
+/**
+ * rxrpc_kernel_get_reply_time - Get timestamp on first reply packet
+ * @sock: The socket that the call exists on
+ * @call: The call to query
+ * @_ts: Where to put the timestamp
+ *
+ * Retrieve the timestamp from the first DATA packet of the reply if it is
+ * in the ring.  Returns true if successful, false if not.
+ */
+bool rxrpc_kernel_get_reply_time(struct socket *sock, struct rxrpc_call *call,
+                                ktime_t *_ts)
+{
+       struct sk_buff *skb;
+       rxrpc_seq_t hard_ack, top, seq;
+       bool success = false;
+
+       mutex_lock(&call->user_mutex);
+
+       if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_RECV_REPLY)
+               goto out;
+
+       hard_ack = call->rx_hard_ack;
+       if (hard_ack != 0)
+               goto out;
+
+       seq = hard_ack + 1;
+       top = smp_load_acquire(&call->rx_top);
+       if (after(seq, top))
+               goto out;
+
+       skb = call->rxtx_buffer[seq & RXRPC_RXTX_BUFF_MASK];
+       if (!skb)
+               goto out;
+
+       *_ts = skb_get_ktime(skb);
+       success = true;
+
+out:
+       mutex_unlock(&call->user_mutex);
+       return success;
+}
+EXPORT_SYMBOL(rxrpc_kernel_get_reply_time);
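The new export gives in-kernel users a way to time the arrival of the first reply DATA packet, for instance to bound cache validity from the moment an RPC answer was received. A hypothetical caller (start_ts is assumed to have been captured when the call was issued):

	ktime_t reply_ts;

	if (rxrpc_kernel_get_reply_time(sock, call, &reply_ts)) {
		s64 elapsed_ns = ktime_to_ns(ktime_sub(reply_ts, start_ts));
		pr_debug("first reply packet after %lldns\n", elapsed_ns);
	}
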
index b8985d01876a27168fbdc411208aa2984535557b..913dca65cc6573d646f54a9583b9dbc5d7f6f017 100644 (file)
@@ -68,21 +68,6 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
        }
 }
 
-/*
- * Note the injected loss of a socket buffer.
- */
-void rxrpc_lose_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
-{
-       const void *here = __builtin_return_address(0);
-       if (skb) {
-               int n;
-               CHECK_SLAB_OKAY(&skb->users);
-               n = atomic_dec_return(select_skb_count(op));
-               trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
-               kfree_skb(skb);
-       }
-}
-
 /*
  * Clear a queue of socket buffers.
  */
index e801171fa351410025addaf0519b5c3844ea5b4f..ff7af71c4b49815b9406061c02ad453275df026c 100644 (file)
 /*
  * Fill out a peer address from a socket buffer containing a packet.
  */
-int rxrpc_extract_addr_from_skb(struct rxrpc_local *local,
-                               struct sockaddr_rxrpc *srx,
-                               struct sk_buff *skb)
+int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *srx, struct sk_buff *skb)
 {
        memset(srx, 0, sizeof(*srx));
 
        switch (ntohs(skb->protocol)) {
        case ETH_P_IP:
-               if (local->srx.transport.family == AF_INET6) {
-                       srx->transport_type = SOCK_DGRAM;
-                       srx->transport_len = sizeof(srx->transport.sin6);
-                       srx->transport.sin6.sin6_family = AF_INET6;
-                       srx->transport.sin6.sin6_port = udp_hdr(skb)->source;
-                       srx->transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
-                       srx->transport.sin6.sin6_addr.s6_addr32[3] = ip_hdr(skb)->saddr;
-               } else {
-                       srx->transport_type = SOCK_DGRAM;
-                       srx->transport_len = sizeof(srx->transport.sin);
-                       srx->transport.sin.sin_family = AF_INET;
-                       srx->transport.sin.sin_port = udp_hdr(skb)->source;
-                       srx->transport.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
-               }
+               srx->transport_type = SOCK_DGRAM;
+               srx->transport_len = sizeof(srx->transport.sin);
+               srx->transport.sin.sin_family = AF_INET;
+               srx->transport.sin.sin_port = udp_hdr(skb)->source;
+               srx->transport.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
                return 0;
 
 #ifdef CONFIG_AF_RXRPC_IPV6
index e9574138831116a59db2c39aaa2cd4ee4f28e06b..1b9afdee5ba976ba64200d8f85050cf053b7d65c 100644 (file)
@@ -194,6 +194,17 @@ config NET_SCH_ETF
          To compile this code as a module, choose M here: the
          module will be called sch_etf.
 
+config NET_SCH_TAPRIO
+       tristate "Time Aware Priority (taprio) Scheduler"
+       help
+         Say Y here if you want to use the Time Aware Priority (taprio) packet
+         scheduling algorithm.
+
+         See the top of <file:net/sched/sch_taprio.c> for more details.
+
+         To compile this code as a module, choose M here: the
+         module will be called sch_taprio.
+
 config NET_SCH_GRED
        tristate "Generic Random Early Detection (GRED)"
        ---help---
index f0403f49edcbd50e27d9ea450c2e46b5b4727b8e..8a40431d7b5c420d86427933a9af383e093812b7 100644 (file)
@@ -57,6 +57,7 @@ obj-$(CONFIG_NET_SCH_HHF)     += sch_hhf.o
 obj-$(CONFIG_NET_SCH_PIE)      += sch_pie.o
 obj-$(CONFIG_NET_SCH_CBS)      += sch_cbs.o
 obj-$(CONFIG_NET_SCH_ETF)      += sch_etf.o
+obj-$(CONFIG_NET_SCH_TAPRIO)   += sch_taprio.o
 
 obj-$(CONFIG_NET_CLS_U32)      += cls_u32.o
 obj-$(CONFIG_NET_CLS_ROUTE4)   += cls_route.o
index 3c7c2342188533e5fca6d001417c7e42f8c17138..9c1b0729aebf7146db03f534b433e6e871093ca8 100644 (file)
@@ -104,11 +104,11 @@ static int __tcf_action_put(struct tc_action *p, bool bind)
 {
        struct tcf_idrinfo *idrinfo = p->idrinfo;
 
-       if (refcount_dec_and_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
+       if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
                if (bind)
                        atomic_dec(&p->tcfa_bindcnt);
                idr_remove(&idrinfo->action_idr, p->tcfa_index);
-               spin_unlock(&idrinfo->lock);
+               mutex_unlock(&idrinfo->lock);
 
                tcf_action_cleanup(p);
                return 1;
@@ -200,7 +200,7 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
        struct tc_action *p;
        unsigned long id = 1;
 
-       spin_lock(&idrinfo->lock);
+       mutex_lock(&idrinfo->lock);
 
        s_i = cb->args[0];
 
@@ -235,7 +235,7 @@ done:
        if (index >= 0)
                cb->args[0] = index + 1;
 
-       spin_unlock(&idrinfo->lock);
+       mutex_unlock(&idrinfo->lock);
        if (n_i) {
                if (act_flags & TCA_FLAG_LARGE_DUMP_ON)
                        cb->args[1] = n_i;
@@ -277,18 +277,18 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
        if (nla_put_string(skb, TCA_KIND, ops->kind))
                goto nla_put_failure;
 
-       spin_lock(&idrinfo->lock);
+       mutex_lock(&idrinfo->lock);
        idr_for_each_entry_ul(idr, p, id) {
                ret = tcf_idr_release_unsafe(p);
                if (ret == ACT_P_DELETED) {
                        module_put(ops->owner);
                        n_i++;
                } else if (ret < 0) {
-                       spin_unlock(&idrinfo->lock);
+                       mutex_unlock(&idrinfo->lock);
                        goto nla_put_failure;
                }
        }
-       spin_unlock(&idrinfo->lock);
+       mutex_unlock(&idrinfo->lock);
 
        if (nla_put_u32(skb, TCA_FCNT, n_i))
                goto nla_put_failure;
@@ -324,13 +324,13 @@ int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
        struct tcf_idrinfo *idrinfo = tn->idrinfo;
        struct tc_action *p;
 
-       spin_lock(&idrinfo->lock);
+       mutex_lock(&idrinfo->lock);
        p = idr_find(&idrinfo->action_idr, index);
        if (IS_ERR(p))
                p = NULL;
        else if (p)
                refcount_inc(&p->tcfa_refcnt);
-       spin_unlock(&idrinfo->lock);
+       mutex_unlock(&idrinfo->lock);
 
        if (p) {
                *a = p;
@@ -345,10 +345,10 @@ static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
        struct tc_action *p;
        int ret = 0;
 
-       spin_lock(&idrinfo->lock);
+       mutex_lock(&idrinfo->lock);
        p = idr_find(&idrinfo->action_idr, index);
        if (!p) {
-               spin_unlock(&idrinfo->lock);
+               mutex_unlock(&idrinfo->lock);
                return -ENOENT;
        }
 
@@ -358,7 +358,7 @@ static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
 
                        WARN_ON(p != idr_remove(&idrinfo->action_idr,
                                                p->tcfa_index));
-                       spin_unlock(&idrinfo->lock);
+                       mutex_unlock(&idrinfo->lock);
 
                        tcf_action_cleanup(p);
                        module_put(owner);
@@ -369,7 +369,7 @@ static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
                ret = -EPERM;
        }
 
-       spin_unlock(&idrinfo->lock);
+       mutex_unlock(&idrinfo->lock);
        return ret;
 }
 
@@ -431,10 +431,10 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
 {
        struct tcf_idrinfo *idrinfo = tn->idrinfo;
 
-       spin_lock(&idrinfo->lock);
+       mutex_lock(&idrinfo->lock);
        /* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
        WARN_ON(!IS_ERR(idr_replace(&idrinfo->action_idr, a, a->tcfa_index)));
-       spin_unlock(&idrinfo->lock);
+       mutex_unlock(&idrinfo->lock);
 }
 EXPORT_SYMBOL(tcf_idr_insert);
 
@@ -444,10 +444,10 @@ void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
 {
        struct tcf_idrinfo *idrinfo = tn->idrinfo;
 
-       spin_lock(&idrinfo->lock);
+       mutex_lock(&idrinfo->lock);
        /* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
        WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
-       spin_unlock(&idrinfo->lock);
+       mutex_unlock(&idrinfo->lock);
 }
 EXPORT_SYMBOL(tcf_idr_cleanup);
 
@@ -465,14 +465,14 @@ int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
        int ret;
 
 again:
-       spin_lock(&idrinfo->lock);
+       mutex_lock(&idrinfo->lock);
        if (*index) {
                p = idr_find(&idrinfo->action_idr, *index);
                if (IS_ERR(p)) {
                        /* This means that another process allocated
                         * index but did not assign the pointer yet.
                         */
-                       spin_unlock(&idrinfo->lock);
+                       mutex_unlock(&idrinfo->lock);
                        goto again;
                }
 
@@ -485,7 +485,7 @@ again:
                } else {
                        *a = NULL;
                        ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
-                                           *index, GFP_ATOMIC);
+                                           *index, GFP_KERNEL);
                        if (!ret)
                                idr_replace(&idrinfo->action_idr,
                                            ERR_PTR(-EBUSY), *index);
@@ -494,12 +494,12 @@ again:
                *index = 1;
                *a = NULL;
                ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
-                                   UINT_MAX, GFP_ATOMIC);
+                                   UINT_MAX, GFP_KERNEL);
                if (!ret)
                        idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
                                    *index);
        }
-       spin_unlock(&idrinfo->lock);
+       mutex_unlock(&idrinfo->lock);
        return ret;
 }
 EXPORT_SYMBOL(tcf_idr_check_alloc);
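Converting idrinfo->lock from a spinlock to a mutex is what permits the idr_alloc_u32() calls above to move from GFP_ATOMIC to GFP_KERNEL: a sleeping allocation is legal under a mutex but not under a spinlock, and refcount_dec_and_mutex_lock() keeps the atomic put-and-lock pattern intact. A minimal sketch of the invariant:

	/* Sketch: under a mutex the allocation may sleep, so GFP_KERNEL
	 * is safe here; under the old spinlock it had to be GFP_ATOMIC.
	 */
	mutex_lock(&idrinfo->lock);
	ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index, UINT_MAX,
			    GFP_KERNEL);
	mutex_unlock(&idrinfo->lock);
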
@@ -1452,7 +1452,7 @@ static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
        u32 act_count = 0;
 
        ret = nlmsg_parse(cb->nlh, sizeof(struct tcamsg), tb, TCA_ROOT_MAX,
-                         tcaa_policy, NULL);
+                         tcaa_policy, cb->extack);
        if (ret < 0)
                return ret;
 
index 1efbfb10b1fc07b7d0744ce6d96fae9dcfe5282f..8af6c11d2482ac53e6520691c85ff291a4704a81 100644 (file)
@@ -135,7 +135,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
        }
 
        td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
-       if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) {
+       if (nla_len(tb[TCA_IPT_TARG]) != td->u.target_size) {
                if (exists)
                        tcf_idr_release(*a, bind);
                else
index 3de47e99b788f81e914efebe9ef89e7f32c1f4bc..43c8559aca563bbd3b18504c7587c89e877a2779 100644 (file)
@@ -717,8 +717,10 @@ static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
 errout_rcu:
        rcu_read_unlock();
 errout_qdisc:
-       if (*q)
+       if (*q) {
                qdisc_put(*q);
+               *q = NULL;
+       }
        return ERR_PTR(err);
 }
 
@@ -1725,7 +1727,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
        if (nlmsg_len(cb->nlh) < sizeof(*tcm))
                return skb->len;
 
-       err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
+       err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL,
+                         cb->extack);
        if (err)
                return err;
 
@@ -2052,7 +2055,8 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
        if (nlmsg_len(cb->nlh) < sizeof(*tcm))
                return skb->len;
 
-       err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
+       err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL,
+                         cb->extack);
        if (err)
                return err;
 
index 92dd5071a708a1cdf5add0ae524292d966589d86..9aada2d0ef06567a962f5bb2e929370278e5a2af 100644 (file)
@@ -79,7 +79,6 @@ struct fl_flow_tmplt {
        struct fl_flow_key mask;
        struct flow_dissector dissector;
        struct tcf_chain *chain;
-       struct rcu_work rwork;
 };
 
 struct cls_fl_head {
@@ -1438,20 +1437,12 @@ errout_tb:
        return ERR_PTR(err);
 }
 
-static void fl_tmplt_destroy_work(struct work_struct *work)
-{
-       struct fl_flow_tmplt *tmplt = container_of(to_rcu_work(work),
-                                                struct fl_flow_tmplt, rwork);
-
-       fl_hw_destroy_tmplt(tmplt->chain, tmplt);
-       kfree(tmplt);
-}
-
 static void fl_tmplt_destroy(void *tmplt_priv)
 {
        struct fl_flow_tmplt *tmplt = tmplt_priv;
 
-       tcf_queue_work(&tmplt->rwork, fl_tmplt_destroy_work);
+       fl_hw_destroy_tmplt(tmplt->chain, tmplt);
+       kfree(tmplt);
 }
 
 static int fl_dump_key_val(struct sk_buff *skb,
index f218ccf1e2d9a651ad07c2a6276742b97d3b2102..ac79a40a0392adb26cf878f6bcc70aa0a5f31aa5 100644 (file)
@@ -68,7 +68,6 @@ struct tc_u_knode {
        u32                     mask;
        u32 __percpu            *pcpu_success;
 #endif
-       struct tcf_proto        *tp;
        struct rcu_work         rwork;
        /* The 'sel' field MUST be the last field in structure to allow for
         * tc_u32_keys allocated at end of structure.
@@ -80,10 +79,10 @@ struct tc_u_hnode {
        struct tc_u_hnode __rcu *next;
        u32                     handle;
        u32                     prio;
-       struct tc_u_common      *tp_c;
        int                     refcnt;
        unsigned int            divisor;
        struct idr              handle_idr;
+       bool                    is_root;
        struct rcu_head         rcu;
        u32                     flags;
        /* The 'ht' field MUST be the last field in structure to allow for
@@ -98,7 +97,7 @@ struct tc_u_common {
        int                     refcnt;
        struct idr              handle_idr;
        struct hlist_node       hnode;
-       struct rcu_head         rcu;
+       long                    knodes;
 };
 
 static inline unsigned int u32_hash_fold(__be32 key,
@@ -344,19 +343,16 @@ static void *tc_u_common_ptr(const struct tcf_proto *tp)
                return block->q;
 }
 
-static unsigned int tc_u_hash(const struct tcf_proto *tp)
+static struct hlist_head *tc_u_hash(void *key)
 {
-       return hash_ptr(tc_u_common_ptr(tp), U32_HASH_SHIFT);
+       return tc_u_common_hash + hash_ptr(key, U32_HASH_SHIFT);
 }
 
-static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp)
+static struct tc_u_common *tc_u_common_find(void *key)
 {
        struct tc_u_common *tc;
-       unsigned int h;
-
-       h = tc_u_hash(tp);
-       hlist_for_each_entry(tc, &tc_u_common_hash[h], hnode) {
-               if (tc->ptr == tc_u_common_ptr(tp))
+       hlist_for_each_entry(tc, tc_u_hash(key), hnode) {
+               if (tc->ptr == key)
                        return tc;
        }
        return NULL;
@@ -365,10 +361,8 @@ static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp)
 static int u32_init(struct tcf_proto *tp)
 {
        struct tc_u_hnode *root_ht;
-       struct tc_u_common *tp_c;
-       unsigned int h;
-
-       tp_c = tc_u_common_find(tp);
+       void *key = tc_u_common_ptr(tp);
+       struct tc_u_common *tp_c = tc_u_common_find(key);
 
        root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
        if (root_ht == NULL)
@@ -377,6 +371,7 @@ static int u32_init(struct tcf_proto *tp)
        root_ht->refcnt++;
        root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
        root_ht->prio = tp->prio;
+       root_ht->is_root = true;
        idr_init(&root_ht->handle_idr);
 
        if (tp_c == NULL) {
@@ -385,26 +380,23 @@ static int u32_init(struct tcf_proto *tp)
                        kfree(root_ht);
                        return -ENOBUFS;
                }
-               tp_c->ptr = tc_u_common_ptr(tp);
+               tp_c->ptr = key;
                INIT_HLIST_NODE(&tp_c->hnode);
                idr_init(&tp_c->handle_idr);
 
-               h = tc_u_hash(tp);
-               hlist_add_head(&tp_c->hnode, &tc_u_common_hash[h]);
+               hlist_add_head(&tp_c->hnode, tc_u_hash(key));
        }
 
        tp_c->refcnt++;
        RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
        rcu_assign_pointer(tp_c->hlist, root_ht);
-       root_ht->tp_c = tp_c;
 
        rcu_assign_pointer(tp->root, root_ht);
        tp->data = tp_c;
        return 0;
 }
 
-static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
-                          bool free_pf)
+static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
 {
        struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
 
@@ -438,7 +430,7 @@ static void u32_delete_key_work(struct work_struct *work)
                                              struct tc_u_knode,
                                              rwork);
        rtnl_lock();
-       u32_destroy_key(key->tp, key, false);
+       u32_destroy_key(key, false);
        rtnl_unlock();
 }
 
@@ -455,12 +447,13 @@ static void u32_delete_key_freepf_work(struct work_struct *work)
                                              struct tc_u_knode,
                                              rwork);
        rtnl_lock();
-       u32_destroy_key(key->tp, key, true);
+       u32_destroy_key(key, true);
        rtnl_unlock();
 }
 
 static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
 {
+       struct tc_u_common *tp_c = tp->data;
        struct tc_u_knode __rcu **kp;
        struct tc_u_knode *pkp;
        struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);
@@ -471,6 +464,7 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
                     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
                        if (pkp == key) {
                                RCU_INIT_POINTER(*kp, key->next);
+                               tp_c->knodes--;
 
                                tcf_unbind_filter(tp, &key->res);
                                idr_remove(&ht->handle_idr, key->handle);
@@ -585,6 +579,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
 static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
                            struct netlink_ext_ack *extack)
 {
+       struct tc_u_common *tp_c = tp->data;
        struct tc_u_knode *n;
        unsigned int h;
 
@@ -592,13 +587,14 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
                while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
                        RCU_INIT_POINTER(ht->ht[h],
                                         rtnl_dereference(n->next));
+                       tp_c->knodes--;
                        tcf_unbind_filter(tp, &n->res);
                        u32_remove_hw_knode(tp, n, extack);
                        idr_remove(&ht->handle_idr, n->handle);
                        if (tcf_exts_get_net(&n->exts))
                                tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);
                        else
-                               u32_destroy_key(n->tp, n, true);
+                               u32_destroy_key(n, true);
                }
        }
 }
@@ -631,17 +627,6 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
        return -ENOENT;
 }
 
-static bool ht_empty(struct tc_u_hnode *ht)
-{
-       unsigned int h;
-
-       for (h = 0; h <= ht->divisor; h++)
-               if (rcu_access_pointer(ht->ht[h]))
-                       return false;
-
-       return true;
-}
-
 static void u32_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
        struct tc_u_common *tp_c = tp->data;
@@ -679,20 +664,16 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
                      struct netlink_ext_ack *extack)
 {
        struct tc_u_hnode *ht = arg;
-       struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
        struct tc_u_common *tp_c = tp->data;
        int ret = 0;
 
-       if (ht == NULL)
-               goto out;
-
        if (TC_U32_KEY(ht->handle)) {
                u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
                ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
                goto out;
        }
 
-       if (root_ht == ht) {
+       if (ht->is_root) {
                NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
                return -EINVAL;
        }
@@ -706,38 +687,7 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
        }
 
 out:
-       *last = true;
-       if (root_ht) {
-               if (root_ht->refcnt > 1) {
-                       *last = false;
-                       goto ret;
-               }
-               if (root_ht->refcnt == 1) {
-                       if (!ht_empty(root_ht)) {
-                               *last = false;
-                               goto ret;
-                       }
-               }
-       }
-
-       if (tp_c->refcnt > 1) {
-               *last = false;
-               goto ret;
-       }
-
-       if (tp_c->refcnt == 1) {
-               struct tc_u_hnode *ht;
-
-               for (ht = rtnl_dereference(tp_c->hlist);
-                    ht;
-                    ht = rtnl_dereference(ht->next))
-                       if (!ht_empty(ht)) {
-                               *last = false;
-                               break;
-                       }
-       }
-
-ret:
+       *last = tp_c->refcnt == 1 && tp_c->knodes == 0;
        return ret;
 }
 
@@ -768,7 +718,7 @@ static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
 };
 
 static int u32_set_parms(struct net *net, struct tcf_proto *tp,
-                        unsigned long base, struct tc_u_hnode *ht,
+                        unsigned long base,
                         struct tc_u_knode *n, struct nlattr **tb,
                         struct nlattr *est, bool ovr,
                         struct netlink_ext_ack *extack)
@@ -789,12 +739,16 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
                }
 
                if (handle) {
-                       ht_down = u32_lookup_ht(ht->tp_c, handle);
+                       ht_down = u32_lookup_ht(tp->data, handle);
 
                        if (!ht_down) {
                                NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
                                return -EINVAL;
                        }
+                       if (ht_down->is_root) {
+                               NL_SET_ERR_MSG_MOD(extack, "Not linking to root node");
+                               return -EINVAL;
+                       }
                        ht_down->refcnt++;
                }
 
@@ -891,7 +845,6 @@ static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
        /* Similarly success statistics must be moved as pointers */
        new->pcpu_success = n->pcpu_success;
 #endif
-       new->tp = tp;
        memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
 
        if (tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE)) {
@@ -960,18 +913,17 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
                if (!new)
                        return -ENOMEM;
 
-               err = u32_set_parms(net, tp, base,
-                                   rtnl_dereference(n->ht_up), new, tb,
+               err = u32_set_parms(net, tp, base, new, tb,
                                    tca[TCA_RATE], ovr, extack);
 
                if (err) {
-                       u32_destroy_key(tp, new, false);
+                       u32_destroy_key(new, false);
                        return err;
                }
 
                err = u32_replace_hw_knode(tp, new, flags, extack);
                if (err) {
-                       u32_destroy_key(tp, new, false);
+                       u32_destroy_key(new, false);
                        return err;
                }
 
@@ -988,7 +940,11 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
        if (tb[TCA_U32_DIVISOR]) {
                unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
 
-               if (--divisor > 0x100) {
+               if (!is_power_of_2(divisor)) {
+                       NL_SET_ERR_MSG_MOD(extack, "Divisor is not a power of 2");
+                       return -EINVAL;
+               }
+               if (divisor-- > 0x100) {
                        NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
                        return -EINVAL;
                }
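u32 now insists that a requested hash divisor be a power of two before decrementing it into the stored value, which the classifier fast path uses as a bucket mask, while still capping the table at 256 buckets. Worked examples of the two checks:

	/* Sketch of the accepted/rejected values:
	 *   divisor 256 -> power of two, not > 0x100: accepted, mask 0xff
	 *   divisor 512 -> power of two but > 0x100:  rejected
	 *   divisor 100 -> not a power of two:        rejected
	 */
	if (!is_power_of_2(divisor))
		return -EINVAL;
	if (divisor-- > 0x100)
		return -EINVAL;
	/* divisor is now usable as a bucket mask */
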
@@ -1013,7 +969,6 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
                                return err;
                        }
                }
-               ht->tp_c = tp_c;
                ht->refcnt = 1;
                ht->divisor = divisor;
                ht->handle = handle;
@@ -1103,7 +1058,6 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
        n->handle = handle;
        n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
        n->flags = flags;
-       n->tp = tp;
 
        err = tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
        if (err < 0)
@@ -1125,7 +1079,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
        }
 #endif
 
-       err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr,
+       err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE], ovr,
                            extack);
        if (err == 0) {
                struct tc_u_knode __rcu **ins;
@@ -1146,6 +1100,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 
                RCU_INIT_POINTER(n->next, pins);
                rcu_assign_pointer(*ins, n);
+               tp_c->knodes++;
                *arg = n;
                return 0;
        }
index 22e9799e5b697f45baee7f1a334ed2ff719c5563..cf5c714ae786c376259a44f8de6f561e848383dc 100644 (file)
@@ -1322,6 +1322,18 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
  * Delete/get qdisc.
  */
 
+const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
+       [TCA_KIND]              = { .type = NLA_STRING },
+       [TCA_OPTIONS]           = { .type = NLA_NESTED },
+       [TCA_RATE]              = { .type = NLA_BINARY,
+                                   .len = sizeof(struct tc_estimator) },
+       [TCA_STAB]              = { .type = NLA_NESTED },
+       [TCA_DUMP_INVISIBLE]    = { .type = NLA_FLAG },
+       [TCA_CHAIN]             = { .type = NLA_U32 },
+       [TCA_INGRESS_BLOCK]     = { .type = NLA_U32 },
+       [TCA_EGRESS_BLOCK]      = { .type = NLA_U32 },
+};
+
 static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
                        struct netlink_ext_ack *extack)
 {
@@ -1338,7 +1350,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
            !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
 
-       err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
+       err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+                         extack);
        if (err < 0)
                return err;
 
@@ -1422,7 +1435,8 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
 
 replay:
        /* Reinit, just in case something touches this. */
-       err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
+       err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+                         extack);
        if (err < 0)
                return err;
 
@@ -1656,7 +1670,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
        idx = 0;
        ASSERT_RTNL();
 
-       err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX, NULL, NULL);
+       err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
+                         rtm_tca_policy, cb->extack);
        if (err < 0)
                return err;
 
@@ -1875,7 +1890,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
            !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
 
-       err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
+       err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+                         extack);
        if (err < 0)
                return err;
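tc_get_qdisc(), tc_modify_qdisc(), tc_dump_qdisc() and tc_ctl_tclass() all switch from a NULL policy to the shared rtm_tca_policy, and the dump paths now forward cb->extack, so malformed TCA_* attributes are rejected with a useful extended-ack message instead of being accepted silently. The common parse shape:

	struct nlattr *tca[TCA_MAX + 1];
	int err;

	err = nlmsg_parse(n, sizeof(struct tcmsg), tca, TCA_MAX,
			  rtm_tca_policy, extack);
	if (err < 0)
		return err;
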
 
index 628a2cdcfc6f2fa69d9402f06881949d2e1423d9..338222a6c664b1825aaada4355e2fc0a01db9c73 100644 (file)
@@ -412,7 +412,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
 static struct sk_buff *fq_dequeue(struct Qdisc *sch)
 {
        struct fq_sched_data *q = qdisc_priv(sch);
-       u64 now = ktime_get_tai_ns();
+       u64 now = ktime_get_ns();
        struct fq_flow_head *head;
        struct sk_buff *skb;
        struct fq_flow *f;
@@ -776,7 +776,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt,
        q->fq_trees_log         = ilog2(1024);
        q->orphan_mask          = 1024 - 1;
        q->low_rate_threshold   = 550000 / 8;
-       qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_TAI);
+       qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);
 
        if (opt)
                err = fq_change(sch, opt, extack);
@@ -831,7 +831,7 @@ static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
        st.flows_plimit           = q->stat_flows_plimit;
        st.pkts_too_long          = q->stat_pkts_too_long;
        st.allocation_errors      = q->stat_allocation_errors;
-       st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_tai_ns();
+       st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
        st.flows                  = q->flows;
        st.inactive_flows         = q->inactive_flows;
        st.throttled_flows        = q->throttled_flows;
index 531fac1d287541f76571e5a1b811716321ca289b..3023929852e8c4aaa4172861d2d0beff17e25f27 100644 (file)
@@ -941,7 +941,7 @@ void qdisc_free(struct Qdisc *qdisc)
        kfree((char *) qdisc - qdisc->padded);
 }
 
-void qdisc_free_cb(struct rcu_head *head)
+static void qdisc_free_cb(struct rcu_head *head)
 {
        struct Qdisc *q = container_of(head, struct Qdisc, rcu);
 
index 18d30bb86881aba8a5c5521181cba11038945672..d1429371592fc0f2cb407a72eff1e01b16d9fadc 100644 (file)
@@ -110,8 +110,8 @@ static bool drop_early(struct Qdisc *sch, u32 packet_size)
        /* If current delay is less than half of target, and
         * if drop prob is low already, disable early_drop
         */
-       if ((q->vars.qdelay < q->params.target / 2)
-           && (q->vars.prob < MAX_PROB / 5))
+       if ((q->vars.qdelay < q->params.target / 2) &&
+           (q->vars.prob < MAX_PROB / 5))
                return false;
 
        /* If we have fewer than 2 mtu-sized packets, disable drop_early,
@@ -209,7 +209,8 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
 
        /* tupdate is in jiffies */
        if (tb[TCA_PIE_TUPDATE])
-               q->params.tupdate = usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE]));
+               q->params.tupdate =
+                       usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE]));
 
        if (tb[TCA_PIE_LIMIT]) {
                u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);
@@ -247,7 +248,6 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
 
 static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
 {
-
        struct pie_sched_data *q = qdisc_priv(sch);
        int qlen = sch->qstats.backlog; /* current queue size in bytes */
 
@@ -294,9 +294,9 @@ static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
                         * dq_count to 0 to re-enter the if block when the next
                         * packet is dequeued
                         */
-                       if (qlen < QUEUE_THRESHOLD)
+                       if (qlen < QUEUE_THRESHOLD) {
                                q->vars.dq_count = DQCOUNT_INVALID;
-                       else {
+                       } else {
                                q->vars.dq_count = 0;
                                q->vars.dq_tstamp = psched_get_time();
                        }
@@ -370,7 +370,7 @@ static void calculate_probability(struct Qdisc *sch)
        oldprob = q->vars.prob;
 
        /* to ensure we increase probability in steps of no more than 2% */
-       if (delta > (s32) (MAX_PROB / (100 / 2)) &&
+       if (delta > (s32)(MAX_PROB / (100 / 2)) &&
            q->vars.prob >= MAX_PROB / 10)
                delta = (MAX_PROB / 100) * 2;
 
@@ -405,7 +405,7 @@ static void calculate_probability(struct Qdisc *sch)
         * delay is 0 for 2 consecutive Tupdate periods.
         */
 
-       if ((qdelay == 0) && (qdelay_old == 0) && update_prob)
+       if (qdelay == 0 && qdelay_old == 0 && update_prob)
                q->vars.prob = (q->vars.prob * 98) / 100;
 
        q->vars.qdelay = qdelay;
@@ -419,8 +419,8 @@ static void calculate_probability(struct Qdisc *sch)
         */
        if ((q->vars.qdelay < q->params.target / 2) &&
            (q->vars.qdelay_old < q->params.target / 2) &&
-           (q->vars.prob == 0) &&
-           (q->vars.avg_dq_rate > 0))
+           q->vars.prob == 0 &&
+           q->vars.avg_dq_rate > 0)
                pie_vars_init(&q->vars);
 }
 
@@ -437,7 +437,6 @@ static void pie_timer(struct timer_list *t)
        if (q->params.tupdate)
                mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
        spin_unlock(root_lock);
-
 }
 
 static int pie_init(struct Qdisc *sch, struct nlattr *opt,
@@ -469,15 +468,16 @@ static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
        struct nlattr *opts;
 
        opts = nla_nest_start(skb, TCA_OPTIONS);
-       if (opts == NULL)
+       if (!opts)
                goto nla_put_failure;
 
        /* convert target from pschedtime to us */
        if (nla_put_u32(skb, TCA_PIE_TARGET,
-                       ((u32) PSCHED_TICKS2NS(q->params.target)) /
+                       ((u32)PSCHED_TICKS2NS(q->params.target)) /
                        NSEC_PER_USEC) ||
            nla_put_u32(skb, TCA_PIE_LIMIT, sch->limit) ||
-           nla_put_u32(skb, TCA_PIE_TUPDATE, jiffies_to_usecs(q->params.tupdate)) ||
+           nla_put_u32(skb, TCA_PIE_TUPDATE,
+                       jiffies_to_usecs(q->params.tupdate)) ||
            nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) ||
            nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) ||
            nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) ||
@@ -489,7 +489,6 @@ static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
 nla_put_failure:
        nla_nest_cancel(skb, opts);
        return -1;
-
 }
 
 static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
@@ -497,7 +496,7 @@ static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
        struct pie_sched_data *q = qdisc_priv(sch);
        struct tc_pie_xstats st = {
                .prob           = q->vars.prob,
-               .delay          = ((u32) PSCHED_TICKS2NS(q->vars.qdelay)) /
+               .delay          = ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) /
                                   NSEC_PER_USEC,
                /* unscale and return dq_rate in bytes per sec */
                .avg_dq_rate    = q->vars.avg_dq_rate *
@@ -514,8 +513,7 @@ static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 
 static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
 {
-       struct sk_buff *skb;
-       skb = qdisc_dequeue_head(sch);
+       struct sk_buff *skb = qdisc_dequeue_head(sch);
 
        if (!skb)
                return NULL;
@@ -527,6 +525,7 @@ static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
 static void pie_reset(struct Qdisc *sch)
 {
        struct pie_sched_data *q = qdisc_priv(sch);
+
        qdisc_reset_queue(sch);
        pie_vars_init(&q->vars);
 }
@@ -534,6 +533,7 @@ static void pie_reset(struct Qdisc *sch)
 static void pie_destroy(struct Qdisc *sch)
 {
        struct pie_sched_data *q = qdisc_priv(sch);
+
        q->params.tupdate = 0;
        del_timer_sync(&q->adapt_timer);
 }
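
The sch_pie.c hunks above are pure style cleanup, but they walk past every
netlink attribute (TCA_PIE_TARGET, TCA_PIE_TUPDATE, TCA_PIE_LIMIT, ...) that
the tc front end exposes for this qdisc. As a hedged illustration of how those
attributes are set from user space (the interface name and values here are
assumptions, not taken from this commit):

    # illustrative only: PIE with explicit target/tupdate and ECN marking
    tc qdisc replace dev eth0 root pie limit 1000 target 15ms tupdate 15ms ecn
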
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
new file mode 100644 (file)
index 0000000..206e4db
--- /dev/null
@@ -0,0 +1,962 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* net/sched/sch_taprio.c       Time Aware Priority Scheduler
+ *
+ * Authors:    Vinicius Costa Gomes <vinicius.gomes@intel.com>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
+#include <net/sch_generic.h>
+
+#define TAPRIO_ALL_GATES_OPEN -1
+
+struct sched_entry {
+       struct list_head list;
+
+       /* The instant that this entry "closes" and the next one
+        * should open. The qdisc will make some effort so that no
+        * packet leaves after this time.
+        */
+       ktime_t close_time;
+       atomic_t budget;
+       int index;
+       u32 gate_mask;
+       u32 interval;
+       u8 command;
+};
+
+struct taprio_sched {
+       struct Qdisc **qdiscs;
+       struct Qdisc *root;
+       s64 base_time;
+       int clockid;
+       int picos_per_byte; /* Using picoseconds because for 10Gbps+
+                            * speeds it's sub-nanosecond per byte
+                            */
+       size_t num_entries;
+
+       /* Protects the update side of the RCU protected current_entry */
+       spinlock_t current_entry_lock;
+       struct sched_entry __rcu *current_entry;
+       struct list_head entries;
+       ktime_t (*get_time)(void);
+       struct hrtimer advance_timer;
+};
+
+static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+                         struct sk_buff **to_free)
+{
+       struct taprio_sched *q = qdisc_priv(sch);
+       struct Qdisc *child;
+       int queue;
+
+       queue = skb_get_queue_mapping(skb);
+
+       child = q->qdiscs[queue];
+       if (unlikely(!child))
+               return qdisc_drop(skb, sch, to_free);
+
+       qdisc_qstats_backlog_inc(sch, skb);
+       sch->q.qlen++;
+
+       return qdisc_enqueue(skb, child, to_free);
+}
+
+static struct sk_buff *taprio_peek(struct Qdisc *sch)
+{
+       struct taprio_sched *q = qdisc_priv(sch);
+       struct net_device *dev = qdisc_dev(sch);
+       struct sched_entry *entry;
+       struct sk_buff *skb;
+       u32 gate_mask;
+       int i;
+
+       rcu_read_lock();
+       entry = rcu_dereference(q->current_entry);
+       gate_mask = entry ? entry->gate_mask : -1;
+       rcu_read_unlock();
+
+       if (!gate_mask)
+               return NULL;
+
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               struct Qdisc *child = q->qdiscs[i];
+               int prio;
+               u8 tc;
+
+               if (unlikely(!child))
+                       continue;
+
+               skb = child->ops->peek(child);
+               if (!skb)
+                       continue;
+
+               prio = skb->priority;
+               tc = netdev_get_prio_tc_map(dev, prio);
+
+               if (!(gate_mask & BIT(tc)))
+                       return NULL;
+
+               return skb;
+       }
+
+       return NULL;
+}
+
+static inline int length_to_duration(struct taprio_sched *q, int len)
+{
+       return (len * q->picos_per_byte) / 1000;
+}
+
+static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
+{
+       struct taprio_sched *q = qdisc_priv(sch);
+       struct net_device *dev = qdisc_dev(sch);
+       struct sched_entry *entry;
+       struct sk_buff *skb;
+       u32 gate_mask;
+       int i;
+
+       rcu_read_lock();
+       entry = rcu_dereference(q->current_entry);
+       /* If there's no entry, the schedule hasn't started yet, so
+        * force all gates to be open; this is in accordance with
+        * IEEE 802.1Qbv-2015 Section 8.6.9.4.5
+        * "AdminGateStates"
+        */
+       gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
+       rcu_read_unlock();
+
+       if (!gate_mask)
+               return NULL;
+
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               struct Qdisc *child = q->qdiscs[i];
+               ktime_t guard;
+               int prio;
+               int len;
+               u8 tc;
+
+               if (unlikely(!child))
+                       continue;
+
+               skb = child->ops->peek(child);
+               if (!skb)
+                       continue;
+
+               prio = skb->priority;
+               tc = netdev_get_prio_tc_map(dev, prio);
+
+               if (!(gate_mask & BIT(tc)))
+                       continue;
+
+               len = qdisc_pkt_len(skb);
+               guard = ktime_add_ns(q->get_time(),
+                                    length_to_duration(q, len));
+
+               /* In the case that there's no gate entry, there's no
+                * guard band ...
+                */
+               if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
+                   ktime_after(guard, entry->close_time))
+                       return NULL;
+
+               /* ... and no budget. */
+               if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
+                   atomic_sub_return(len, &entry->budget) < 0)
+                       return NULL;
+
+               skb = child->ops->dequeue(child);
+               if (unlikely(!skb))
+                       return NULL;
+
+               qdisc_bstats_update(sch, skb);
+               qdisc_qstats_backlog_dec(sch, skb);
+               sch->q.qlen--;
+
+               return skb;
+       }
+
+       return NULL;
+}
+
+static bool should_restart_cycle(const struct taprio_sched *q,
+                                const struct sched_entry *entry)
+{
+       WARN_ON(!entry);
+
+       return list_is_last(&entry->list, &q->entries);
+}
+
+static enum hrtimer_restart advance_sched(struct hrtimer *timer)
+{
+       struct taprio_sched *q = container_of(timer, struct taprio_sched,
+                                             advance_timer);
+       struct sched_entry *entry, *next;
+       struct Qdisc *sch = q->root;
+       ktime_t close_time;
+
+       spin_lock(&q->current_entry_lock);
+       entry = rcu_dereference_protected(q->current_entry,
+                                         lockdep_is_held(&q->current_entry_lock));
+
+       /* This is the case that it's the first time that the schedule
+        * runs, so it only happens once per schedule. The first entry
+        * is pre-calculated during the schedule initialization.
+        */
+       if (unlikely(!entry)) {
+               next = list_first_entry(&q->entries, struct sched_entry,
+                                       list);
+               close_time = next->close_time;
+               goto first_run;
+       }
+
+       if (should_restart_cycle(q, entry))
+               next = list_first_entry(&q->entries, struct sched_entry,
+                                       list);
+       else
+               next = list_next_entry(entry, list);
+
+       close_time = ktime_add_ns(entry->close_time, next->interval);
+
+       next->close_time = close_time;
+       atomic_set(&next->budget,
+                  (next->interval * 1000) / q->picos_per_byte);
+
+first_run:
+       rcu_assign_pointer(q->current_entry, next);
+       spin_unlock(&q->current_entry_lock);
+
+       hrtimer_set_expires(&q->advance_timer, close_time);
+
+       rcu_read_lock();
+       __netif_schedule(sch);
+       rcu_read_unlock();
+
+       return HRTIMER_RESTART;
+}
+
+static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
+       [TCA_TAPRIO_SCHED_ENTRY_INDEX]     = { .type = NLA_U32 },
+       [TCA_TAPRIO_SCHED_ENTRY_CMD]       = { .type = NLA_U8 },
+       [TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
+       [TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
+};
+
+static const struct nla_policy entry_list_policy[TCA_TAPRIO_SCHED_MAX + 1] = {
+       [TCA_TAPRIO_SCHED_ENTRY] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
+       [TCA_TAPRIO_ATTR_PRIOMAP]              = {
+               .len = sizeof(struct tc_mqprio_qopt)
+       },
+       [TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]     = { .type = NLA_NESTED },
+       [TCA_TAPRIO_ATTR_SCHED_BASE_TIME]      = { .type = NLA_S64 },
+       [TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]   = { .type = NLA_NESTED },
+       [TCA_TAPRIO_ATTR_SCHED_CLOCKID]        = { .type = NLA_S32 },
+};
+
+static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
+                           struct netlink_ext_ack *extack)
+{
+       u32 interval = 0;
+
+       if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
+               entry->command = nla_get_u8(
+                       tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);
+
+       if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
+               entry->gate_mask = nla_get_u32(
+                       tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);
+
+       if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
+               interval = nla_get_u32(
+                       tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);
+
+       if (interval == 0) {
+               NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
+               return -EINVAL;
+       }
+
+       entry->interval = interval;
+
+       return 0;
+}
+
+static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry,
+                            int index, struct netlink_ext_ack *extack)
+{
+       struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
+       int err;
+
+       err = nla_parse_nested(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
+                              entry_policy, NULL);
+       if (err < 0) {
+               NL_SET_ERR_MSG(extack, "Could not parse nested entry");
+               return -EINVAL;
+       }
+
+       entry->index = index;
+
+       return fill_sched_entry(tb, entry, extack);
+}
+
+/* Returns the number of entries in case of success */
+static int parse_sched_single_entry(struct nlattr *n,
+                                   struct taprio_sched *q,
+                                   struct netlink_ext_ack *extack)
+{
+       struct nlattr *tb_entry[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
+       struct nlattr *tb_list[TCA_TAPRIO_SCHED_MAX + 1] = { };
+       struct sched_entry *entry;
+       bool found = false;
+       u32 index;
+       int err;
+
+       err = nla_parse_nested(tb_list, TCA_TAPRIO_SCHED_MAX,
+                              n, entry_list_policy, NULL);
+       if (err < 0) {
+               NL_SET_ERR_MSG(extack, "Could not parse nested entry");
+               return -EINVAL;
+       }
+
+       if (!tb_list[TCA_TAPRIO_SCHED_ENTRY]) {
+               NL_SET_ERR_MSG(extack, "Single-entry must include an entry");
+               return -EINVAL;
+       }
+
+       err = nla_parse_nested(tb_entry, TCA_TAPRIO_SCHED_ENTRY_MAX,
+                              tb_list[TCA_TAPRIO_SCHED_ENTRY],
+                              entry_policy, NULL);
+       if (err < 0) {
+               NL_SET_ERR_MSG(extack, "Could not parse nested entry");
+               return -EINVAL;
+       }
+
+       if (!tb_entry[TCA_TAPRIO_SCHED_ENTRY_INDEX]) {
+               NL_SET_ERR_MSG(extack, "Entry must specify an index");
+               return -EINVAL;
+       }
+
+       index = nla_get_u32(tb_entry[TCA_TAPRIO_SCHED_ENTRY_INDEX]);
+       if (index >= q->num_entries) {
+               NL_SET_ERR_MSG(extack, "Index for single entry exceeds number of entries in schedule");
+               return -EINVAL;
+       }
+
+       list_for_each_entry(entry, &q->entries, list) {
+               if (entry->index == index) {
+                       found = true;
+                       break;
+               }
+       }
+
+       if (!found) {
+               NL_SET_ERR_MSG(extack, "Could not find entry");
+               return -ENOENT;
+       }
+
+       err = fill_sched_entry(tb_entry, entry, extack);
+       if (err < 0)
+               return err;
+
+       return q->num_entries;
+}
+
+static int parse_sched_list(struct nlattr *list,
+                           struct taprio_sched *q,
+                           struct netlink_ext_ack *extack)
+{
+       struct nlattr *n;
+       int err, rem;
+       int i = 0;
+
+       if (!list)
+               return -EINVAL;
+
+       nla_for_each_nested(n, list, rem) {
+               struct sched_entry *entry;
+
+               if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
+                       NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
+                       continue;
+               }
+
+               entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+               if (!entry) {
+                       NL_SET_ERR_MSG(extack, "Not enough memory for entry");
+                       return -ENOMEM;
+               }
+
+               err = parse_sched_entry(n, entry, i, extack);
+               if (err < 0) {
+                       kfree(entry);
+                       return err;
+               }
+
+               list_add_tail(&entry->list, &q->entries);
+               i++;
+       }
+
+       q->num_entries = i;
+
+       return i;
+}
+
+/* Returns the number of entries in case of success */
+static int parse_taprio_opt(struct nlattr **tb, struct taprio_sched *q,
+                           struct netlink_ext_ack *extack)
+{
+       int err = 0;
+       int clockid;
+
+       if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST] &&
+           tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY])
+               return -EINVAL;
+
+       if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] && q->num_entries == 0)
+               return -EINVAL;
+
+       if (q->clockid == -1 && !tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID])
+               return -EINVAL;
+
+       if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
+               q->base_time = nla_get_s64(
+                       tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);
+
+       if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
+               clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);
+
+               /* We only support static clockids and we don't allow
+                * for it to be modified after the first init.
+                */
+               if (clockid < 0 || (q->clockid != -1 && q->clockid != clockid))
+                       return -EINVAL;
+
+               q->clockid = clockid;
+       }
+
+       if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
+               err = parse_sched_list(
+                       tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], q, extack);
+       else if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY])
+               err = parse_sched_single_entry(
+                       tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY], q, extack);
+
+       /* parse_sched_* return the number of entries in the schedule;
+        * a schedule with zero entries is an error.
+        */
+       if (err == 0) {
+               NL_SET_ERR_MSG(extack, "The schedule should contain at least one entry");
+               return -EINVAL;
+       }
+
+       return err;
+}
+
+static int taprio_parse_mqprio_opt(struct net_device *dev,
+                                  struct tc_mqprio_qopt *qopt,
+                                  struct netlink_ext_ack *extack)
+{
+       int i, j;
+
+       if (!qopt) {
+               NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
+               return -EINVAL;
+       }
+
+       /* Verify num_tc is not out of max range */
+       if (qopt->num_tc > TC_MAX_QUEUE) {
+               NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
+               return -EINVAL;
+       }
+
+       /* taprio imposes that traffic classes map 1:n to tx queues */
+       if (qopt->num_tc > dev->num_tx_queues) {
+               NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
+               return -EINVAL;
+       }
+
+       /* Verify priority mapping uses valid tcs */
+       for (i = 0; i < TC_BITMASK + 1; i++) {
+               if (qopt->prio_tc_map[i] >= qopt->num_tc) {
+                       NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
+                       return -EINVAL;
+               }
+       }
+
+       for (i = 0; i < qopt->num_tc; i++) {
+               unsigned int last = qopt->offset[i] + qopt->count[i];
+
+               /* Verify the queue count is in tx range; being equal to
+                * real_num_tx_queues indicates the last queue is in use.
+                */
+               if (qopt->offset[i] >= dev->num_tx_queues ||
+                   !qopt->count[i] ||
+                   last > dev->real_num_tx_queues) {
+                       NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
+                       return -EINVAL;
+               }
+
+               /* Verify that the offset and counts do not overlap */
+               for (j = i + 1; j < qopt->num_tc; j++) {
+                       if (last > qopt->offset[j]) {
+                               NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
+                               return -EINVAL;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static ktime_t taprio_get_start_time(struct Qdisc *sch)
+{
+       struct taprio_sched *q = qdisc_priv(sch);
+       struct sched_entry *entry;
+       ktime_t now, base, cycle;
+       s64 n;
+
+       base = ns_to_ktime(q->base_time);
+       cycle = 0;
+
+       /* Calculate the cycle time by summing all the intervals.
+        */
+       list_for_each_entry(entry, &q->entries, list)
+               cycle = ktime_add_ns(cycle, entry->interval);
+
+       if (!cycle)
+               return base;
+
+       now = q->get_time();
+
+       if (ktime_after(base, now))
+               return base;
+
+       /* Schedule the start time for the beginning of the next
+        * cycle.
+        */
+       n = div64_s64(ktime_sub_ns(now, base), cycle);
+
+       return ktime_add_ns(base, (n + 1) * cycle);
+}
+
+static void taprio_start_sched(struct Qdisc *sch, ktime_t start)
+{
+       struct taprio_sched *q = qdisc_priv(sch);
+       struct sched_entry *first;
+       unsigned long flags;
+
+       spin_lock_irqsave(&q->current_entry_lock, flags);
+
+       first = list_first_entry(&q->entries, struct sched_entry,
+                                list);
+
+       first->close_time = ktime_add_ns(start, first->interval);
+       atomic_set(&first->budget,
+                  (first->interval * 1000) / q->picos_per_byte);
+       rcu_assign_pointer(q->current_entry, NULL);
+
+       spin_unlock_irqrestore(&q->current_entry_lock, flags);
+
+       hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
+}
+
+static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
+                        struct netlink_ext_ack *extack)
+{
+       struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
+       struct taprio_sched *q = qdisc_priv(sch);
+       struct net_device *dev = qdisc_dev(sch);
+       struct tc_mqprio_qopt *mqprio = NULL;
+       struct ethtool_link_ksettings ecmd;
+       int i, err, size;
+       s64 link_speed;
+       ktime_t start;
+
+       err = nla_parse_nested(tb, TCA_TAPRIO_ATTR_MAX, opt,
+                              taprio_policy, extack);
+       if (err < 0)
+               return err;
+
+       err = -EINVAL;
+       if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
+               mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);
+
+       err = taprio_parse_mqprio_opt(dev, mqprio, extack);
+       if (err < 0)
+               return err;
+
+       /* A schedule with zero entries is an error */
+       size = parse_taprio_opt(tb, q, extack);
+       if (size < 0)
+               return size;
+
+       hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
+       q->advance_timer.function = advance_sched;
+
+       switch (q->clockid) {
+       case CLOCK_REALTIME:
+               q->get_time = ktime_get_real;
+               break;
+       case CLOCK_MONOTONIC:
+               q->get_time = ktime_get;
+               break;
+       case CLOCK_BOOTTIME:
+               q->get_time = ktime_get_boottime;
+               break;
+       case CLOCK_TAI:
+               q->get_time = ktime_get_clocktai;
+               break;
+       default:
+               return -ENOTSUPP;
+       }
+
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               struct netdev_queue *dev_queue;
+               struct Qdisc *qdisc;
+
+               dev_queue = netdev_get_tx_queue(dev, i);
+               qdisc = qdisc_create_dflt(dev_queue,
+                                         &pfifo_qdisc_ops,
+                                         TC_H_MAKE(TC_H_MAJ(sch->handle),
+                                                   TC_H_MIN(i + 1)),
+                                         extack);
+               if (!qdisc)
+                       return -ENOMEM;
+
+               if (i < dev->real_num_tx_queues)
+                       qdisc_hash_add(qdisc, false);
+
+               q->qdiscs[i] = qdisc;
+       }
+
+       if (mqprio) {
+               netdev_set_num_tc(dev, mqprio->num_tc);
+               for (i = 0; i < mqprio->num_tc; i++)
+                       netdev_set_tc_queue(dev, i,
+                                           mqprio->count[i],
+                                           mqprio->offset[i]);
+
+               /* Always use supplied priority mappings */
+               for (i = 0; i < TC_BITMASK + 1; i++)
+                       netdev_set_prio_tc_map(dev, i,
+                                              mqprio->prio_tc_map[i]);
+       }
+
+       if (!__ethtool_get_link_ksettings(dev, &ecmd))
+               link_speed = ecmd.base.speed;
+       else
+               link_speed = SPEED_1000;
+
+       q->picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
+                                     link_speed * 1000 * 1000);
+
+       start = taprio_get_start_time(sch);
+       if (!start)
+               return 0;
+
+       taprio_start_sched(sch, start);
+
+       return 0;
+}
+
+static void taprio_destroy(struct Qdisc *sch)
+{
+       struct taprio_sched *q = qdisc_priv(sch);
+       struct net_device *dev = qdisc_dev(sch);
+       struct sched_entry *entry, *n;
+       unsigned int i;
+
+       hrtimer_cancel(&q->advance_timer);
+
+       if (q->qdiscs) {
+               for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
+                       qdisc_put(q->qdiscs[i]);
+
+               kfree(q->qdiscs);
+       }
+       q->qdiscs = NULL;
+
+       netdev_set_num_tc(dev, 0);
+
+       list_for_each_entry_safe(entry, n, &q->entries, list) {
+               list_del(&entry->list);
+               kfree(entry);
+       }
+}
+
+static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
+                      struct netlink_ext_ack *extack)
+{
+       struct taprio_sched *q = qdisc_priv(sch);
+       struct net_device *dev = qdisc_dev(sch);
+
+       INIT_LIST_HEAD(&q->entries);
+       spin_lock_init(&q->current_entry_lock);
+
+       /* We may overwrite the configuration later */
+       hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
+
+       q->root = sch;
+
+       /* We only support static clockids. Use an invalid value as default
+        * and get the valid one on taprio_change().
+        */
+       q->clockid = -1;
+
+       if (sch->parent != TC_H_ROOT)
+               return -EOPNOTSUPP;
+
+       if (!netif_is_multiqueue(dev))
+               return -EOPNOTSUPP;
+
+       /* pre-allocate qdisc, attachment can't fail */
+       q->qdiscs = kcalloc(dev->num_tx_queues,
+                           sizeof(q->qdiscs[0]),
+                           GFP_KERNEL);
+
+       if (!q->qdiscs)
+               return -ENOMEM;
+
+       if (!opt)
+               return -EINVAL;
+
+       return taprio_change(sch, opt, extack);
+}
+
+static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
+                                            unsigned long cl)
+{
+       struct net_device *dev = qdisc_dev(sch);
+       unsigned long ntx = cl - 1;
+
+       if (ntx >= dev->num_tx_queues)
+               return NULL;
+
+       return netdev_get_tx_queue(dev, ntx);
+}
+
+static int taprio_graft(struct Qdisc *sch, unsigned long cl,
+                       struct Qdisc *new, struct Qdisc **old,
+                       struct netlink_ext_ack *extack)
+{
+       struct taprio_sched *q = qdisc_priv(sch);
+       struct net_device *dev = qdisc_dev(sch);
+       struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
+
+       if (!dev_queue)
+               return -EINVAL;
+
+       if (dev->flags & IFF_UP)
+               dev_deactivate(dev);
+
+       *old = q->qdiscs[cl - 1];
+       q->qdiscs[cl - 1] = new;
+
+       if (new)
+               new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+
+       if (dev->flags & IFF_UP)
+               dev_activate(dev);
+
+       return 0;
+}
+
+static int dump_entry(struct sk_buff *msg,
+                     const struct sched_entry *entry)
+{
+       struct nlattr *item;
+
+       item = nla_nest_start(msg, TCA_TAPRIO_SCHED_ENTRY);
+       if (!item)
+               return -ENOSPC;
+
+       if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
+               goto nla_put_failure;
+
+       if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
+               goto nla_put_failure;
+
+       if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
+                       entry->gate_mask))
+               goto nla_put_failure;
+
+       if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
+                       entry->interval))
+               goto nla_put_failure;
+
+       return nla_nest_end(msg, item);
+
+nla_put_failure:
+       nla_nest_cancel(msg, item);
+       return -1;
+}
+
+static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+       struct taprio_sched *q = qdisc_priv(sch);
+       struct net_device *dev = qdisc_dev(sch);
+       struct tc_mqprio_qopt opt = { 0 };
+       struct nlattr *nest, *entry_list;
+       struct sched_entry *entry;
+       unsigned int i;
+
+       opt.num_tc = netdev_get_num_tc(dev);
+       memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
+
+       for (i = 0; i < netdev_get_num_tc(dev); i++) {
+               opt.count[i] = dev->tc_to_txq[i].count;
+               opt.offset[i] = dev->tc_to_txq[i].offset;
+       }
+
+       nest = nla_nest_start(skb, TCA_OPTIONS);
+       if (!nest)
+               return -ENOSPC;
+
+       if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
+               goto options_error;
+
+       if (nla_put_s64(skb, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
+                       q->base_time, TCA_TAPRIO_PAD))
+               goto options_error;
+
+       if (nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
+               goto options_error;
+
+       entry_list = nla_nest_start(skb, TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
+       if (!entry_list)
+               goto options_error;
+
+       list_for_each_entry(entry, &q->entries, list) {
+               if (dump_entry(skb, entry) < 0)
+                       goto options_error;
+       }
+
+       nla_nest_end(skb, entry_list);
+
+       return nla_nest_end(skb, nest);
+
+options_error:
+       nla_nest_cancel(skb, nest);
+       return -1;
+}
+
+static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
+{
+       struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
+
+       if (!dev_queue)
+               return NULL;
+
+       return dev_queue->qdisc_sleeping;
+}
+
+static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
+{
+       unsigned int ntx = TC_H_MIN(classid);
+
+       if (!taprio_queue_get(sch, ntx))
+               return 0;
+       return ntx;
+}
+
+static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
+                            struct sk_buff *skb, struct tcmsg *tcm)
+{
+       struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
+
+       tcm->tcm_parent = TC_H_ROOT;
+       tcm->tcm_handle |= TC_H_MIN(cl);
+       tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
+
+       return 0;
+}
+
+static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+                                  struct gnet_dump *d)
+       __releases(d->lock)
+       __acquires(d->lock)
+{
+       struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
+
+       sch = dev_queue->qdisc_sleeping;
+       if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
+           gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
+               return -1;
+       return 0;
+}
+
+static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+       struct net_device *dev = qdisc_dev(sch);
+       unsigned long ntx;
+
+       if (arg->stop)
+               return;
+
+       arg->count = arg->skip;
+       for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
+               if (arg->fn(sch, ntx + 1, arg) < 0) {
+                       arg->stop = 1;
+                       break;
+               }
+               arg->count++;
+       }
+}
+
+static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
+                                               struct tcmsg *tcm)
+{
+       return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
+}
+
+static const struct Qdisc_class_ops taprio_class_ops = {
+       .graft          = taprio_graft,
+       .leaf           = taprio_leaf,
+       .find           = taprio_find,
+       .walk           = taprio_walk,
+       .dump           = taprio_dump_class,
+       .dump_stats     = taprio_dump_class_stats,
+       .select_queue   = taprio_select_queue,
+};
+
+static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
+       .cl_ops         = &taprio_class_ops,
+       .id             = "taprio",
+       .priv_size      = sizeof(struct taprio_sched),
+       .init           = taprio_init,
+       .destroy        = taprio_destroy,
+       .peek           = taprio_peek,
+       .dequeue        = taprio_dequeue,
+       .enqueue        = taprio_enqueue,
+       .dump           = taprio_dump,
+       .owner          = THIS_MODULE,
+};
+
+static int __init taprio_module_init(void)
+{
+       return register_qdisc(&taprio_qdisc_ops);
+}
+
+static void __exit taprio_module_exit(void)
+{
+       unregister_qdisc(&taprio_qdisc_ops);
+}
+
+module_init(taprio_module_init);
+module_exit(taprio_module_exit);
+MODULE_LICENSE("GPL");
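
A note on the arithmetic above: picos_per_byte works out to 8 * 10^12 divided
by the link speed in bit/s, i.e. 8000 ps/byte at 1 Gbit/s, so a 300 us
schedule entry gets a byte budget of 300000 ns * 1000 / 8000 = 37500 bytes in
advance_sched(). A configuration sketch in the spirit of this qdisc follows;
the interface name, base-time and gate masks are illustrative assumptions,
and it presumes an iproute2 build with taprio support:

    tc qdisc replace dev eth0 parent root handle 100 taprio \
            num_tc 3 \
            map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
            queues 1@0 1@1 2@2 \
            base-time 1528743495910289987 \
            sched-entry S 01 300000 \
            sched-entry S 02 300000 \
            sched-entry S 04 300000 \
            clockid CLOCK_TAI

Each "sched-entry S <gatemask> <interval>" opens the traffic classes whose
bits are set in <gatemask> for <interval> nanoseconds; after the last entry
closes, the cycle restarts from the first one.
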
index d74d00b299421a940766f157bf8b48038b827cd1..42191ed9902b8dd38ad41b6221bd4210427b193b 100644 (file)
@@ -1048,7 +1048,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
                if (!ctx->packet || !ctx->packet->has_cookie_echo)
                        return;
 
-               /* fallthru */
+               /* fall through */
        case SCTP_STATE_ESTABLISHED:
        case SCTP_STATE_SHUTDOWN_PENDING:
        case SCTP_STATE_SHUTDOWN_RECEIVED:
index 01f3f8f32d6f927fd77bac5920ab5d8339c2f6af..713dc4833d40c729ff422e70ffde5ddee626c3bd 100644 (file)
@@ -1475,7 +1475,7 @@ int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen)
        sock = sockfd_lookup_light(fd, &err, &fput_needed);
        if (sock) {
                err = move_addr_to_kernel(umyaddr, addrlen, &address);
-               if (err >= 0) {
+               if (!err) {
                        err = security_socket_bind(sock,
                                                   (struct sockaddr *)&address,
                                                   addrlen);
index 91891041e5e1bd905b30af890ba1f807aa88cb87..e65c3a8551e4d1a139015c02ded91fcf55ea8d88 100644 (file)
@@ -609,16 +609,18 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
 
        switch (evt) {
        case NETDEV_CHANGE:
-               if (netif_carrier_ok(dev))
+               if (netif_carrier_ok(dev) && netif_oper_up(dev)) {
+                       test_and_set_bit_lock(0, &b->up);
                        break;
-               /* else: fall through */
-       case NETDEV_UP:
-               test_and_set_bit_lock(0, &b->up);
-               break;
+               }
+               /* fall through */
        case NETDEV_GOING_DOWN:
                clear_bit_unlock(0, &b->up);
                tipc_reset_bearer(net, b);
                break;
+       case NETDEV_UP:
+               test_and_set_bit_lock(0, &b->up);
+               break;
        case NETDEV_CHANGEMTU:
                if (tipc_mtu_bad(dev, 0)) {
                        bearer_disable(net, b);
index b1f0bee54eacc9eb1974169853abf1ace4df2733..fb886b525d950e18f7ef517bac408272d17e8d4e 100644 (file)
@@ -410,6 +410,11 @@ char *tipc_link_name(struct tipc_link *l)
        return l->name;
 }
 
+u32 tipc_link_state(struct tipc_link *l)
+{
+       return l->state;
+}
+
 /**
  * tipc_link_create - create a new link
  * @n: pointer to associated node
@@ -841,9 +846,14 @@ void tipc_link_reset(struct tipc_link *l)
        l->in_session = false;
        l->session++;
        l->mtu = l->advertised_mtu;
+       spin_lock_bh(&l->wakeupq.lock);
+       spin_lock_bh(&l->inputq->lock);
+       skb_queue_splice_init(&l->wakeupq, l->inputq);
+       spin_unlock_bh(&l->inputq->lock);
+       spin_unlock_bh(&l->wakeupq.lock);
+
        __skb_queue_purge(&l->transmq);
        __skb_queue_purge(&l->deferdq);
-       skb_queue_splice_init(&l->wakeupq, l->inputq);
        __skb_queue_purge(&l->backlogq);
        l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
        l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
@@ -1380,6 +1390,36 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
        __skb_queue_tail(xmitq, skb);
 }
 
+void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
+                                   struct sk_buff_head *xmitq)
+{
+       u32 onode = tipc_own_addr(l->net);
+       struct tipc_msg *hdr, *ihdr;
+       struct sk_buff_head tnlq;
+       struct sk_buff *skb;
+       u32 dnode = l->addr;
+
+       skb_queue_head_init(&tnlq);
+       skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
+                             INT_H_SIZE, BASIC_H_SIZE,
+                             dnode, onode, 0, 0, 0);
+       if (!skb) {
+               pr_warn("%sunable to create tunnel packet\n", link_co_err);
+               return;
+       }
+
+       hdr = buf_msg(skb);
+       msg_set_msgcnt(hdr, 1);
+       msg_set_bearer_id(hdr, l->peer_bearer_id);
+
+       ihdr = (struct tipc_msg *)msg_data(hdr);
+       tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
+                     BASIC_H_SIZE, dnode);
+       msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
+       __skb_queue_tail(&tnlq, skb);
+       tipc_link_xmit(l, &tnlq, xmitq);
+}
+
 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
  * with contents of the link's transmit and backlog queues.
  */
@@ -1476,6 +1516,9 @@ bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
                        return false;
                if (session != curr_session)
                        return false;
+               /* Extra sanity check */
+               if (!link_is_up(l) && msg_ack(hdr))
+                       return false;
                if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
                        return true;
                /* Accept only STATE with new sequence number */
index 7bc494a33fdf1c3cdf8feb04b44db7e6e04a349c..90488c538a4e4edaddfaf441a517f8b29c1d2ebc 100644 (file)
@@ -88,6 +88,8 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
                         struct tipc_link **link);
 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
                           int mtyp, struct sk_buff_head *xmitq);
+void tipc_link_create_dummy_tnl_msg(struct tipc_link *tnl,
+                                   struct sk_buff_head *xmitq);
 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
 int tipc_link_fsm_evt(struct tipc_link *l, int evt);
 bool tipc_link_is_up(struct tipc_link *l);
@@ -107,6 +109,7 @@ u16 tipc_link_rcv_nxt(struct tipc_link *l);
 u16 tipc_link_acked(struct tipc_link *l);
 u32 tipc_link_id(struct tipc_link *l);
 char *tipc_link_name(struct tipc_link *l);
+u32 tipc_link_state(struct tipc_link *l);
 char tipc_link_plane(struct tipc_link *l);
 int tipc_link_prio(struct tipc_link *l);
 int tipc_link_window(struct tipc_link *l);
index b61891054709597279d6204885a069b848dc869a..f48e5857210ff0f6ce0b169c582654823ad7e0d3 100644 (file)
@@ -499,54 +499,56 @@ bool tipc_msg_make_bundle(struct sk_buff **skb,  struct tipc_msg *msg,
 /**
  * tipc_msg_reverse(): swap source and destination addresses and add error code
  * @own_node: originating node id for reversed message
- * @skb:  buffer containing message to be reversed; may be replaced.
+ * @skb:  buffer containing message to be reversed; will be consumed
  * @err:  error code to be set in message, if any
- * Consumes buffer at failure
+ * Replaces consumed buffer with new one when successful
  * Returns true if success, otherwise false
  */
 bool tipc_msg_reverse(u32 own_node,  struct sk_buff **skb, int err)
 {
        struct sk_buff *_skb = *skb;
-       struct tipc_msg *hdr;
-       struct tipc_msg ohdr;
-       int dlen;
+       struct tipc_msg *_hdr, *hdr;
+       int hlen, dlen;
 
        if (skb_linearize(_skb))
                goto exit;
-       hdr = buf_msg(_skb);
-       dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE);
-       if (msg_dest_droppable(hdr))
+       _hdr = buf_msg(_skb);
+       dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
+       hlen = msg_hdr_sz(_hdr);
+
+       if (msg_dest_droppable(_hdr))
                goto exit;
-       if (msg_errcode(hdr))
+       if (msg_errcode(_hdr))
                goto exit;
 
-       /* Take a copy of original header before altering message */
-       memcpy(&ohdr, hdr, msg_hdr_sz(hdr));
-
-       /* Never return SHORT header; expand by replacing buffer if necessary */
-       if (msg_short(hdr)) {
-               *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen, GFP_ATOMIC);
-               if (!*skb)
-                       goto exit;
-               memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);
-               kfree_skb(_skb);
-               _skb = *skb;
-               hdr = buf_msg(_skb);
-               memcpy(hdr, &ohdr, BASIC_H_SIZE);
-               msg_set_hdr_sz(hdr, BASIC_H_SIZE);
-       }
+       /* Never return SHORT header */
+       if (hlen == SHORT_H_SIZE)
+               hlen = BASIC_H_SIZE;
+
+       /* Don't return data along with SYN+; the sender has a clone */
+       if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
+               dlen = 0;
+
+       /* Allocate new buffer to return */
+       *skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
+       if (!*skb)
+               goto exit;
+       memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
+       memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);
 
-       /* Now reverse the concerned fields */
+       /* Build reverse header in new buffer */
+       hdr = buf_msg(*skb);
+       msg_set_hdr_sz(hdr, hlen);
        msg_set_errcode(hdr, err);
        msg_set_non_seq(hdr, 0);
-       msg_set_origport(hdr, msg_destport(&ohdr));
-       msg_set_destport(hdr, msg_origport(&ohdr));
-       msg_set_destnode(hdr, msg_prevnode(&ohdr));
+       msg_set_origport(hdr, msg_destport(_hdr));
+       msg_set_destport(hdr, msg_origport(_hdr));
+       msg_set_destnode(hdr, msg_prevnode(_hdr));
        msg_set_prevnode(hdr, own_node);
        msg_set_orignode(hdr, own_node);
-       msg_set_size(hdr, msg_hdr_sz(hdr) + dlen);
-       skb_trim(_skb, msg_size(hdr));
+       msg_set_size(hdr, hlen + dlen);
        skb_orphan(_skb);
+       kfree_skb(_skb);
        return true;
 exit:
        kfree_skb(_skb);
@@ -554,6 +556,22 @@ exit:
        return false;
 }
 
+bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
+{
+       struct sk_buff *skb, *_skb;
+
+       skb_queue_walk(msg, skb) {
+               _skb = skb_clone(skb, GFP_ATOMIC);
+               if (!_skb) {
+                       __skb_queue_purge(cpy);
+                       pr_err_ratelimited("Failed to clone buffer chain\n");
+                       return false;
+               }
+               __skb_queue_tail(cpy, _skb);
+       }
+       return true;
+}
+
 /**
  * tipc_msg_lookup_dest(): try to find new destination for named message
  * @skb: the buffer containing the message.
index a4e944d593942042ce1b506d42a4c585e8aafc62..a2879e6ec5b69500e68390d0d2c9984409028ae7 100644 (file)
@@ -216,6 +216,16 @@ static inline void msg_set_non_seq(struct tipc_msg *m, u32 n)
        msg_set_bits(m, 0, 20, 1, n);
 }
 
+static inline int msg_is_syn(struct tipc_msg *m)
+{
+       return msg_bits(m, 0, 17, 1);
+}
+
+static inline void msg_set_syn(struct tipc_msg *m, u32 d)
+{
+       msg_set_bits(m, 0, 17, 1, d);
+}
+
 static inline int msg_dest_droppable(struct tipc_msg *m)
 {
        return msg_bits(m, 0, 19, 1);
@@ -970,6 +980,7 @@ bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
                        struct sk_buff_head *cpy);
 void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
                             struct sk_buff *skb);
+bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy);
 
 static inline u16 buf_seqno(struct sk_buff *skb)
 {
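
The msg_is_syn()/msg_set_syn() helpers added above follow TIPC's usual
pattern of packing single-bit flags into header word 0 at a fixed offset
(here bit 17). A minimal userspace sketch of that pattern, assuming plain
host byte order (the real msg_bits()/msg_set_bits() additionally convert to
and from network byte order, which is omitted here):

    #include <stdint.h>
    #include <stdio.h>

    /* Extract a mask-wide field at bit position pos of a header word. */
    static uint32_t get_bits(uint32_t w, unsigned int pos, uint32_t mask)
    {
            return (w >> pos) & mask;
    }

    /* Write the field, clearing it first so stale bits cannot survive. */
    static uint32_t set_bits(uint32_t w, unsigned int pos, uint32_t mask,
                             uint32_t val)
    {
            w &= ~(mask << pos);
            return w | ((val & mask) << pos);
    }

    int main(void)
    {
            uint32_t word0 = 0;

            word0 = set_bits(word0, 17, 1, 1);          /* msg_set_syn(m, 1) */
            printf("syn=%u\n", get_bits(word0, 17, 1)); /* prints syn=1 */
            return 0;
    }
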
index 68014f1b69765269236ac0a6d839ca754d1235da..2afc4f8c37a74db4896508283f434909a0151732 100644 (file)
@@ -111,6 +111,7 @@ struct tipc_node {
        int action_flags;
        struct list_head list;
        int state;
+       bool failover_sent;
        u16 sync_point;
        int link_cnt;
        u16 working_links;
@@ -680,6 +681,7 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
                *slot0 = bearer_id;
                *slot1 = bearer_id;
                tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
+               n->failover_sent = false;
                n->action_flags |= TIPC_NOTIFY_NODE_UP;
                tipc_link_set_active(nl, true);
                tipc_bcast_add_peer(n->net, nl, xmitq);
@@ -911,6 +913,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
        bool reset = true;
        char *if_name;
        unsigned long intv;
+       u16 session;
 
        *dupl_addr = false;
        *respond = false;
@@ -997,9 +1000,10 @@ void tipc_node_check_dest(struct net *net, u32 addr,
                        goto exit;
 
                if_name = strchr(b->name, ':') + 1;
+               get_random_bytes(&session, sizeof(u16));
                if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
                                      b->net_plane, b->mtu, b->priority,
-                                     b->window, mod(tipc_net(net)->random),
+                                     b->window, session,
                                      tipc_own_addr(net), addr, peer_id,
                                      n->capabilities,
                                      tipc_bc_sndlink(n->net), n->bc_entry.link,
@@ -1615,6 +1619,14 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
                        tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
                                                        tipc_link_inputq(l));
                }
+               /* If parallel link was already down, and this happened before
+                * the tunnel link came up, FAILOVER was never sent. Ensure that
+                * FAILOVER is sent to get peer out of NODE_FAILINGOVER state.
+                */
+               if (n->state != NODE_FAILINGOVER && !n->failover_sent) {
+                       tipc_link_create_dummy_tnl_msg(l, xmitq);
+                       n->failover_sent = true;
+               }
                /* If pkts arrive out of order, use lowest calculated syncpt */
                if (less(syncpt, n->sync_point))
                        n->sync_point = syncpt;
index 48b3298a248d493e083cb4e39adeebe3ab3f38a5..03f5efb62cfba24bfc44dc6975e0b3cf14105619 100644 (file)
@@ -45,6 +45,7 @@
 /* Optional capabilities supported by this code version
  */
 enum {
+       TIPC_SYN_BIT          = (1),
        TIPC_BCAST_SYNCH      = (1 << 1),
        TIPC_BCAST_STATE_NACK = (1 << 2),
        TIPC_BLOCK_FLOWCTL    = (1 << 3),
@@ -53,11 +54,12 @@ enum {
        TIPC_LINK_PROTO_SEQNO = (1 << 6)
 };
 
-#define TIPC_NODE_CAPABILITIES (TIPC_BCAST_SYNCH       |  \
-                               TIPC_BCAST_STATE_NACK  |  \
-                               TIPC_BCAST_RCAST       |  \
-                               TIPC_BLOCK_FLOWCTL     |  \
-                               TIPC_NODE_ID128        |  \
+#define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT           |  \
+                               TIPC_BCAST_SYNCH       |   \
+                               TIPC_BCAST_STATE_NACK  |   \
+                               TIPC_BCAST_RCAST       |   \
+                               TIPC_BLOCK_FLOWCTL     |   \
+                               TIPC_NODE_ID128        |   \
                                TIPC_LINK_PROTO_SEQNO)
 #define INVALID_BEARER_ID -1
 
index 3f03ddd0e35b2f1b6acad1c788faee1976924b3b..db148c4a916a1fe64833eb5150d220bbe9072484 100644 (file)
@@ -47,7 +47,7 @@
 #include "netlink.h"
 #include "group.h"
 
-#define CONN_TIMEOUT_DEFAULT   8000    /* default connect timeout = 8s */
+#define CONN_TIMEOUT_DEFAULT    8000    /* default connect timeout = 8s */
 #define CONN_PROBING_INTV      msecs_to_jiffies(3600000)  /* [ms] => 1 h */
 #define TIPC_FWD_MSG           1
 #define TIPC_MAX_PORT          0xffffffff
@@ -80,7 +80,6 @@ struct sockaddr_pair {
  * @publications: list of publications for port
  * @blocking_link: address of the congested link we are currently sleeping on
  * @pub_count: total # of publications port has made during its lifetime
- * @probing_state:
  * @conn_timeout: the time we can wait for an unresponded setup request
  * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
  * @cong_link_cnt: number of congested links
@@ -102,8 +101,8 @@ struct tipc_sock {
        struct list_head cong_links;
        struct list_head publications;
        u32 pub_count;
-       uint conn_timeout;
        atomic_t dupl_rcvcnt;
+       u16 conn_timeout;
        bool probe_unacked;
        u16 cong_link_cnt;
        u16 snt_unacked;
@@ -507,6 +506,9 @@ static void __tipc_shutdown(struct socket *sock, int error)
        tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
                                            !tsk_conn_cong(tsk)));
 
+       /* Remove any pending SYN message */
+       __skb_queue_purge(&sk->sk_write_queue);
+
        /* Reject all unreceived messages, except on an active connection
         * (which disconnects locally & sends a 'FIN+' to peer).
         */
@@ -1319,6 +1321,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
                        tsk->conn_type = dest->addr.name.name.type;
                        tsk->conn_instance = dest->addr.name.name.instance;
                }
+               msg_set_syn(hdr, 1);
        }
 
        seq = &dest->addr.nameseq;
@@ -1361,6 +1364,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
        rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
        if (unlikely(rc != dlen))
                return rc;
+       if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue)))
+               return -ENOMEM;
 
        rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
        if (unlikely(rc == -ELINKCONG)) {
@@ -1419,8 +1424,10 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
        /* Handle implicit connection setup */
        if (unlikely(dest)) {
                rc = __tipc_sendmsg(sock, m, dlen);
-               if (dlen && (dlen == rc))
+               if (dlen && dlen == rc) {
+                       tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
                        tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
+               }
                return rc;
        }
 
@@ -1478,6 +1485,7 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
        struct net *net = sock_net(sk);
        struct tipc_msg *msg = &tsk->phdr;
 
+       msg_set_syn(msg, 0);
        msg_set_destnode(msg, peer_node);
        msg_set_destport(msg, peer_port);
        msg_set_type(msg, TIPC_CONN_MSG);
@@ -1489,6 +1497,7 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
        tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
        tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
        tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
+       __skb_queue_purge(&sk->sk_write_queue);
        if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
                return;
 
@@ -1959,91 +1968,90 @@ static void tipc_sk_proto_rcv(struct sock *sk,
 }
 
 /**
- * tipc_filter_connect - Handle incoming message for a connection-based socket
+ * tipc_sk_filter_connect - check incoming message for a connection-based socket
  * @tsk: TIPC socket
- * @skb: pointer to message buffer. Set to NULL if buffer is consumed
- *
- * Returns true if everything ok, false otherwise
+ * @skb: pointer to message buffer.
+ * Returns true if message should be added to receive queue, false otherwise
  */
 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
 {
        struct sock *sk = &tsk->sk;
        struct net *net = sock_net(sk);
        struct tipc_msg *hdr = buf_msg(skb);
-       u32 pport = msg_origport(hdr);
-       u32 pnode = msg_orignode(hdr);
+       bool con_msg = msg_connected(hdr);
+       u32 pport = tsk_peer_port(tsk);
+       u32 pnode = tsk_peer_node(tsk);
+       u32 oport = msg_origport(hdr);
+       u32 onode = msg_orignode(hdr);
+       int err = msg_errcode(hdr);
+       unsigned long delay;
 
        if (unlikely(msg_mcast(hdr)))
                return false;
 
        switch (sk->sk_state) {
        case TIPC_CONNECTING:
-               /* Accept only ACK or NACK message */
-               if (unlikely(!msg_connected(hdr))) {
-                       if (pport != tsk_peer_port(tsk) ||
-                           pnode != tsk_peer_node(tsk))
-                               return false;
-
-                       tipc_set_sk_state(sk, TIPC_DISCONNECTING);
-                       sk->sk_err = ECONNREFUSED;
-                       sk->sk_state_change(sk);
-                       return true;
-               }
-
-               if (unlikely(msg_errcode(hdr))) {
-                       tipc_set_sk_state(sk, TIPC_DISCONNECTING);
-                       sk->sk_err = ECONNREFUSED;
-                       sk->sk_state_change(sk);
-                       return true;
-               }
-
-               if (unlikely(!msg_isdata(hdr))) {
-                       tipc_set_sk_state(sk, TIPC_DISCONNECTING);
-                       sk->sk_err = EINVAL;
-                       sk->sk_state_change(sk);
-                       return true;
+               /* Setup ACK */
+               if (likely(con_msg)) {
+                       if (err)
+                               break;
+                       tipc_sk_finish_conn(tsk, oport, onode);
+                       msg_set_importance(&tsk->phdr, msg_importance(hdr));
+                       /* ACK+ message with data is added to receive queue */
+                       if (msg_data_sz(hdr))
+                               return true;
+                       /* Empty ACK- message: wake up sleeping connect() and drop */
+                       sk->sk_data_ready(sk);
+                       msg_set_dest_droppable(hdr, 1);
+                       return false;
                }
+               /* Ignore connectionless message if not from listening socket */
+               if (oport != pport || onode != pnode)
+                       return false;
 
-               tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
-               msg_set_importance(&tsk->phdr, msg_importance(hdr));
-
-               /* If 'ACK+' message, add to socket receive queue */
-               if (msg_data_sz(hdr))
-                       return true;
-
-               /* If empty 'ACK-' message, wake up sleeping connect() */
-               sk->sk_data_ready(sk);
+               /* Rejected SYN */
+               if (err != TIPC_ERR_OVERLOAD)
+                       break;
 
-               /* 'ACK-' message is neither accepted nor rejected: */
-               msg_set_dest_droppable(hdr, 1);
+               /* Prepare for new setup attempt if we have a SYN clone */
+               if (skb_queue_empty(&sk->sk_write_queue))
+                       break;
+               get_random_bytes(&delay, 2);
+               delay %= (tsk->conn_timeout / 4);
+               delay = msecs_to_jiffies(delay + 100);
+               sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
                return false;
-
        case TIPC_OPEN:
        case TIPC_DISCONNECTING:
-               break;
+               return false;
        case TIPC_LISTEN:
                /* Accept only SYN message */
-               if (!msg_connected(hdr) && !(msg_errcode(hdr)))
+               if (!msg_is_syn(hdr) &&
+                   tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
+                       return false;
+               if (!con_msg && !err)
                        return true;
-               break;
+               return false;
        case TIPC_ESTABLISHED:
                /* Accept only connection-based messages sent by peer */
-               if (unlikely(!tsk_peer_msg(tsk, hdr)))
+               if (likely(con_msg && !err && pport == oport && pnode == onode))
+                       return true;
+               if (!tsk_peer_msg(tsk, hdr))
                        return false;
-
-               if (unlikely(msg_errcode(hdr))) {
-                       tipc_set_sk_state(sk, TIPC_DISCONNECTING);
-                       /* Let timer expire on it's own */
-                       tipc_node_remove_conn(net, tsk_peer_node(tsk),
-                                             tsk->portid);
-                       sk->sk_state_change(sk);
-               }
+               if (!err)
+                       return true;
+               tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+               tipc_node_remove_conn(net, pnode, tsk->portid);
+               sk->sk_state_change(sk);
                return true;
        default:
                pr_err("Unknown sk_state %u\n", sk->sk_state);
        }
-
-       return false;
+       /* Abort connection setup attempt */
+       tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+       sk->sk_err = ECONNREFUSED;
+       sk->sk_state_change(sk);
+       return true;
 }
 
 /**
@@ -2545,43 +2553,78 @@ static int tipc_shutdown(struct socket *sock, int how)
        return res;
 }
 
+static void tipc_sk_check_probing_state(struct sock *sk,
+                                       struct sk_buff_head *list)
+{
+       struct tipc_sock *tsk = tipc_sk(sk);
+       u32 pnode = tsk_peer_node(tsk);
+       u32 pport = tsk_peer_port(tsk);
+       u32 self = tsk_own_node(tsk);
+       u32 oport = tsk->portid;
+       struct sk_buff *skb;
+
+       if (tsk->probe_unacked) {
+               tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+               sk->sk_err = ECONNABORTED;
+               tipc_node_remove_conn(sock_net(sk), pnode, pport);
+               sk->sk_state_change(sk);
+               return;
+       }
+       /* Prepare new probe */
+       skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
+                             pnode, self, pport, oport, TIPC_OK);
+       if (skb)
+               __skb_queue_tail(list, skb);
+       tsk->probe_unacked = true;
+       sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
+}
+
+static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
+{
+       struct tipc_sock *tsk = tipc_sk(sk);
+
+       /* Try again later if dest link is congested */
+       if (tsk->cong_link_cnt) {
+               sk_reset_timer(sk, &sk->sk_timer, msecs_to_jiffies(100));
+               return;
+       }
+       /* Prepare SYN for retransmit */
+       tipc_msg_skb_clone(&sk->sk_write_queue, list);
+}
+
 static void tipc_sk_timeout(struct timer_list *t)
 {
        struct sock *sk = from_timer(sk, t, sk_timer);
        struct tipc_sock *tsk = tipc_sk(sk);
-       u32 peer_port = tsk_peer_port(tsk);
-       u32 peer_node = tsk_peer_node(tsk);
-       u32 own_node = tsk_own_node(tsk);
-       u32 own_port = tsk->portid;
-       struct net *net = sock_net(sk);
-       struct sk_buff *skb = NULL;
+       u32 pnode = tsk_peer_node(tsk);
+       struct sk_buff_head list;
+       int rc = 0;
 
+       skb_queue_head_init(&list);
        bh_lock_sock(sk);
-       if (!tipc_sk_connected(sk))
-               goto exit;
 
        /* Try again later if socket is busy */
        if (sock_owned_by_user(sk)) {
                sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
-               goto exit;
+               bh_unlock_sock(sk);
+               return;
        }
 
-       if (tsk->probe_unacked) {
-               tipc_set_sk_state(sk, TIPC_DISCONNECTING);
-               tipc_node_remove_conn(net, peer_node, peer_port);
-               sk->sk_state_change(sk);
-               goto exit;
-       }
-       /* Send new probe */
-       skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
-                             peer_node, own_node, peer_port, own_port,
-                             TIPC_OK);
-       tsk->probe_unacked = true;
-       sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
-exit:
+       if (sk->sk_state == TIPC_ESTABLISHED)
+               tipc_sk_check_probing_state(sk, &list);
+       else if (sk->sk_state == TIPC_CONNECTING)
+               tipc_sk_retry_connect(sk, &list);
+
        bh_unlock_sock(sk);
-       if (skb)
-               tipc_node_xmit_skb(net, skb, peer_node, own_port);
+
+       if (!skb_queue_empty(&list))
+               rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
+
+       /* SYN messages may cause link congestion */
+       if (rc == -ELINKCONG) {
+               tipc_dest_push(&tsk->cong_links, pnode, 0);
+               tsk->cong_link_cnt = 1;
+       }
        sock_put(sk);
 }
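
The timer path above retries a pending connect with a randomized backoff: two random bytes, reduced modulo a quarter of the connect timeout, plus a fixed 100 ms floor. A minimal userspace sketch of the same computation (plain C; rand() stands in for the kernel's get_random_bytes(), and TIPC's 8000 ms default connect timeout is assumed):

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    /* Backoff as in the hunk above: two random bytes, reduced modulo a
     * quarter of the connect timeout, plus a fixed 100 ms floor. */
    static unsigned int syn_retry_delay_ms(unsigned int conn_timeout_ms)
    {
        unsigned short r = (unsigned short)rand(); /* stand-in for get_random_bytes() */

        return 100 + r % (conn_timeout_ms / 4);
    }

    int main(void)
    {
        srand((unsigned int)time(NULL));
        /* Assuming the 8000 ms default connect timeout, the SYN clone is
         * retransmitted between 100 and roughly 2100 ms from now. */
        printf("retry in %u ms\n", syn_retry_delay_ms(8000));
        return 0;
    }

The random component spreads simultaneous reconnect attempts from many sockets over time; the floor prevents an immediate, certainly futile retransmit.
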
 
index 4c18b4dba284615870299480876b7dd78dd4bc2c..aa9fdce272b62c7fd60808e1b5c416f591e7ce64 100644 (file)
@@ -248,7 +248,7 @@ static void trim_both_sgl(struct sock *sk, int target_size)
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;
 
-       trim_sg(sk, rec->sg_plaintext_data,
+       trim_sg(sk, &rec->sg_plaintext_data[1],
                &rec->sg_plaintext_num_elem,
                &rec->sg_plaintext_size,
                target_size);
@@ -256,7 +256,7 @@ static void trim_both_sgl(struct sock *sk, int target_size)
        if (target_size > 0)
                target_size += tls_ctx->tx.overhead_size;
 
-       trim_sg(sk, rec->sg_encrypted_data,
+       trim_sg(sk, &rec->sg_encrypted_data[1],
                &rec->sg_encrypted_num_elem,
                &rec->sg_encrypted_size,
                target_size);
@@ -270,31 +270,83 @@ static int alloc_encrypted_sg(struct sock *sk, int len)
        int rc = 0;
 
        rc = sk_alloc_sg(sk, len,
-                        rec->sg_encrypted_data, 0,
+                        &rec->sg_encrypted_data[1], 0,
                         &rec->sg_encrypted_num_elem,
                         &rec->sg_encrypted_size, 0);
 
        if (rc == -ENOSPC)
-               rec->sg_encrypted_num_elem = ARRAY_SIZE(rec->sg_encrypted_data);
+               rec->sg_encrypted_num_elem =
+                       ARRAY_SIZE(rec->sg_encrypted_data) - 1;
 
        return rc;
 }
 
-static int alloc_plaintext_sg(struct sock *sk, int len)
+static int move_to_plaintext_sg(struct sock *sk, int required_size)
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;
-       int rc = 0;
+       struct scatterlist *plain_sg = &rec->sg_plaintext_data[1];
+       struct scatterlist *enc_sg = &rec->sg_encrypted_data[1];
+       int enc_sg_idx = 0;
+       int skip, len;
 
-       rc = sk_alloc_sg(sk, len, rec->sg_plaintext_data, 0,
-                        &rec->sg_plaintext_num_elem, &rec->sg_plaintext_size,
-                        tls_ctx->pending_open_record_frags);
+       if (rec->sg_plaintext_num_elem == MAX_SKB_FRAGS)
+               return -ENOSPC;
 
-       if (rc == -ENOSPC)
-               rec->sg_plaintext_num_elem = ARRAY_SIZE(rec->sg_plaintext_data);
+       /* We add page references worth len bytes from enc_sg at the
+        * end of plain_sg. The caller guarantees that sg_encrypted_data
+        * has enough room.
+        */
+       len = required_size - rec->sg_plaintext_size;
 
-       return rc;
+       /* Skip initial bytes in sg_encrypted_data to be able
+        * to use same offset of both plain and encrypted data.
+        */
+       skip = tls_ctx->tx.prepend_size + rec->sg_plaintext_size;
+
+       while (enc_sg_idx < rec->sg_encrypted_num_elem) {
+               if (enc_sg[enc_sg_idx].length > skip)
+                       break;
+
+               skip -= enc_sg[enc_sg_idx].length;
+               enc_sg_idx++;
+       }
+
+       /* Unmark the end of plain_sg */
+       sg_unmark_end(plain_sg + rec->sg_plaintext_num_elem - 1);
+
+       while (len) {
+               struct page *page = sg_page(&enc_sg[enc_sg_idx]);
+               int bytes = enc_sg[enc_sg_idx].length - skip;
+               int offset = enc_sg[enc_sg_idx].offset + skip;
+
+               if (bytes > len)
+                       bytes = len;
+               else
+                       enc_sg_idx++;
+
+               /* Skipping is required only once */
+               skip = 0;
+
+               /* Increment page reference */
+               get_page(page);
+
+               sg_set_page(&plain_sg[rec->sg_plaintext_num_elem], page,
+                           bytes, offset);
+
+               sk_mem_charge(sk, bytes);
+
+               len -= bytes;
+               rec->sg_plaintext_size += bytes;
+
+               rec->sg_plaintext_num_elem++;
+
+               if (rec->sg_plaintext_num_elem == MAX_SKB_FRAGS)
+                       return -ENOSPC;
+       }
+
+       return 0;
 }
 
 static void free_sg(struct sock *sk, struct scatterlist *sg,
@@ -320,11 +372,11 @@ static void tls_free_open_rec(struct sock *sk)
        if (!rec)
                return;
 
-       free_sg(sk, rec->sg_encrypted_data,
+       free_sg(sk, &rec->sg_encrypted_data[1],
                &rec->sg_encrypted_num_elem,
                &rec->sg_encrypted_size);
 
-       free_sg(sk, rec->sg_plaintext_data,
+       free_sg(sk, &rec->sg_plaintext_data[1],
                &rec->sg_plaintext_num_elem,
                &rec->sg_plaintext_size);
 
@@ -355,7 +407,7 @@ int tls_tx_records(struct sock *sk, int flags)
                 * Remove the head of tx_list
                 */
                list_del(&rec->list);
-               free_sg(sk, rec->sg_plaintext_data,
+               free_sg(sk, &rec->sg_plaintext_data[1],
                        &rec->sg_plaintext_num_elem, &rec->sg_plaintext_size);
 
                kfree(rec);
@@ -370,13 +422,13 @@ int tls_tx_records(struct sock *sk, int flags)
                                tx_flags = flags;
 
                        rc = tls_push_sg(sk, tls_ctx,
-                                        &rec->sg_encrypted_data[0],
+                                        &rec->sg_encrypted_data[1],
                                         0, tx_flags);
                        if (rc)
                                goto tx_err;
 
                        list_del(&rec->list);
-                       free_sg(sk, rec->sg_plaintext_data,
+                       free_sg(sk, &rec->sg_plaintext_data[1],
                                &rec->sg_plaintext_num_elem,
                                &rec->sg_plaintext_size);
 
@@ -405,16 +457,12 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
 
        rec = container_of(aead_req, struct tls_rec, aead_req);
 
-       rec->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
-       rec->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;
+       rec->sg_encrypted_data[1].offset -= tls_ctx->tx.prepend_size;
+       rec->sg_encrypted_data[1].length += tls_ctx->tx.prepend_size;
 
 
-       /* Free the record if error is previously set on socket */
+       /* Check if an error was previously set on the socket */
        if (err || sk->sk_err) {
-               free_sg(sk, rec->sg_encrypted_data,
-                       &rec->sg_encrypted_num_elem, &rec->sg_encrypted_size);
-
-               kfree(rec);
                rec = NULL;
 
                /* If err is already set on socket, return the same code */
@@ -449,7 +497,7 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
 
        /* Schedule the transmission */
        if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
-               schedule_delayed_work(&ctx->tx_work.work, 1);
+               schedule_delayed_work(&ctx->tx_work.work, 2);
 }
 
 static int tls_do_encryption(struct sock *sk,
@@ -459,15 +507,21 @@ static int tls_do_encryption(struct sock *sk,
                             size_t data_len)
 {
        struct tls_rec *rec = ctx->open_rec;
+       struct scatterlist *plain_sg = rec->sg_plaintext_data;
+       struct scatterlist *enc_sg = rec->sg_encrypted_data;
        int rc;
 
-       rec->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
-       rec->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;
+       /* Skip the first index as it contains AAD data */
+       rec->sg_encrypted_data[1].offset += tls_ctx->tx.prepend_size;
+       rec->sg_encrypted_data[1].length -= tls_ctx->tx.prepend_size;
+
+       /* For in-place crypto, pass the same SG list as both src and dst */
+       if (rec->inplace_crypto)
+               plain_sg = enc_sg;
 
        aead_request_set_tfm(aead_req, ctx->aead_send);
        aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
-       aead_request_set_crypt(aead_req, rec->sg_aead_in,
-                              rec->sg_aead_out,
+       aead_request_set_crypt(aead_req, plain_sg, enc_sg,
                               data_len, tls_ctx->tx.iv);
 
        aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
@@ -480,8 +534,8 @@ static int tls_do_encryption(struct sock *sk,
        rc = crypto_aead_encrypt(aead_req);
        if (!rc || rc != -EINPROGRESS) {
                atomic_dec(&ctx->encrypt_pending);
-               rec->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
-               rec->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;
+               rec->sg_encrypted_data[1].offset -= tls_ctx->tx.prepend_size;
+               rec->sg_encrypted_data[1].length += tls_ctx->tx.prepend_size;
        }
 
        if (!rc) {
@@ -512,16 +566,16 @@ static int tls_push_record(struct sock *sk, int flags,
        rec->tx_flags = flags;
        req = &rec->aead_req;
 
-       sg_mark_end(rec->sg_plaintext_data + rec->sg_plaintext_num_elem - 1);
-       sg_mark_end(rec->sg_encrypted_data + rec->sg_encrypted_num_elem - 1);
+       sg_mark_end(rec->sg_plaintext_data + rec->sg_plaintext_num_elem);
+       sg_mark_end(rec->sg_encrypted_data + rec->sg_encrypted_num_elem);
 
        tls_make_aad(rec->aad_space, rec->sg_plaintext_size,
                     tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
                     record_type);
 
        tls_fill_prepend(tls_ctx,
-                        page_address(sg_page(&rec->sg_encrypted_data[0])) +
-                        rec->sg_encrypted_data[0].offset,
+                        page_address(sg_page(&rec->sg_encrypted_data[1])) +
+                        rec->sg_encrypted_data[1].offset,
                         rec->sg_plaintext_size, record_type);
 
        tls_ctx->pending_open_record_frags = 0;
@@ -613,7 +667,7 @@ static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;
-       struct scatterlist *sg = rec->sg_plaintext_data;
+       struct scatterlist *sg = &rec->sg_plaintext_data[1];
        int copy, i, rc = 0;
 
        for (i = tls_ctx->pending_open_record_frags;
@@ -637,7 +691,7 @@ out:
        return rc;
 }
 
-struct tls_rec *get_rec(struct sock *sk)
+static struct tls_rec *get_rec(struct sock *sk)
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
@@ -659,19 +713,13 @@ struct tls_rec *get_rec(struct sock *sk)
        sg_init_table(&rec->sg_encrypted_data[0],
                      ARRAY_SIZE(rec->sg_encrypted_data));
 
-       sg_init_table(rec->sg_aead_in, 2);
-       sg_set_buf(&rec->sg_aead_in[0], rec->aad_space,
+       sg_set_buf(&rec->sg_plaintext_data[0], rec->aad_space,
                   sizeof(rec->aad_space));
-       sg_unmark_end(&rec->sg_aead_in[1]);
-       sg_chain(rec->sg_aead_in, 2, rec->sg_plaintext_data);
-
-       sg_init_table(rec->sg_aead_out, 2);
-       sg_set_buf(&rec->sg_aead_out[0], rec->aad_space,
+       sg_set_buf(&rec->sg_encrypted_data[0], rec->aad_space,
                   sizeof(rec->aad_space));
-       sg_unmark_end(&rec->sg_aead_out[1]);
-       sg_chain(rec->sg_aead_out, 2, rec->sg_encrypted_data);
 
        ctx->open_rec = rec;
+       rec->inplace_crypto = 1;
 
        return rec;
 }
@@ -763,12 +811,14 @@ alloc_encrypted:
                        ret = zerocopy_from_iter(sk, &msg->msg_iter,
                                try_to_copy, &rec->sg_plaintext_num_elem,
                                &rec->sg_plaintext_size,
-                               rec->sg_plaintext_data,
-                               ARRAY_SIZE(rec->sg_plaintext_data),
+                               &rec->sg_plaintext_data[1],
+                               ARRAY_SIZE(rec->sg_plaintext_data) - 1,
                                true);
                        if (ret)
                                goto fallback_to_reg_send;
 
+                       rec->inplace_crypto = 0;
+
                        num_zc++;
                        copied += try_to_copy;
                        ret = tls_push_record(sk, msg->msg_flags, record_type);
@@ -781,18 +831,18 @@ alloc_encrypted:
                        continue;
 
 fallback_to_reg_send:
-                       trim_sg(sk, rec->sg_plaintext_data,
+                       trim_sg(sk, &rec->sg_plaintext_data[1],
                                &rec->sg_plaintext_num_elem,
                                &rec->sg_plaintext_size,
                                orig_size);
                }
 
                required_size = rec->sg_plaintext_size + try_to_copy;
-alloc_plaintext:
-               ret = alloc_plaintext_sg(sk, required_size);
+
+               ret = move_to_plaintext_sg(sk, required_size);
                if (ret) {
                        if (ret != -ENOSPC)
-                               goto wait_for_memory;
+                               goto send_end;
 
                        /* Adjust try_to_copy according to the amount that was
                         * actually allocated. The difference is due
@@ -801,7 +851,7 @@ alloc_plaintext:
                        try_to_copy -= required_size - rec->sg_plaintext_size;
                        full_record = true;
 
-                       trim_sg(sk, rec->sg_encrypted_data,
+                       trim_sg(sk, &rec->sg_encrypted_data[1],
                                &rec->sg_encrypted_num_elem,
                                &rec->sg_encrypted_size,
                                rec->sg_plaintext_size +
@@ -837,8 +887,6 @@ trim_sgl:
 
                if (rec->sg_encrypted_size < required_size)
                        goto alloc_encrypted;
-
-               goto alloc_plaintext;
        }
 
        if (!num_async) {
@@ -949,7 +997,7 @@ alloc_payload:
                }
 
                get_page(page);
-               sg = rec->sg_plaintext_data + rec->sg_plaintext_num_elem;
+               sg = &rec->sg_plaintext_data[1] + rec->sg_plaintext_num_elem;
                sg_set_page(sg, page, copy, offset);
                sg_unmark_end(sg);
 
@@ -963,7 +1011,8 @@ alloc_payload:
 
                if (full_record || eor ||
                    rec->sg_plaintext_num_elem ==
-                   ARRAY_SIZE(rec->sg_plaintext_data)) {
+                   ARRAY_SIZE(rec->sg_plaintext_data) - 1) {
+                       rec->inplace_crypto = 0;
                        ret = tls_push_record(sk, flags, record_type);
                        if (ret) {
                                if (ret == -EINPROGRESS)
@@ -1571,7 +1620,7 @@ void tls_sw_free_resources_tx(struct sock *sk)
                rec = list_first_entry(&ctx->tx_list,
                                       struct tls_rec, list);
 
-               free_sg(sk, rec->sg_plaintext_data,
+               free_sg(sk, &rec->sg_plaintext_data[1],
                        &rec->sg_plaintext_num_elem,
                        &rec->sg_plaintext_size);
 
@@ -1580,11 +1629,11 @@ void tls_sw_free_resources_tx(struct sock *sk)
        }
 
        list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
-               free_sg(sk, rec->sg_encrypted_data,
+               free_sg(sk, &rec->sg_encrypted_data[1],
                        &rec->sg_encrypted_num_elem,
                        &rec->sg_encrypted_size);
 
-               free_sg(sk, rec->sg_plaintext_data,
+               free_sg(sk, &rec->sg_plaintext_data[1],
                        &rec->sg_plaintext_num_elem,
                        &rec->sg_plaintext_size);
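
Throughout the hunks above, index 0 of sg_plaintext_data and sg_encrypted_data now carries the AAD buffer, so payload users address the arrays from index 1 and the whole array can be handed to the AEAD as one src/dst list, instead of chaining a separate two-entry sg_aead_in/sg_aead_out in front. A self-contained sketch of that layout (illustrative names, not the kernel's scatterlist API; the 13-byte AAD matches TLS 1.2: 8-byte sequence, 1-byte type, 2-byte version, 2-byte length):

    #include <stdio.h>
    #include <string.h>

    #define AAD_LEN          13
    #define MAX_PAYLOAD_SEGS 4

    struct seg {
        const void *buf;
        size_t len;
    };

    struct record {
        char aad[AAD_LEN];
        struct seg sg[1 + MAX_PAYLOAD_SEGS]; /* [0] = AAD, [1..] = payload */
        int num_payload;
    };

    static void record_init(struct record *rec)
    {
        memset(rec, 0, sizeof(*rec));
        /* Mirrors sg_set_buf(&rec->sg_..._data[0], rec->aad_space, ...) */
        rec->sg[0].buf = rec->aad;
        rec->sg[0].len = sizeof(rec->aad);
    }

    static int record_add_payload(struct record *rec, const void *buf, size_t len)
    {
        if (rec->num_payload == MAX_PAYLOAD_SEGS)
            return -1; /* the kernel code returns -ENOSPC here */
        rec->sg[1 + rec->num_payload].buf = buf;
        rec->sg[1 + rec->num_payload].len = len;
        rec->num_payload++;
        return 0;
    }

    int main(void)
    {
        struct record rec;

        record_init(&rec);
        record_add_payload(&rec, "hello", 5);
        /* Payload-only consumers use &rec.sg[1]; the AEAD takes rec.sg whole. */
        printf("aad %zu bytes, first payload %zu bytes\n",
               rec.sg[0].len, rec.sg[1].len);
        return 0;
    }

This layout is also what makes the inplace_crypto flag above cheap: when no zero-copy pages were attached, the encrypted-side array can simply be passed as both src and dst.
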
 
index d5f9b5235cdd26329486f7749ec95b7c40ad6dca..a02bbdd1b19243f238e13c1b9a1f81c24d32be2f 100644 (file)
@@ -3756,6 +3756,7 @@ static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
                        return false;
 
                /* check availability */
+               ridx = array_index_nospec(ridx, IEEE80211_HT_MCS_MASK_LEN);
                if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
                        mcs[ridx] |= rbit;
                else
@@ -10234,7 +10235,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        s32 last, low, high;
        u32 hyst;
-       int i, n;
+       int i, n, low_index;
        int err;
 
        /* RSSI reporting disabled? */
@@ -10271,10 +10272,19 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
                if (last < wdev->cqm_config->rssi_thresholds[i])
                        break;
 
-       low = i > 0 ?
-               (wdev->cqm_config->rssi_thresholds[i - 1] - hyst) : S32_MIN;
-       high = i < n ?
-               (wdev->cqm_config->rssi_thresholds[i] + hyst - 1) : S32_MAX;
+       low_index = i - 1;
+       if (low_index >= 0) {
+               low_index = array_index_nospec(low_index, n);
+               low = wdev->cqm_config->rssi_thresholds[low_index] - hyst;
+       } else {
+               low = S32_MIN;
+       }
+       if (i < n) {
+               i = array_index_nospec(i, n);
+               high = wdev->cqm_config->rssi_thresholds[i] + hyst - 1;
+       } else {
+               high = S32_MAX;
+       }
 
        return rdev_set_cqm_rssi_range_config(rdev, dev, low, high);
 }
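
Both hunks above clamp a user-influenced index before it is used, so a mispredicted bounds check cannot steer a speculative out-of-bounds load (Spectre v1). A standalone sketch of the mask trick behind array_index_nospec(), modeled on the kernel's generic fallback (a sketch only: it assumes size is nonzero and below LONG_MAX, and relies on arithmetic right shift of negative values, which C leaves implementation-defined):

    #include <stdio.h>

    /* All-ones mask when 0 <= index < size, 0 otherwise, computed without
     * a branch the CPU could speculate past (same expression as the
     * kernel's generic array_index_mask_nospec()). */
    static unsigned long index_mask_nospec(unsigned long index, unsigned long size)
    {
        return ~(long)(index | (size - index - 1)) >> (sizeof(long) * 8 - 1);
    }

    static unsigned long index_nospec(unsigned long index, unsigned long size)
    {
        return index & index_mask_nospec(index, size);
    }

    int main(void)
    {
        printf("%lu\n", index_nospec(3, 8));  /* 3: in range, unchanged */
        printf("%lu\n", index_nospec(9, 8));  /* 0: out of range, clamped */
        return 0;
    }
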
index 56be68a27bb96a84492555a6fe67830674757e08..148c229fe84f1949536116bd9901a6ab8349585f 100644 (file)
@@ -2667,11 +2667,12 @@ static void reg_process_hint(struct regulatory_request *reg_request)
 {
        struct wiphy *wiphy = NULL;
        enum reg_request_treatment treatment;
+       enum nl80211_reg_initiator initiator = reg_request->initiator;
 
        if (reg_request->wiphy_idx != WIPHY_IDX_INVALID)
                wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
 
-       switch (reg_request->initiator) {
+       switch (initiator) {
        case NL80211_REGDOM_SET_BY_CORE:
                treatment = reg_process_hint_core(reg_request);
                break;
@@ -2689,7 +2690,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
                treatment = reg_process_hint_country_ie(wiphy, reg_request);
                break;
        default:
-               WARN(1, "invalid initiator %d\n", reg_request->initiator);
+               WARN(1, "invalid initiator %d\n", initiator);
                goto out_free;
        }
 
@@ -2704,7 +2705,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
         */
        if (treatment == REG_REQ_ALREADY_SET && wiphy &&
            wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
-               wiphy_update_regulatory(wiphy, reg_request->initiator);
+               wiphy_update_regulatory(wiphy, initiator);
                wiphy_all_share_dfs_chan_state(wiphy);
                reg_check_channels();
        }
@@ -2873,6 +2874,7 @@ static int regulatory_hint_core(const char *alpha2)
        request->alpha2[0] = alpha2[0];
        request->alpha2[1] = alpha2[1];
        request->initiator = NL80211_REGDOM_SET_BY_CORE;
+       request->wiphy_idx = WIPHY_IDX_INVALID;
 
        queue_regulatory_request(request);
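
The reg_process_hint() change above copies reg_request->initiator into a local before the switch because the per-initiator handlers may consume and free the request; dereferencing it afterwards would be a use-after-free. A minimal illustration of the pattern (hypothetical names, not the cfg80211 types):

    #include <stdio.h>
    #include <stdlib.h>

    struct request {
        int initiator;
    };

    /* Stand-in for the reg_process_hint_*() helpers, which may consume
     * and free the request they are handed. */
    static void handle(struct request *req)
    {
        free(req);
    }

    static void process(struct request *req)
    {
        /* Copy what is needed later BEFORE the call that may free req;
         * reading req->initiator afterwards would be a use-after-free. */
        int initiator = req->initiator;

        handle(req);
        printf("processed hint from initiator %d\n", initiator);
    }

    int main(void)
    {
        struct request *req = malloc(sizeof(*req));

        if (!req)
            return 1;
        req->initiator = 1;
        process(req);
        return 0;
    }
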
 
index d36c3eb7b9311fc75bdaa020aa0318546efd4128..d0e7472dd9fd4b2a8938334129f24a60ea3fb421 100644 (file)
@@ -1058,13 +1058,23 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
        return NULL;
 }
 
+/*
+ * Update RX channel information based on the available frame payload
+ * information. This is mainly for the 2.4 GHz band where frames can be received
+ * from neighboring channels and the Beacon frames use the DSSS Parameter Set
+ * element to indicate the current (transmitting) channel, but this might also
+ * be needed on other bands if the RX frequency does not match the actual
+ * operating channel of a BSS.
+ */
 static struct ieee80211_channel *
 cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
-                        struct ieee80211_channel *channel)
+                        struct ieee80211_channel *channel,
+                        enum nl80211_bss_scan_width scan_width)
 {
        const u8 *tmp;
        u32 freq;
        int channel_number = -1;
+       struct ieee80211_channel *alt_channel;
 
        tmp = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ie, ielen);
        if (tmp && tmp[1] == 1) {
@@ -1078,16 +1088,45 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
                }
        }
 
-       if (channel_number < 0)
+       if (channel_number < 0) {
+               /* No channel information in frame payload */
                return channel;
+       }
 
        freq = ieee80211_channel_to_frequency(channel_number, channel->band);
-       channel = ieee80211_get_channel(wiphy, freq);
-       if (!channel)
-               return NULL;
-       if (channel->flags & IEEE80211_CHAN_DISABLED)
+       alt_channel = ieee80211_get_channel(wiphy, freq);
+       if (!alt_channel) {
+               if (channel->band == NL80211_BAND_2GHZ) {
+                       /*
+                        * Better not allow unexpected channels when that could
+                        * be going beyond the 1-11 range (e.g., discovering
+                        * BSS on channel 12 when radio is configured for
+                        * channel 11).
+                        */
+                       return NULL;
+               }
+
+               /* No match for the payload channel number - ignore it */
+               return channel;
+       }
+
+       if (scan_width == NL80211_BSS_CHAN_WIDTH_10 ||
+           scan_width == NL80211_BSS_CHAN_WIDTH_5) {
+               /*
+                * Ignore channel number in 5 and 10 MHz channels where there
+                * may not be an n:1 or 1:n mapping between frequencies and
+                * channel numbers.
+                */
+               return channel;
+       }
+
+       /*
+        * Use the channel determined through the payload channel number
+        * instead of the RX channel reported by the driver.
+        */
+       if (alt_channel->flags & IEEE80211_CHAN_DISABLED)
                return NULL;
-       return channel;
+       return alt_channel;
 }
 
 /* Returned bss is reference counted and must be cleaned up appropriately. */
@@ -1112,7 +1151,8 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
                    (data->signal < 0 || data->signal > 100)))
                return NULL;
 
-       channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan);
+       channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan,
+                                          data->scan_width);
        if (!channel)
                return NULL;
 
@@ -1210,7 +1250,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
                return NULL;
 
        channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable,
-                                          ielen, data->chan);
+                                          ielen, data->chan, data->scan_width);
        if (!channel)
                return NULL;
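
For reference, the channel-number-to-frequency translation used above is a fixed mapping in the 2.4 GHz band, with channel 14 as the lone irregular case. A small sketch of that mapping (constants from the IEEE 802.11 band plan; not the kernel's ieee80211_channel_to_frequency(), which also covers other bands):

    #include <stdio.h>

    /* 2.4 GHz band plan: channels 1-13 sit at 2407 + 5 * chan MHz,
     * channel 14 (Japan) is the irregular case at 2484 MHz. */
    static int chan_to_freq_2ghz(int chan)
    {
        if (chan == 14)
            return 2484;
        if (chan >= 1 && chan <= 13)
            return 2407 + chan * 5;
        return -1; /* not a valid 2.4 GHz channel number */
    }

    int main(void)
    {
        printf("channel 1  -> %d MHz\n", chan_to_freq_2ghz(1));  /* 2412 */
        printf("channel 11 -> %d MHz\n", chan_to_freq_2ghz(11)); /* 2462 */
        printf("channel 14 -> %d MHz\n", chan_to_freq_2ghz(14)); /* 2484 */
        return 0;
    }
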
 
index 167f7025ac98288acbd57cd4627b1eb7fb2f6520..06943d9c983522d499395f733f000d8630aa04d0 100644 (file)
@@ -1278,12 +1278,16 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
        if (err)
                return err;
 
-       if (!(sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)))
-               return -EOPNOTSUPP;
+       if (!(sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) {
+               err = -EOPNOTSUPP;
+               goto free;
+       }
 
        rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate);
 
-       return 0;
+free:
+       cfg80211_sinfo_release_content(&sinfo);
+       return err;
 }
 
 /* Get wireless statistics.  Called by /proc/net/wireless and by SIOCGIWSTATS */
@@ -1293,7 +1297,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        /* we are under RTNL - globally locked - so can use static structs */
        static struct iw_statistics wstats;
-       static struct station_info sinfo;
+       static struct station_info sinfo = {};
        u8 bssid[ETH_ALEN];
 
        if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION)
@@ -1352,6 +1356,8 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
        if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))
                wstats.discard.retries = sinfo.tx_failed;
 
+       cfg80211_sinfo_release_content(&sinfo);
+
        return &wstats;
 }
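
Both fixes above follow the same rule: once cfg80211_get_station() has filled a station_info, its allocated content must be released on every exit path, success or error, and the structure must start zeroed so releasing the untouched fields is safe. A compact sketch of the idiom (hypothetical types; -EOPNOTSUPP as the sample error):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct info {
        char *content; /* dynamically allocated, like sinfo->pertid */
    };

    static int fill_info(struct info *in)
    {
        in->content = malloc(16);
        return in->content ? 0 : -ENOMEM;
    }

    static void release_info(struct info *in)
    {
        free(in->content); /* free(NULL) is a no-op, so zero-init is safe */
        in->content = NULL;
    }

    /* Every exit path runs through the label, so the content is released
     * on success and on error alike. */
    static int query(int fail)
    {
        struct info in = { 0 }; /* like 'static struct station_info sinfo = {}' */
        int err = fill_info(&in);

        if (err)
            goto free_out;
        if (fail) {
            err = -EOPNOTSUPP;
            goto free_out;
        }
        printf("content at %p\n", (void *)in.content);
    free_out:
        release_info(&in);
        return err;
    }

    int main(void)
    {
        query(0); /* success path releases */
        query(1); /* error path releases too */
        return 0;
    }
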
 
index 260fbba4f03eea65cb344d0ffad85a60aabbb17b..144c137886b1627299d304f7c11803b876c3c061 100644 (file)
@@ -192,9 +192,13 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
 
        err = dev->xfrmdev_ops->xdo_dev_state_add(x);
        if (err) {
+               xso->num_exthdrs = 0;
+               xso->flags = 0;
                xso->dev = NULL;
                dev_put(dev);
-               return err;
+
+               if (err != -EOPNOTSUPP)
+                       return err;
        }
 
        return 0;
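
The xfrm_dev_state_add() change above treats -EOPNOTSUPP from the driver as "cannot offload this state" rather than a fatal error: the offload bookkeeping is unwound, but the function still returns 0 so the state is installed on the software path. A small sketch of the fallback shape (hypothetical hook in place of xdo_dev_state_add()):

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical driver hook standing in for xdo_dev_state_add(). */
    static int hw_offload_add(int supported)
    {
        return supported ? 0 : -EOPNOTSUPP;
    }

    static int state_add(int hw_supported)
    {
        int err = hw_offload_add(hw_supported);

        if (err) {
            /* Unwind offload bookkeeping (num_exthdrs, flags, dev). */
            if (err != -EOPNOTSUPP)
                return err; /* a real error still fails the add */
        }
        return 0; /* offloaded, or clean software fallback */
    }

    int main(void)
    {
        printf("hw supported: %d, software fallback: %d\n",
               state_add(1), state_add(0));
        return 0;
    }
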
index b89c9c7f8c5c12a13772ae4d838ce9f121bd51f5..be3520e429c9f989a712f7bf32874bed7d3aa667 100644 (file)
@@ -458,6 +458,7 @@ resume:
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
                        goto drop;
                }
+               crypto_done = false;
        } while (!err);
 
        err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
index 4b4ef4f662d9d2e40a0bb8f35af47c59177ce31c..dc5b20bf29cf4ecda5d161327fb6b34faf1cfbaa 100644 (file)
@@ -742,7 +742,7 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
-struct net *xfrmi_get_link_net(const struct net_device *dev)
+static struct net *xfrmi_get_link_net(const struct net_device *dev)
 {
        struct xfrm_if *xi = netdev_priv(dev);
 
index 2d42cb0c94b823ce1d6b7872d73ef705c96077c2..4ae87c5ce2e357b420aaa102bdbb416e7039f90a 100644 (file)
@@ -100,6 +100,10 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
                spin_unlock_bh(&x->lock);
 
                skb_dst_force(skb);
+               if (!skb_dst(skb)) {
+                       XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
+                       goto error_nolock;
+               }
 
                if (xfrm_offload(skb)) {
                        x->type_offload->encap(x, skb);
index 3110c3fbee2099e7a4563a99c988e5ad66d0658c..f094d4b3520d97773b87baf5700df79fc8ca4666 100644 (file)
@@ -2491,6 +2491,10 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
        }
 
        skb_dst_force(skb);
+       if (!skb_dst(skb)) {
+               XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
+               return 0;
+       }
 
        dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
        if (IS_ERR(dst)) {
index 4791aa8b818583b5fcb5812fd561342fbab2edfa..ca7a207b81a9587c942dd8763e4444cc5675f1ee 100644 (file)
@@ -151,10 +151,16 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
        err = -EINVAL;
        switch (p->family) {
        case AF_INET:
+               if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
+                       goto out;
+
                break;
 
        case AF_INET6:
 #if IS_ENABLED(CONFIG_IPV6)
+               if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
+                       goto out;
+
                break;
 #else
                err = -EAFNOSUPPORT;
@@ -1001,7 +1007,7 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
                int err;
 
                err = nlmsg_parse(cb->nlh, 0, attrs, XFRMA_MAX, xfrma_policy,
-                                 NULL);
+                                 cb->extack);
                if (err < 0)
                        return err;
 
@@ -1396,10 +1402,16 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
 
        switch (p->sel.family) {
        case AF_INET:
+               if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
+                       return -EINVAL;
+
                break;
 
        case AF_INET6:
 #if IS_ENABLED(CONFIG_IPV6)
+               if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
+                       return -EINVAL;
+
                break;
 #else
                return  -EAFNOSUPPORT;
@@ -1480,6 +1492,9 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
                    (ut[i].family != prev_family))
                        return -EINVAL;
 
+               if (ut[i].mode >= XFRM_MODE_MAX)
+                       return -EINVAL;
+
                prev_family = ut[i].family;
 
                switch (ut[i].family) {
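
The two validation hunks above bound the selector prefix lengths by the address width of the family, 32 bits for IPv4 and 128 for IPv6, before the values reach the flow lookup code. A standalone sketch of the check (plain C; -EINVAL as in the patch):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/socket.h>

    /* Prefix lengths may not exceed the address width of the family:
     * 32 bits for IPv4, 128 for IPv6 (as in the hunks above). */
    static int verify_prefixlens(int family, unsigned int plen_d, unsigned int plen_s)
    {
        unsigned int max;

        switch (family) {
        case AF_INET:
            max = 32;
            break;
        case AF_INET6:
            max = 128;
            break;
        default:
            return -EINVAL;
        }
        return (plen_d > max || plen_s > max) ? -EINVAL : 0;
    }

    int main(void)
    {
        printf("%d\n", verify_prefixlens(AF_INET, 24, 32));   /* 0 */
        printf("%d\n", verify_prefixlens(AF_INET, 33, 0));    /* -EINVAL */
        printf("%d\n", verify_prefixlens(AF_INET6, 128, 64)); /* 0 */
        return 0;
    }
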
index b5282cbbe48981d350f396525013fc571cdf62fa..617ff1aa818f991a01091a9e03aa7952978c0b92 100644 (file)
@@ -145,9 +145,11 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
        if (!acomp->ops) {
                request_module("i915");
                /* 10s timeout */
-               wait_for_completion_timeout(&bind_complete, 10 * 1000);
+               wait_for_completion_timeout(&bind_complete,
+                                           msecs_to_jiffies(10 * 1000));
        }
        if (!acomp->ops) {
+               dev_info(bus->dev, "couldn't bind with audio component\n");
                snd_hdac_acomp_exit(bus);
                return -ENODEV;
        }
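
The timeout fix above matters because wait_for_completion_timeout() takes jiffies, not milliseconds; a raw 10 * 1000 only means ten seconds when HZ happens to be 1000. A sketch of what msecs_to_jiffies() computes, with an illustrative HZ (the real helper also handles rounding and overflow):

    #include <stdio.h>

    #define HZ 250 /* illustrative; the kernel tick rate is a build-time choice */

    /* Simplified msecs_to_jiffies(): timeouts handed to
     * wait_for_completion_timeout() are in jiffies, so a raw 10 * 1000
     * only means ten seconds when HZ happens to be 1000. */
    static unsigned long msecs_to_jiffies_sketch(unsigned int ms)
    {
        return (unsigned long)ms * HZ / 1000;
    }

    int main(void)
    {
        printf("10 s at HZ=%d is %lu jiffies, not 10000\n",
               HZ, msecs_to_jiffies_sketch(10 * 1000));
        return 0;
    }
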
index 1d117f00d04d5620e767500a00f478fa9fc2b94a..3ac7ba9b342d24dd105cd828456715a18b7a8709 100644 (file)
@@ -6409,6 +6409,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
        SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
        SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+       SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
        SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
        SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
        SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
index 439b8a27488d371fe323f879ba225650df23a359..195ba486640f9b8797981958bc221dd2102306e8 100755 (executable)
@@ -1325,7 +1325,7 @@ class Tui(object):
         msg = ''
         while True:
             self.screen.erase()
-            self.screen.addstr(0, 0, 'Set update interval (defaults to %fs).' %
+            self.screen.addstr(0, 0, 'Set update interval (defaults to %.1fs).' %
                                DELAY_DEFAULT, curses.A_BOLD)
             self.screen.addstr(4, 0, msg)
             self.screen.addstr(2, 0, 'Change delay from %.1fs to ' %
index 0f45633bd634a9c5692cd1d3c5da1c82a40756cb..802b4af187297a7c8f2ddf1e086877b35cd41687 100755 (executable)
@@ -9,11 +9,11 @@ ret=0
 ksft_skip=4
 
 # all tests in this script. Can be overridden with -t option
-TESTS="unregister down carrier nexthop ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric"
+TESTS="unregister down carrier nexthop ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics"
 VERBOSE=0
 PAUSE_ON_FAIL=no
 PAUSE=no
-IP="ip -netns testns"
+IP="ip -netns ns1"
 
 log_test()
 {
@@ -47,8 +47,10 @@ log_test()
 setup()
 {
        set -e
-       ip netns add testns
+       ip netns add ns1
        $IP link set dev lo up
+       ip netns exec ns1 sysctl -qw net.ipv4.ip_forward=1
+       ip netns exec ns1 sysctl -qw net.ipv6.conf.all.forwarding=1
 
        $IP link add dummy0 type dummy
        $IP link set dev dummy0 up
@@ -61,7 +63,8 @@ setup()
 cleanup()
 {
        $IP link del dev dummy0 &> /dev/null
-       ip netns del testns
+       ip netns del ns1
+       ip netns del ns2 &> /dev/null
 }
 
 get_linklocal()
@@ -639,11 +642,14 @@ add_initial_route6()
 
 check_route6()
 {
-       local pfx="2001:db8:104::/64"
+       local pfx
        local expected="$1"
        local out
        local rc=0
 
+       set -- $expected
+       pfx=$1
+
        out=$($IP -6 ro ls match ${pfx} | sed -e 's/ pref medium//')
        [ "${out}" = "${expected}" ] && return 0
 
@@ -690,28 +696,33 @@ route_setup()
        [ "${VERBOSE}" = "1" ] && set -x
        set -e
 
-       $IP li add red up type vrf table 101
+       ip netns add ns2
+       ip -netns ns2 link set dev lo up
+       ip netns exec ns2 sysctl -qw net.ipv4.ip_forward=1
+       ip netns exec ns2 sysctl -qw net.ipv6.conf.all.forwarding=1
+
        $IP li add veth1 type veth peer name veth2
        $IP li add veth3 type veth peer name veth4
 
        $IP li set veth1 up
        $IP li set veth3 up
-       $IP li set veth2 vrf red up
-       $IP li set veth4 vrf red up
-       $IP li add dummy1 type dummy
-       $IP li set dummy1 vrf red up
-
-       $IP -6 addr add 2001:db8:101::1/64 dev veth1
-       $IP -6 addr add 2001:db8:101::2/64 dev veth2
-       $IP -6 addr add 2001:db8:103::1/64 dev veth3
-       $IP -6 addr add 2001:db8:103::2/64 dev veth4
-       $IP -6 addr add 2001:db8:104::1/64 dev dummy1
+       $IP li set veth2 netns ns2 up
+       $IP li set veth4 netns ns2 up
+       ip -netns ns2 li add dummy1 type dummy
+       ip -netns ns2 li set dummy1 up
 
+       $IP -6 addr add 2001:db8:101::1/64 dev veth1 nodad
+       $IP -6 addr add 2001:db8:103::1/64 dev veth3 nodad
        $IP addr add 172.16.101.1/24 dev veth1
-       $IP addr add 172.16.101.2/24 dev veth2
        $IP addr add 172.16.103.1/24 dev veth3
-       $IP addr add 172.16.103.2/24 dev veth4
-       $IP addr add 172.16.104.1/24 dev dummy1
+
+       ip -netns ns2 -6 addr add 2001:db8:101::2/64 dev veth2 nodad
+       ip -netns ns2 -6 addr add 2001:db8:103::2/64 dev veth4 nodad
+       ip -netns ns2 -6 addr add 2001:db8:104::1/64 dev dummy1 nodad
+
+       ip -netns ns2 addr add 172.16.101.2/24 dev veth2
+       ip -netns ns2 addr add 172.16.103.2/24 dev veth4
+       ip -netns ns2 addr add 172.16.104.1/24 dev dummy1
 
        set +ex
 }
@@ -944,7 +955,7 @@ ipv6_addr_metric_test()
        log_test $rc 0 "Modify metric of address"
 
        # verify prefix route removed on down
-       run_cmd "ip netns exec testns sysctl -qw net.ipv6.conf.all.keep_addr_on_down=1"
+       run_cmd "ip netns exec ns1 sysctl -qw net.ipv6.conf.all.keep_addr_on_down=1"
        run_cmd "$IP li set dev dummy2 down"
        rc=$?
        if [ $rc -eq 0 ]; then
@@ -967,6 +978,77 @@ ipv6_addr_metric_test()
        cleanup
 }
 
+ipv6_route_metrics_test()
+{
+       local rc
+
+       echo
+       echo "IPv6 routes with metrics"
+
+       route_setup
+
+       #
+       # single path with metrics
+       #
+       run_cmd "$IP -6 ro add 2001:db8:111::/64 via 2001:db8:101::2 mtu 1400"
+       rc=$?
+       if [ $rc -eq 0 ]; then
+               check_route6  "2001:db8:111::/64 via 2001:db8:101::2 dev veth1 metric 1024 mtu 1400"
+               rc=$?
+       fi
+       log_test $rc 0 "Single path route with mtu metric"
+
+
+       #
+       # multipath via separate routes with metrics
+       #
+       run_cmd "$IP -6 ro add 2001:db8:112::/64 via 2001:db8:101::2 mtu 1400"
+       run_cmd "$IP -6 ro append 2001:db8:112::/64 via 2001:db8:103::2"
+       rc=$?
+       if [ $rc -eq 0 ]; then
+               check_route6 "2001:db8:112::/64 metric 1024 mtu 1400 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1"
+               rc=$?
+       fi
+       log_test $rc 0 "Multipath route via 2 single routes with mtu metric on first"
+
+       # Second route is coalesced into the first to make a multipath route.
+       # The MTU of the second path is hidden from the display!
+       run_cmd "$IP -6 ro add 2001:db8:113::/64 via 2001:db8:101::2"
+       run_cmd "$IP -6 ro append 2001:db8:113::/64 via 2001:db8:103::2 mtu 1400"
+       rc=$?
+       if [ $rc -eq 0 ]; then
+               check_route6 "2001:db8:113::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1"
+               rc=$?
+       fi
+       log_test $rc 0 "Multipath route via 2 single routes with mtu metric on 2nd"
+
+       run_cmd "$IP -6 ro del 2001:db8:113::/64 via 2001:db8:101::2"
+       if [ $? -eq 0 ]; then
+               check_route6 "2001:db8:113::/64 via 2001:db8:103::2 dev veth3 metric 1024 mtu 1400"
+               log_test $? 0 "    MTU of second leg"
+       fi
+
+       #
+       # multipath with metrics
+       #
+       run_cmd "$IP -6 ro add 2001:db8:115::/64 mtu 1400 nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
+       rc=$?
+       if [ $rc -eq 0 ]; then
+               check_route6  "2001:db8:115::/64 metric 1024 mtu 1400 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1"
+               rc=$?
+       fi
+       log_test $rc 0 "Multipath route with mtu metric"
+
+       $IP -6 ro add 2001:db8:104::/64 via 2001:db8:101::2 mtu 1300
+       run_cmd "ip netns exec ns1 ping6 -w1 -c1 -s 1500 2001:db8:104::1"
+       log_test $? 0 "Using route with mtu metric"
+
+       run_cmd "$IP -6 ro add 2001:db8:114::/64 via  2001:db8:101::2  congctl lock foo"
+       log_test $? 2 "Invalid metric (fails metric_convert)"
+
+       route_cleanup
+}
+
 # add route for a prefix, flushing any existing routes first
 # expected to be the first step of a test
 add_route()
@@ -1005,11 +1087,15 @@ add_initial_route()
 
 check_route()
 {
-       local pfx="172.16.104.0/24"
+       local pfx
        local expected="$1"
        local out
        local rc=0
 
+       set -- $expected
+       pfx=$1
+       [ "${pfx}" = "unreachable" ] && pfx=$2
+
        out=$($IP ro ls match ${pfx})
        [ "${out}" = "${expected}" ] && return 0
 
@@ -1319,6 +1405,43 @@ ipv4_addr_metric_test()
        cleanup
 }
 
+ipv4_route_metrics_test()
+{
+       local rc
+
+       echo
+       echo "IPv4 routes with metrics"
+
+       route_setup
+
+       run_cmd "$IP ro add 172.16.111.0/24 via 172.16.101.2 mtu 1400"
+       rc=$?
+       if [ $rc -eq 0 ]; then
+               check_route "172.16.111.0/24 via 172.16.101.2 dev veth1 mtu 1400"
+               rc=$?
+       fi
+       log_test $rc 0 "Single path route with mtu metric"
+
+
+       run_cmd "$IP ro add 172.16.112.0/24 mtu 1400 nexthop via 172.16.101.2 nexthop via 172.16.103.2"
+       rc=$?
+       if [ $rc -eq 0 ]; then
+               check_route "172.16.112.0/24 mtu 1400 nexthop via 172.16.101.2 dev veth1 weight 1 nexthop via 172.16.103.2 dev veth3 weight 1"
+               rc=$?
+       fi
+       log_test $rc 0 "Multipath route with mtu metric"
+
+       $IP ro add 172.16.104.0/24 via 172.16.101.2 mtu 1300
+       run_cmd "ip netns exec ns1 ping -w1 -c1 -s 1500 172.16.104.1"
+       log_test $? 0 "Using route with mtu metric"
+
+       run_cmd "$IP ro add 172.16.111.0/24 via 172.16.101.2 congctl lock foo"
+       log_test $? 2 "Invalid metric (fails metric_convert)"
+
+       route_cleanup
+}
+
+
 ################################################################################
 # usage
 
@@ -1385,6 +1508,8 @@ do
        ipv4_route_test|ipv4_rt)        ipv4_route_test;;
        ipv6_addr_metric)               ipv6_addr_metric_test;;
        ipv4_addr_metric)               ipv4_addr_metric_test;;
+       ipv6_route_metrics)             ipv6_route_metrics_test;;
+       ipv4_route_metrics)             ipv4_route_metrics_test;;
 
        help) echo "Test names: $TESTS"; exit 0;;
        esac
diff --git a/tools/testing/selftests/net/forwarding/bridge_sticky_fdb.sh b/tools/testing/selftests/net/forwarding/bridge_sticky_fdb.sh
new file mode 100755 (executable)
index 0000000..1f8ef0e
--- /dev/null
@@ -0,0 +1,69 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="sticky"
+NUM_NETIFS=4
+TEST_MAC=de:ad:be:ef:13:37
+source lib.sh
+
+switch_create()
+{
+       ip link add dev br0 type bridge
+
+       ip link set dev $swp1 master br0
+       ip link set dev $swp2 master br0
+
+       ip link set dev br0 up
+       ip link set dev $h1 up
+       ip link set dev $swp1 up
+       ip link set dev $h2 up
+       ip link set dev $swp2 up
+}
+
+switch_destroy()
+{
+       ip link set dev $swp2 down
+       ip link set dev $h2 down
+       ip link set dev $swp1 down
+       ip link set dev $h1 down
+
+       ip link del dev br0
+}
+
+setup_prepare()
+{
+       h1=${NETIFS[p1]}
+       swp1=${NETIFS[p2]}
+       h2=${NETIFS[p3]}
+       swp2=${NETIFS[p4]}
+
+       switch_create
+}
+
+cleanup()
+{
+       pre_cleanup
+       switch_destroy
+}
+
+sticky()
+{
+       bridge fdb add $TEST_MAC dev $swp1 master static sticky
+       check_err $? "Could not add fdb entry"
+       bridge fdb del $TEST_MAC dev $swp1 vlan 1 master static sticky
+       $MZ $h2 -c 1 -a $TEST_MAC -t arp "request" -q
+       bridge -j fdb show br br0 brport $swp1\
+               | jq -e ".[] | select(.mac == \"$TEST_MAC\")" &> /dev/null
+       check_err $? "Did not find FDB record when should"
+
+       log_test "Sticky fdb entry"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
index 2366dc6bce71605f281b9300dea8c415796aac4e..61ae2782388e9d0921b06a4589c5d5662cb02e8e 100644 (file)
@@ -203,7 +203,6 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
 {
        struct ip *iphdr = (struct ip *)ip_frame;
        struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame;
-       struct udphdr udphdr;
        int res;
        int offset;
        int frag_len;
index 0ab9423d009f5b8c189dddb4a1f8f81c8a3a2101..b9cdb68df4c5f8ef4fbed88150ca0cc467ca5e85 100755 (executable)
@@ -6,6 +6,26 @@
 #
 # Tests currently implemented:
 #
+# - pmtu_ipv4
+#      Set up two namespaces, A and B, with two paths between them over routers
+#      R1 and R2 (also implemented with namespaces), with different MTUs:
+#
+#        segment a_r1    segment b_r1          a_r1: 2000
+#      .--------------R1--------------.        a_r2: 1500
+#      A                               B       a_r3: 2000
+#      '--------------R2--------------'        a_r4: 1400
+#        segment a_r2    segment b_r2
+#
+#      Check that PMTU exceptions with the correct PMTU are created. Then
+#      decrease and increase the MTU of the local link for one of the paths,
+#      A to R1, checking that route exception PMTU changes accordingly over
+#      this path. Also check that locked exceptions are created when an ICMP
+#      message advertising a PMTU smaller than net.ipv4.route.min_pmtu is
+#      received
+#
+# - pmtu_ipv6
+#      Same as pmtu_ipv4, except for locked PMTU tests, using IPv6
+#
 # - pmtu_vti4_exception
 #      Set up vti tunnel on top of veth, with xfrm states and policies, in two
 #      namespaces with matching endpoints. Check that route exception is not
@@ -50,6 +70,8 @@ ksft_skip=4
 which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping)
 
 tests="
+       pmtu_ipv4_exception             ipv4: PMTU exceptions
+       pmtu_ipv6_exception             ipv6: PMTU exceptions
        pmtu_vti6_exception             vti6: PMTU exceptions
        pmtu_vti4_exception             vti4: PMTU exceptions
        pmtu_vti4_default_mtu           vti4: default MTU assignment
@@ -60,8 +82,45 @@ tests="
 
 NS_A="ns-$(mktemp -u XXXXXX)"
 NS_B="ns-$(mktemp -u XXXXXX)"
+NS_R1="ns-$(mktemp -u XXXXXX)"
+NS_R2="ns-$(mktemp -u XXXXXX)"
 ns_a="ip netns exec ${NS_A}"
 ns_b="ip netns exec ${NS_B}"
+ns_r1="ip netns exec ${NS_R1}"
+ns_r2="ip netns exec ${NS_R2}"
+
+# Addressing and routing for tests with routers: four network segments, with
+# index SEGMENT between 1 and 4, a common prefix (PREFIX4 or PREFIX6) and an
+# identifier ID, which is 1 for hosts (A and B), 2 for routers (R1 and R2).
+# Addresses are:
+# - IPv4: PREFIX4.SEGMENT.ID (/24)
+# - IPv6: PREFIX6:SEGMENT::ID (/64)
+prefix4="192.168"
+prefix6="fd00"
+a_r1=1
+a_r2=2
+b_r1=3
+b_r2=4
+#      ns      peer    segment
+routing_addrs="
+       A       R1      ${a_r1}
+       A       R2      ${a_r2}
+       B       R1      ${b_r1}
+       B       R2      ${b_r2}
+"
+# Traffic from A to B goes through R1 by default, and through R2 if destined to
+# B's address on the b_r2 segment.
+# Traffic from B to A goes through R1.
+#      ns      destination             gateway
+routes="
+       A       default                 ${prefix4}.${a_r1}.2
+       A       ${prefix4}.${b_r2}.1    ${prefix4}.${a_r2}.2
+       B       default                 ${prefix4}.${b_r1}.2
+
+       A       default                 ${prefix6}:${a_r1}::2
+       A       ${prefix6}:${b_r2}::1   ${prefix6}:${a_r2}::2
+       B       default                 ${prefix6}:${b_r1}::2
+"
 
 veth4_a_addr="192.168.1.1"
 veth4_b_addr="192.168.1.2"
@@ -94,9 +153,15 @@ err_flush() {
        err_buf=
 }
 
+# Find the auto-generated name for this namespace
+nsname() {
+       eval echo \$NS_$1
+}
+
 setup_namespaces() {
-       ip netns add ${NS_A} || return 1
-       ip netns add ${NS_B}
+       for n in ${NS_A} ${NS_B} ${NS_R1} ${NS_R2}; do
+               ip netns add ${n} || return 1
+       done
 }
 
 setup_veth() {
@@ -167,6 +232,49 @@ setup_xfrm6() {
        setup_xfrm 6 ${veth6_a_addr} ${veth6_b_addr}
 }
 
+setup_routing() {
+       for i in ${NS_R1} ${NS_R2}; do
+               ip netns exec ${i} sysctl -q net/ipv4/ip_forward=1
+               ip netns exec ${i} sysctl -q net/ipv6/conf/all/forwarding=1
+       done
+
+       for i in ${routing_addrs}; do
+               [ "${ns}" = "" ]        && ns="${i}"            && continue
+               [ "${peer}" = "" ]      && peer="${i}"          && continue
+               [ "${segment}" = "" ]   && segment="${i}"
+
+               ns_name="$(nsname ${ns})"
+               peer_name="$(nsname ${peer})"
+               if="veth_${ns}-${peer}"
+               ifpeer="veth_${peer}-${ns}"
+
+               # Create veth links
+               ip link add ${if} up netns ${ns_name} type veth peer name ${ifpeer} netns ${peer_name} || return 1
+               ip -n ${peer_name} link set dev ${ifpeer} up
+
+               # Add addresses
+               ip -n ${ns_name}   addr add ${prefix4}.${segment}.1/24  dev ${if}
+               ip -n ${ns_name}   addr add ${prefix6}:${segment}::1/64 dev ${if}
+
+               ip -n ${peer_name} addr add ${prefix4}.${segment}.2/24  dev ${ifpeer}
+               ip -n ${peer_name} addr add ${prefix6}:${segment}::2/64 dev ${ifpeer}
+
+               ns=""; peer=""; segment=""
+       done
+
+       for i in ${routes}; do
+               [ "${ns}" = "" ]        && ns="${i}"            && continue
+               [ "${addr}" = "" ]      && addr="${i}"          && continue
+               [ "${gw}" = "" ]        && gw="${i}"
+
+               ns_name="$(nsname ${ns})"
+
+               ip -n ${ns_name} route add ${addr} via ${gw}
+
+               ns=""; addr=""; gw=""
+       done
+}
+
 setup() {
        [ "$(id -u)" -ne 0 ] && echo "  need to run as root" && return $ksft_skip
 
@@ -178,8 +286,9 @@ setup() {
 
 cleanup() {
        [ ${cleanup_done} -eq 1 ] && return
-       ip netns del ${NS_A} 2> /dev/null
-       ip netns del ${NS_B} 2> /dev/null
+       for n in ${NS_A} ${NS_B} ${NS_R1} ${NS_R2}; do
+               ip netns del ${n} 2> /dev/null
+       done
        cleanup_done=1
 }
 
@@ -196,7 +305,9 @@ mtu_parse() {
 
        next=0
        for i in ${input}; do
+               [ ${next} -eq 1 -a "${i}" = "lock" ] && next=2 && continue
                [ ${next} -eq 1 ] && echo "${i}" && return
+               [ ${next} -eq 2 ] && echo "lock ${i}" && return
                [ "${i}" = "mtu" ] && next=1
        done
 }
@@ -229,6 +340,109 @@ route_get_dst_pmtu_from_exception() {
        mtu_parse "$(route_get_dst_exception "${ns_cmd}" ${dst})"
 }
 
+check_pmtu_value() {
+       expected="${1}"
+       value="${2}"
+       event="${3}"
+
+       [ "${expected}" = "any" ] && [ -n "${value}" ] && return 0
+       [ "${value}" = "${expected}" ] && return 0
+       [ -z "${value}" ] &&    err "  PMTU exception wasn't created after ${event}" && return 1
+       [ -z "${expected}" ] && err "  PMTU exception shouldn't exist after ${event}" && return 1
+       err "  found PMTU exception with incorrect MTU ${value}, expected ${expected}, after ${event}"
+       return 1
+}
+
+test_pmtu_ipvX() {
+       family=${1}
+
+       setup namespaces routing || return 2
+
+       if [ ${family} -eq 4 ]; then
+               ping=ping
+               dst1="${prefix4}.${b_r1}.1"
+               dst2="${prefix4}.${b_r2}.1"
+       else
+               ping=${ping6}
+               dst1="${prefix6}:${b_r1}::1"
+               dst2="${prefix6}:${b_r2}::1"
+       fi
+
+       # Set up initial MTU values
+       mtu "${ns_a}"  veth_A-R1 2000
+       mtu "${ns_r1}" veth_R1-A 2000
+       mtu "${ns_r1}" veth_R1-B 1400
+       mtu "${ns_b}"  veth_B-R1 1400
+
+       mtu "${ns_a}"  veth_A-R2 2000
+       mtu "${ns_r2}" veth_R2-A 2000
+       mtu "${ns_r2}" veth_R2-B 1500
+       mtu "${ns_b}"  veth_B-R2 1500
+
+       # Create route exceptions
+       ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1800 ${dst1} > /dev/null
+       ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1800 ${dst2} > /dev/null
+
+       # Check that exceptions have been created with the correct PMTU
+       pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})"
+       check_pmtu_value "1400" "${pmtu_1}" "exceeding MTU" || return 1
+       pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
+       check_pmtu_value "1500" "${pmtu_2}" "exceeding MTU" || return 1
+
+       # Decrease local MTU below PMTU, check for PMTU decrease in route exception
+       mtu "${ns_a}"  veth_A-R1 1300
+       mtu "${ns_r1}" veth_R1-A 1300
+       pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})"
+       check_pmtu_value "1300" "${pmtu_1}" "decreasing local MTU" || return 1
+       # Second exception shouldn't be modified
+       pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
+       check_pmtu_value "1500" "${pmtu_2}" "changing local MTU on a link not on this path" || return 1
+
+       # Increase MTU, check for PMTU increase in route exception
+       mtu "${ns_a}"  veth_A-R1 1700
+       mtu "${ns_r1}" veth_R1-A 1700
+       pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})"
+       check_pmtu_value "1700" "${pmtu_1}" "increasing local MTU" || return 1
+       # Second exception shouldn't be modified
+       pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
+       check_pmtu_value "1500" "${pmtu_2}" "changing local MTU on a link not on this path" || return 1
+
+       # Skip PMTU locking tests for IPv6
+       [ $family -eq 6 ] && return 0
+
+       # Decrease remote MTU on path via R2, get new exception
+       mtu "${ns_r2}" veth_R2-B 400
+       mtu "${ns_b}"  veth_B-R2 400
+       ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1400 ${dst2} > /dev/null
+       pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
+       check_pmtu_value "lock 552" "${pmtu_2}" "exceeding MTU, with MTU < min_pmtu" || return 1
+
+       # Decrease local MTU below PMTU
+       mtu "${ns_a}"  veth_A-R2 500
+       mtu "${ns_r2}" veth_R2-A 500
+       pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
+       check_pmtu_value "500" "${pmtu_2}" "decreasing local MTU" || return 1
+
+       # Increase local MTU
+       mtu "${ns_a}"  veth_A-R2 1500
+       mtu "${ns_r2}" veth_R2-A 1500
+       pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
+       check_pmtu_value "1500" "${pmtu_2}" "increasing local MTU" || return 1
+
+       # Get new exception
+       ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1400 ${dst2} > /dev/null
+       pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
+       check_pmtu_value "lock 552" "${pmtu_2}" "exceeding MTU, with MTU < min_pmtu" || return 1
+}
+
+test_pmtu_ipv4_exception() {
+       test_pmtu_ipvX 4
+}
+
+test_pmtu_ipv6_exception() {
+       test_pmtu_ipvX 6
+}
+
 test_pmtu_vti4_exception() {
        setup namespaces veth vti4 xfrm4 || return 2
 
@@ -248,24 +462,13 @@ test_pmtu_vti4_exception() {
        # exception is created
        ${ns_a} ping -q -M want -i 0.1 -w 2 -s ${ping_payload} ${vti4_b_addr} > /dev/null
        pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti4_b_addr})"
-       if [ "${pmtu}" != "" ]; then
-               err "  unexpected exception created with PMTU ${pmtu} for IP payload length ${esp_payload_rfc4106}"
-               return 1
-       fi
+       check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1
 
        # Now exceed link layer MTU by one byte, check that exception is created
+       # with the right PMTU value
        ${ns_a} ping -q -M want -i 0.1 -w 2 -s $((ping_payload + 1)) ${vti4_b_addr} > /dev/null
        pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti4_b_addr})"
-       if [ "${pmtu}" = "" ]; then
-               err "  exception not created for IP payload length $((esp_payload_rfc4106 + 1))"
-               return 1
-       fi
-
-       # ...with the right PMTU value
-       if [ ${pmtu} -ne ${esp_payload_rfc4106} ]; then
-               err "  wrong PMTU ${pmtu} in exception, expected: ${esp_payload_rfc4106}"
-               return 1
-       fi
+       check_pmtu_value "${esp_payload_rfc4106}" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106 + 1)))"
 }
 
 test_pmtu_vti6_exception() {
@@ -280,25 +483,18 @@ test_pmtu_vti6_exception() {
        ${ns_a} ${ping6} -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null
 
        # Check that exception was created
-       if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" = "" ]; then
-               err "  tunnel exceeding link layer MTU didn't create route exception"
-               return 1
-       fi
+       pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})"
+       check_pmtu_value any "${pmtu}" "creating tunnel exceeding link layer MTU" || return 1
 
        # Decrease tunnel MTU, check for PMTU decrease in route exception
        mtu "${ns_a}" vti6_a 3000
-
-       if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" -ne 3000 ]; then
-               err "  decreasing tunnel MTU didn't decrease route exception PMTU"
-               fail=1
-       fi
+       pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})"
+       check_pmtu_value "3000" "${pmtu}" "decreasing tunnel MTU" || fail=1
 
        # Increase tunnel MTU, check for PMTU increase in route exception
        mtu "${ns_a}" vti6_a 9000
-       if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" -ne 9000 ]; then
-               err "  increasing tunnel MTU didn't increase route exception PMTU"
-               fail=1
-       fi
+       pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})"
+       check_pmtu_value "9000" "${pmtu}" "increasing tunnel MTU" || fail=1
 
        return ${fail}
 }
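
The pmtu.sh hunks above replace the open-coded checks with the check_pmtu_value() helper and, in test_pmtu_ipvX(), exercise MTU changes on and off the path. The "lock 552" expectation reflects IPv4 PMTU locking: when an ICMP fragmentation-needed message advertises an MTU below net.ipv4.route.min_pmtu (552 by default), the kernel clamps the cached exception to that floor and marks it locked, which is also why the locking cases are skipped for IPv6. As an illustration of what these route exceptions mean to an application, a connected socket can read the same cached path MTU through IP_MTU; the sketch below is not part of the patch, and the destination address and port are placeholders, not values from the tests.

/*
 * Illustrative only: query the kernel's cached path MTU for a
 * destination, i.e. the per-destination value the tests read with
 * route_get_dst_pmtu_from_exception. 192.0.2.1 and port 9 are
 * placeholder values.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in dst = { .sin_family = AF_INET, .sin_port = htons(9) };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int pmtudisc = IP_PMTUDISC_DO;	/* set DF, as ping -M do would */
	int mtu;
	socklen_t len = sizeof(mtu);

	if (fd < 0)
		return 1;
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
	setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &pmtudisc, sizeof(pmtudisc));
	connect(fd, (struct sockaddr *)&dst, sizeof(dst));

	/* Reflects any route exception created by ICMP frag-needed. */
	if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) == 0)
		printf("path MTU: %d\n", mtu);

	close(fd);
	return 0;
}

Run from one of the test namespaces, this would be expected to report the link MTU at first, then the clamped exception value once an oversized ping has created one.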
index 11d54c36ae49a019d940d1902ce5632c7282a7a2..fac68d710f3599f312625be3106e9d14d6c7a353 100644 (file)
@@ -288,7 +288,7 @@ TEST_F(tls, splice_from_pipe)
        ASSERT_GE(pipe(p), 0);
        EXPECT_GE(write(p[1], mem_send, send_len), 0);
        EXPECT_GE(splice(p[0], NULL, self->fd, NULL, send_len, 0), 0);
-       EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0);
+       EXPECT_EQ(recv(self->cfd, mem_recv, send_len, MSG_WAITALL), send_len);
        EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
 }
 
@@ -322,13 +322,13 @@ TEST_F(tls, send_and_splice)
 
        ASSERT_GE(pipe(p), 0);
        EXPECT_EQ(send(self->fd, test_str, send_len2, 0), send_len2);
-       EXPECT_NE(recv(self->cfd, buf, send_len2, 0), -1);
+       EXPECT_EQ(recv(self->cfd, buf, send_len2, MSG_WAITALL), send_len2);
        EXPECT_EQ(memcmp(test_str, buf, send_len2), 0);
 
        EXPECT_GE(write(p[1], mem_send, send_len), send_len);
        EXPECT_GE(splice(p[0], NULL, self->fd, NULL, send_len, 0), send_len);
 
-       EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0);
+       EXPECT_EQ(recv(self->cfd, mem_recv, send_len, MSG_WAITALL), send_len);
        EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
 }
 
@@ -516,17 +516,17 @@ TEST_F(tls, recv_peek_multiple_records)
        len = strlen(test_str_second) + 1;
        EXPECT_EQ(send(self->fd, test_str_second, len, 0), len);
 
-       len = sizeof(buf);
+       len = strlen(test_str_first);
        memset(buf, 0, len);
-       EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1);
+       EXPECT_EQ(recv(self->cfd, buf, len, MSG_PEEK | MSG_WAITALL), len);
 
        /* MSG_PEEK can only peek into the current record. */
-       len = strlen(test_str_first) + 1;
+       len = strlen(test_str_first);
        EXPECT_EQ(memcmp(test_str_first, buf, len), 0);
 
-       len = sizeof(buf);
+       len = strlen(test_str) + 1;
        memset(buf, 0, len);
-       EXPECT_NE(recv(self->cfd, buf, len, 0), -1);
+       EXPECT_EQ(recv(self->cfd, buf, len, MSG_WAITALL), len);
 
        /* Non-MSG_PEEK will advance strparser (and therefore record)
         * however.
@@ -543,9 +543,9 @@ TEST_F(tls, recv_peek_multiple_records)
        len = strlen(test_str_second) + 1;
        EXPECT_EQ(send(self->fd, test_str_second, len, 0), len);
 
-       len = sizeof(buf);
+       len = strlen(test_str) + 1;
        memset(buf, 0, len);
-       EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1);
+       EXPECT_EQ(recv(self->cfd, buf, len, MSG_PEEK | MSG_WAITALL), len);
 
        len = strlen(test_str) + 1;
        EXPECT_EQ(memcmp(test_str, buf, len), 0);
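
In the tls.c hunks above, assertions of the form EXPECT_GE(recv(..., 0), 0) only proved that some bytes arrived; a legal short read would let the following memcmp() compare bytes that were never received. Requesting MSG_WAITALL and asserting the exact length makes the receive deterministic, and sizing the MSG_PEEK reads to the record contents matches the behaviour spelled out in the comments: a peek cannot cross into the next TLS record. A sketch of the loop that MSG_WAITALL effectively stands in for (an illustrative helper, not code from the patch):

#include <sys/socket.h>
#include <sys/types.h>

/* Keep calling recv() until exactly len bytes have arrived. */
static ssize_t recv_full(int fd, void *buf, size_t len)
{
	size_t got = 0;

	while (got < len) {
		ssize_t r = recv(fd, (char *)buf + got, len - got, 0);

		if (r <= 0)	/* error, or the peer closed the connection */
			return r;
		got += (size_t)r;
	}
	return (ssize_t)got;	/* success: exactly len bytes, like MSG_WAITALL */
}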
index 93baacab7693c48576e4872c8d31287931b73091..d056486f49de5eacad70b4813287094e5e597927 100644 (file)
@@ -1,5 +1,6 @@
 TEST_GEN_PROGS := copy_first_unaligned alignment_handler
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c ../utils.c
index b4d7432a0ecd1b4af5fa5fe2071276172a684dc5..d40300a65b42f79ba4c48705d909906ed9115dd0 100644 (file)
@@ -4,6 +4,7 @@ TEST_GEN_FILES := exec_target
 
 CFLAGS += -O2
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
index 1be547434a49c3feb8f7e40c03d4b5260a3b30d4..ede4d3dae7505ef31f822bac9b613aef464f6125 100644 (file)
@@ -5,6 +5,7 @@ all: $(TEST_PROGS)
 
 $(TEST_PROGS): ../harness.c ../utils.c
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 clean:
index 1cf89a34d97ca35299bf62b588b07d61ecbbb8a8..44574f3818b3d71d51a2021412ca70420b4543e2 100644 (file)
@@ -17,6 +17,7 @@ TEST_GEN_PROGS := copyuser_64_t0 copyuser_64_t1 copyuser_64_t2 \
 
 EXTRA_SOURCES := validate.c ../harness.c stubs.S
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(OUTPUT)/copyuser_64_t%:      copyuser_64.S $(EXTRA_SOURCES)
index 55d7db7a616bcd7661fceeb470ecb6f153cd9eac..5df476364b4d46dd1563889bbd86864622c29ffd 100644 (file)
@@ -3,6 +3,7 @@ TEST_GEN_PROGS := dscr_default_test dscr_explicit_test dscr_user_test   \
              dscr_inherit_test dscr_inherit_exec_test dscr_sysfs_test  \
              dscr_sysfs_thread_test
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(OUTPUT)/dscr_default_test: LDLIBS += -lpthread
index 0dd3a01fdab92bc887ddc70cda06fd20c89c5154..11a10d7a2bbd9f1c93ccbf309ac5d0dc9d688f2b 100644 (file)
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal vmx_syscall vmx_preempt vmx_signal vsx_preempt
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
index 8ebbe96d80a8452575fb234b6eba23a19e2c3a4e..33ced6e0ad25e07047e19699c90303ed97d157fa 100644 (file)
@@ -5,6 +5,7 @@ noarg:
 TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors
 TEST_GEN_FILES := tempfile
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
index 6e1629bf5b09dbfb850acdb0f0c60aafdc6e3ea6..19046db995fee387a77f9fd2f2d130078c37cb71 100644 (file)
@@ -5,6 +5,7 @@ noarg:
 TEST_GEN_PROGS := count_instructions l3_bank_test per_event_excludes
 EXTRA_SOURCES := ../harness.c event.c lib.c ../utils.c
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 all: $(TEST_GEN_PROGS) ebb
index c4e64bc2e2650a2e02fac4a6f891e973a126eced..bd5dfa509272a75b97b1dbf8ac2be00c0a1ab1cc 100644 (file)
@@ -17,6 +17,7 @@ TEST_GEN_PROGS := reg_access_test event_attributes_test cycles_test   \
         lost_exception_test no_handler_test                    \
         cycles_with_mmcr2_test
 
+top_srcdir = ../../../../../..
 include ../../../lib.mk
 
 $(TEST_GEN_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c \
index 175366db7be8bc1261194ac80d009cd33e0c9c59..ea2b7bd09e369c4fb679d1baeab71e80712af186 100644 (file)
@@ -2,6 +2,7 @@ CFLAGS += -I$(CURDIR)
 
 TEST_GEN_PROGS := load_unaligned_zeropad
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
index 28f5b781a553f4899e004e5d1450ab4b92de3407..923d531265f8c22d3adf2442e0a48fc7c1fffb5d 100644 (file)
@@ -4,6 +4,7 @@ TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
               ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \
               perf-hwbreak
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 all: $(TEST_PROGS)
index a7cbd5082e27175822bb2d351b7ebdbf9c0e345d..1fca25c6ace067ffb7a913508b4e13059cb04770 100644 (file)
@@ -8,6 +8,7 @@ $(TEST_PROGS): ../harness.c ../utils.c signal.S
 CFLAGS += -maltivec
 signal_tm: CFLAGS += -mhtm
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 clean:
index 10b35c87a4f4e316315c977b09e2b3bc6ef2b73c..7fc0623d85c314636be8cd3d393fb6c35c643edb 100644 (file)
@@ -29,6 +29,7 @@ endif
 
 ASFLAGS = $(CFLAGS)
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): $(EXTRA_SOURCES)
index 30b8ff8fb82e7a161759bf68363e69dcbcde2304..fcd2dcb8972babf90209b699307bd086f08c5f90 100644 (file)
@@ -5,6 +5,7 @@ ASFLAGS += -O2 -Wall -g -nostdlib -m64
 
 EXTRA_CLEAN = $(OUTPUT)/*.o $(OUTPUT)/check-reversed.S
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(OUTPUT)/switch_endian_test: $(OUTPUT)/check-reversed.S
index da22ca7c38c185a5bb4df90a81fa71a9fbccc6c1..161b8846336fdb324f97833ce821babd9c139eae 100644 (file)
@@ -2,6 +2,7 @@ TEST_GEN_PROGS := ipc_unmuxed
 
 CFLAGS += -I../../../../../usr/include
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
index c0e45d2dde25d115b73ae2e14002a8b24167abbc..9fc2cf6fbc92c9214f9978dec423c23147ca359b 100644 (file)
@@ -6,6 +6,7 @@ TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack
        tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable tm-trap \
        $(SIGNAL_CONTEXT_CHK_TESTS) tm-sigreturn
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c ../utils.c
index f8ced26748f84408d902cc4ce58012874c545a58..fb82068c9fda297e1c505c440d7cbe112119b581 100644 (file)
@@ -2,6 +2,7 @@ TEST_GEN_PROGS := test-vphn
 
 CFLAGS += -m64
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
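
All of the powerpc Makefile hunks above make the same one-line fix: they define top_srcdir relative to each Makefile before including lib.mk, so that the shared kselftest rules which reference the top of the source tree (at this point in the tree, the headers_install logic) resolve correctly. The one Makefile nested a directory deeper, included via ../../../lib.mk, accordingly uses six ".." components instead of five.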
index 642d4e12abea2e37d92e0acea3fdcf692a64d374..eec2663261f2ac8bfc09085049ea0d908f8851c5 100644 (file)
@@ -56,15 +56,13 @@ unsigned int yield_mod_cnt, nr_abort;
                        printf(fmt, ## __VA_ARGS__);    \
        } while (0)
 
-#if defined(__x86_64__) || defined(__i386__)
+#ifdef __i386__
 
 #define INJECT_ASM_REG "eax"
 
 #define RSEQ_INJECT_CLOBBER \
        , INJECT_ASM_REG
 
-#ifdef __i386__
-
 #define RSEQ_INJECT_ASM(n) \
        "mov asm_loop_cnt_" #n ", %%" INJECT_ASM_REG "\n\t" \
        "test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \
@@ -76,9 +74,16 @@ unsigned int yield_mod_cnt, nr_abort;
 
 #elif defined(__x86_64__)
 
+#define INJECT_ASM_REG_P       "rax"
+#define INJECT_ASM_REG         "eax"
+
+#define RSEQ_INJECT_CLOBBER \
+       , INJECT_ASM_REG_P \
+       , INJECT_ASM_REG
+
 #define RSEQ_INJECT_ASM(n) \
-       "lea asm_loop_cnt_" #n "(%%rip), %%" INJECT_ASM_REG "\n\t" \
-       "mov (%%" INJECT_ASM_REG "), %%" INJECT_ASM_REG "\n\t" \
+       "lea asm_loop_cnt_" #n "(%%rip), %%" INJECT_ASM_REG_P "\n\t" \
+       "mov (%%" INJECT_ASM_REG_P "), %%" INJECT_ASM_REG "\n\t" \
        "test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \
        "jz 333f\n\t" \
        "222:\n\t" \
@@ -86,10 +91,6 @@ unsigned int yield_mod_cnt, nr_abort;
        "jnz 222b\n\t" \
        "333:\n\t"
 
-#else
-#error "Unsupported architecture"
-#endif
-
 #elif defined(__s390__)
 
 #define RSEQ_INJECT_INPUT \
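
The param_test.c hunk above fixes the x86-64 half of the delay-injection macros: a RIP-relative lea must target a 64-bit register, while the loop counter itself is a 32-bit value, so the 64-bit variant now loads the address into %rax (INJECT_ASM_REG_P) and the counter into %eax, clobbering both, instead of sharing the i386 32-bit register. A minimal standalone sketch of the same pattern (x86-64 GCC/Clang only; the variable name is illustrative):

/* cc -O2 sketch.c && ./a.out; echo $?   -> prints 3 (x86-64 only) */
static int asm_loop_cnt __attribute__((used)) = 3;	/* stand-in for asm_loop_cnt_<n> */

int main(void)
{
	int v;

	__asm__ volatile ("lea asm_loop_cnt(%%rip), %%rax\n\t"	/* 64-bit address register */
			  "mov (%%rax), %%eax\n\t"		/* 32-bit counter load */
			  "mov %%eax, %0\n\t"
			  : "=r" (v) : : "rax", "memory");
	return v;
}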
index 49a6f8c3fdae75cf95925a9e0a27a075f076d096..f9281e8aa31365a39c50c44fd040defcdcf84467 100644 (file)
@@ -232,6 +232,8 @@ directory:
       and the other is a test whether the command leaked memory or not.
       (This one is a preliminary version, it may not work quite right yet,
       but the overall template is there and it should only need tweaks.)
+  - buildebpfPlugin.py:
+      builds all programs in $EBPFDIR.
 
 
 ACKNOWLEDGEMENTS
diff --git a/tools/testing/selftests/tc-testing/bpf/Makefile b/tools/testing/selftests/tc-testing/bpf/Makefile
new file mode 100644 (file)
index 0000000..dc92eb2
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/bpf/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0
+
+APIDIR := ../../../../include/uapi
+TEST_GEN_FILES = action.o
+
+top_srcdir = ../../../../..
+include ../../lib.mk
+
+CLANG ?= clang
+LLC   ?= llc
+PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
+
+ifeq ($(PROBE),)
+  CPU ?= probe
+else
+  CPU ?= generic
+endif
+
+CLANG_SYS_INCLUDES := $(shell $(CLANG) -v -E - </dev/null 2>&1 \
+       | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }')
+
+CLANG_FLAGS = -I. -I$(APIDIR) \
+             $(CLANG_SYS_INCLUDES) \
+             -Wno-compare-distinct-pointer-types
+
+$(OUTPUT)/%.o: %.c
+       $(CLANG) $(CLANG_FLAGS) \
+                -O2 -target bpf -emit-llvm -c $< -o - |      \
+       $(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
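
The new Makefile drives clang's BPF backend in two stages: clang lowers the C source to LLVM IR (-emit-llvm), and llc turns the IR into a BPF object (-march=bpf -filetype=obj). The PROBE variable captures the output of llc -march=bpf -mcpu=probe -filetype=null /dev/null; empty output means the option is accepted, so the build selects -mcpu=probe (matching the instruction-set level of the running kernel), otherwise it falls back to -mcpu=generic. With its defaults filled in, the pattern rule amounts to roughly:

	clang -I. -I../../../../include/uapi $(CLANG_SYS_INCLUDES) -Wno-compare-distinct-pointer-types \
		-O2 -target bpf -emit-llvm -c action.c -o - | \
	llc -march=bpf -mcpu=probe -filetype=obj -o action.o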
diff --git a/tools/testing/selftests/tc-testing/bpf/action.c b/tools/testing/selftests/tc-testing/bpf/action.c
new file mode 100644 (file)
index 0000000..c32b99b
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/bpf/action.c
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018 Davide Caratti, Red Hat inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+
+#include <linux/bpf.h>
+#include <linux/pkt_cls.h>
+
+__attribute__((section("action-ok"),used)) int action_ok(struct __sk_buff *s)
+{
+       return TC_ACT_OK;
+}
+
+__attribute__((section("action-ko"),used)) int action_ko(struct __sk_buff *s)
+{
+       s->data = 0x0;
+       return TC_ACT_OK;
+}
+
+char _license[] __attribute__((section("license"),used)) = "GPL";
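
action.c packs one passing and one failing program into a single object, selected by ELF section name from the updated tc test cases further down: "action-ok" loads, while "action-ko" is rejected because the store to s->data writes a context field the verifier treats as read-only, which is exactly what the second test case's expected exit code of 255 and matchCount of 0 assert. For contrast, a hypothetical companion section (not in the patch) showing that reading the same fields is fine:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>

/*
 * Hypothetical "action-rd" section, not part of the patch: s->data and
 * s->data_end may be read and compared (the usual direct packet access
 * bounds check); only the direct store in "action-ko" fails the verifier.
 */
__attribute__((section("action-rd"),used)) int action_rd(struct __sk_buff *s)
{
	return s->data < s->data_end ? TC_ACT_OK : TC_ACT_SHOT;
}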
diff --git a/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py
new file mode 100644 (file)
index 0000000..9f0ba10
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py
@@ -0,0 +1,66 @@
+'''
+build ebpf program
+'''
+
+import os
+import signal
+from string import Template
+import subprocess
+import time
+from TdcPlugin import TdcPlugin
+from tdc_config import *
+
+class SubPlugin(TdcPlugin):
+    def __init__(self):
+        self.sub_class = 'buildebpf/SubPlugin'
+        self.tap = ''
+        super().__init__()
+
+    def pre_suite(self, testcount, testidlist):
+        super().pre_suite(testcount, testidlist)
+
+        if self.args.buildebpf:
+            self._ebpf_makeall()
+
+    def post_suite(self, index):
+        super().post_suite(index)
+
+        self._ebpf_makeclean()
+
+    def add_args(self, parser):
+        super().add_args(parser)
+
+        self.argparser_group = self.argparser.add_argument_group(
+            'buildebpf',
+            'options for buildebpfPlugin')
+        self.argparser_group.add_argument(
+            '-B', '--buildebpf', action='store_true',
+            help='build eBPF programs')
+
+        return self.argparser
+
+    def _ebpf_makeall(self):
+        if self.args.buildebpf:
+            self._make('all')
+
+    def _ebpf_makeclean(self):
+        if self.args.buildebpf:
+            self._make('clean')
+
+    def _make(self, target):
+        command = 'make -C {} {}'.format(self.args.NAMES['EBPFDIR'], target)
+        proc = subprocess.Popen(command,
+            shell=True,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            env=ENVIR)
+        (rawout, serr) = proc.communicate()
+
+        if proc.returncode != 0 and len(serr) > 0:
+            foutput = serr.decode("utf-8")
+        else:
+            foutput = rawout.decode("utf-8")
+
+        proc.stdout.close()
+        proc.stderr.close()
+        return proc, foutput
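
The plugin hooks tdc's suite lifecycle: pre_suite() runs make all in the directory named by $EBPFDIR when -B/--buildebpf is given, and post_suite() delegates to _ebpf_makeclean(), which re-checks the flag before running make clean. Assuming the file has been activated in tdc's plugins directory in the usual way, a run that compiles the eBPF objects before executing the bpf-category test cases would look something like:

	./tdc.py -B -c bpf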
index 6f289a49e5ecf01552026bef35534a93d8cd136c..5970cee6d05f26fd9be36a5b1385d62083ab3cfe 100644 (file)
@@ -55,7 +55,6 @@
             "bpf"
         ],
         "setup": [
-            "printf '#include <linux/bpf.h>\nchar l[] __attribute__((section(\"license\"),used))=\"GPL\"; __attribute__((section(\"action\"),used)) int m(struct __sk_buff *s) { return 2; }' | clang -O2 -x c -c - -target bpf -o _b.o",
             [
                 "$TC action flush action bpf",
                 0,
                 255
             ]
         ],
-        "cmdUnderTest": "$TC action add action bpf object-file _b.o index 667",
+        "cmdUnderTest": "$TC action add action bpf object-file $EBPFDIR/action.o section action-ok index 667",
         "expExitCode": "0",
         "verifyCmd": "$TC action get action bpf index 667",
-        "matchPattern": "action order [0-9]*: bpf _b.o:\\[action\\] id [0-9]* tag 3b185187f1855c4c( jited)? default-action pipe.*index 667 ref",
+        "matchPattern": "action order [0-9]*: bpf action.o:\\[action-ok\\] id [0-9]* tag [0-9a-f]{16}( jited)? default-action pipe.*index 667 ref",
         "matchCount": "1",
         "teardown": [
-            "$TC action flush action bpf",
-            "rm -f _b.o"
+            "$TC action flush action bpf"
         ]
     },
     {
@@ -81,7 +79,6 @@
             "bpf"
         ],
         "setup": [
-            "printf '#include <linux/bpf.h>\nchar l[] __attribute__((section(\"license\"),used))=\"GPL\"; __attribute__((section(\"action\"),used)) int m(struct __sk_buff *s) { s->data = 0x0; return 2; }' | clang -O2 -x c -c - -target bpf -o _c.o",
             [
                 "$TC action flush action bpf",
                 0,
                 255
             ]
         ],
-        "cmdUnderTest": "$TC action add action bpf object-file _c.o index 667",
+        "cmdUnderTest": "$TC action add action bpf object-file $EBPFDIR/action.o section action-ko index 667",
         "expExitCode": "255",
         "verifyCmd": "$TC action get action bpf index 667",
-        "matchPattern": "action order [0-9]*: bpf _c.o:\\[action\\] id [0-9].*index 667 ref",
+        "matchPattern": "action order [0-9]*: bpf action.o:\\[action-ko\\] id [0-9].*index 667 ref",
         "matchCount": "0",
         "teardown": [
             [
                 0,
                 1,
                 255
-            ],
-            "rm -f _c.o"
+            ]
         ]
     },
     {
index a023d0d62b25c5986a648ff5d8d68fc528252e49..d651bc1501bdb230e0b193d86ff39e3894a483ef 100644 (file)
@@ -16,7 +16,9 @@ NAMES = {
           'DEV2': '',
           'BATCH_FILE': './batch.txt',
           # Name of the namespace to use
-          'NS': 'tcut'
+          'NS': 'tcut',
+          # Directory containing eBPF test programs
+          'EBPFDIR': './bpf'
         }
 
 
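The remaining hunks below extend the x86 vDSO selftest (test_vdso.c): they resolve __vdso_clock_gettime and __vdso_gettimeofday with dlsym(), sandwich each vDSO call between two real syscalls, and require the three results to be in sequence (ts_leq()/tv_leq()), covering clock ids 0 through 11 plus deliberately invalid ones. One common way to obtain such a vDSO handle, sketched standalone (assumes a glibc process where the vDSO is mapped as linux-vdso.so.1, as on x86-64; link with -ldl):

#include <dlfcn.h>
#include <stdio.h>
#include <time.h>

typedef int (*vgettime_t)(clockid_t, struct timespec *);

int main(void)
{
	/* RTLD_NOLOAD: only succeed if the object is already mapped. */
	void *vdso = dlopen("linux-vdso.so.1",
			    RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
	vgettime_t vgt;
	struct timespec ts;

	if (!vdso)
		return 1;	/* vDSO not mapped under this name */

	vgt = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
	if (vgt && vgt(CLOCK_MONOTONIC, &ts) == 0)
		printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);

	return 0;
}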
index 2352590117042ebf79ddd946a00d8c49d38d786e..35edd61d1663eb6fc0378f606638b7b2cc0e26cb 100644 (file)
@@ -17,6 +17,7 @@
 #include <errno.h>
 #include <sched.h>
 #include <stdbool.h>
+#include <limits.h>
 
 #ifndef SYS_getcpu
 # ifdef __x86_64__
 
 int nerrs = 0;
 
+typedef int (*vgettime_t)(clockid_t, struct timespec *);
+
+vgettime_t vdso_clock_gettime;
+
+typedef long (*vgtod_t)(struct timeval *tv, struct timezone *tz);
+
+vgtod_t vdso_gettimeofday;
+
 typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
 
 getcpu_t vgetcpu;
@@ -95,6 +104,15 @@ static void fill_function_pointers()
                printf("Warning: failed to find getcpu in vDSO\n");
 
        vgetcpu = (getcpu_t) vsyscall_getcpu();
+
+       vdso_clock_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
+       if (!vdso_clock_gettime)
+               printf("Warning: failed to find clock_gettime in vDSO\n");
+
+       vdso_gettimeofday = (vgtod_t)dlsym(vdso, "__vdso_gettimeofday");
+       if (!vdso_gettimeofday)
+               printf("Warning: failed to find gettimeofday in vDSO\n");
+
 }
 
 static long sys_getcpu(unsigned * cpu, unsigned * node,
@@ -103,6 +121,16 @@ static long sys_getcpu(unsigned * cpu, unsigned * node,
        return syscall(__NR_getcpu, cpu, node, cache);
 }
 
+static inline int sys_clock_gettime(clockid_t id, struct timespec *ts)
+{
+       return syscall(__NR_clock_gettime, id, ts);
+}
+
+static inline int sys_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+       return syscall(__NR_gettimeofday, tv, tz);
+}
+
 static void test_getcpu(void)
 {
        printf("[RUN]\tTesting getcpu...\n");
@@ -155,10 +183,154 @@ static void test_getcpu(void)
        }
 }
 
+static bool ts_leq(const struct timespec *a, const struct timespec *b)
+{
+       if (a->tv_sec != b->tv_sec)
+               return a->tv_sec < b->tv_sec;
+       else
+               return a->tv_nsec <= b->tv_nsec;
+}
+
+static bool tv_leq(const struct timeval *a, const struct timeval *b)
+{
+       if (a->tv_sec != b->tv_sec)
+               return a->tv_sec < b->tv_sec;
+       else
+               return a->tv_usec <= b->tv_usec;
+}
+
+static char const * const clocknames[] = {
+       [0] = "CLOCK_REALTIME",
+       [1] = "CLOCK_MONOTONIC",
+       [2] = "CLOCK_PROCESS_CPUTIME_ID",
+       [3] = "CLOCK_THREAD_CPUTIME_ID",
+       [4] = "CLOCK_MONOTONIC_RAW",
+       [5] = "CLOCK_REALTIME_COARSE",
+       [6] = "CLOCK_MONOTONIC_COARSE",
+       [7] = "CLOCK_BOOTTIME",
+       [8] = "CLOCK_REALTIME_ALARM",
+       [9] = "CLOCK_BOOTTIME_ALARM",
+       [10] = "CLOCK_SGI_CYCLE",
+       [11] = "CLOCK_TAI",
+};
+
+static void test_one_clock_gettime(int clock, const char *name)
+{
+       struct timespec start, vdso, end;
+       int vdso_ret, end_ret;
+
+       printf("[RUN]\tTesting clock_gettime for clock %s (%d)...\n", name, clock);
+
+       if (sys_clock_gettime(clock, &start) < 0) {
+               if (errno == EINVAL) {
+                       vdso_ret = vdso_clock_gettime(clock, &vdso);
+                       if (vdso_ret == -EINVAL) {
+                               printf("[OK]\tNo such clock.\n");
+                       } else {
+                               printf("[FAIL]\tNo such clock, but __vdso_clock_gettime returned %d\n", vdso_ret);
+                               nerrs++;
+                       }
+               } else {
+                       printf("[WARN]\t clock_gettime(%d) syscall returned error %d\n", clock, errno);
+               }
+               return;
+       }
+
+       vdso_ret = vdso_clock_gettime(clock, &vdso);
+       end_ret = sys_clock_gettime(clock, &end);
+
+       if (vdso_ret != 0 || end_ret != 0) {
+               printf("[FAIL]\tvDSO returned %d, syscall errno=%d\n",
+                      vdso_ret, errno);
+               nerrs++;
+               return;
+       }
+
+       printf("\t%llu.%09ld %llu.%09ld %llu.%09ld\n",
+              (unsigned long long)start.tv_sec, start.tv_nsec,
+              (unsigned long long)vdso.tv_sec, vdso.tv_nsec,
+              (unsigned long long)end.tv_sec, end.tv_nsec);
+
+       if (!ts_leq(&start, &vdso) || !ts_leq(&vdso, &end)) {
+               printf("[FAIL]\tTimes are out of sequence\n");
+               nerrs++;
+       }
+}
+
+static void test_clock_gettime(void)
+{
+       for (int clock = 0; clock < sizeof(clocknames) / sizeof(clocknames[0]);
+            clock++) {
+               test_one_clock_gettime(clock, clocknames[clock]);
+       }
+
+       /* Also test some invalid clock ids */
+       test_one_clock_gettime(-1, "invalid");
+       test_one_clock_gettime(INT_MIN, "invalid");
+       test_one_clock_gettime(INT_MAX, "invalid");
+}
+
+static void test_gettimeofday(void)
+{
+       struct timeval start, vdso, end;
+       struct timezone sys_tz, vdso_tz;
+       int vdso_ret, end_ret;
+
+       if (!vdso_gettimeofday)
+               return;
+
+       printf("[RUN]\tTesting gettimeofday...\n");
+
+       if (sys_gettimeofday(&start, &sys_tz) < 0) {
+               printf("[FAIL]\tsys_gettimeofday failed (%d)\n", errno);
+               nerrs++;
+               return;
+       }
+
+       vdso_ret = vdso_gettimeofday(&vdso, &vdso_tz);
+       end_ret = sys_gettimeofday(&end, NULL);
+
+       if (vdso_ret != 0 || end_ret != 0) {
+               printf("[FAIL]\tvDSO returned %d, syscall errno=%d\n",
+                      vdso_ret, errno);
+               nerrs++;
+               return;
+       }
+
+       printf("\t%llu.%06ld %llu.%06ld %llu.%06ld\n",
+              (unsigned long long)start.tv_sec, start.tv_usec,
+              (unsigned long long)vdso.tv_sec, vdso.tv_usec,
+              (unsigned long long)end.tv_sec, end.tv_usec);
+
+       if (!tv_leq(&start, &vdso) || !tv_leq(&vdso, &end)) {
+               printf("[FAIL]\tTimes are out of sequence\n");
+               nerrs++;
+       }
+
+       if (sys_tz.tz_minuteswest == vdso_tz.tz_minuteswest &&
+           sys_tz.tz_dsttime == vdso_tz.tz_dsttime) {
+               printf("[OK]\ttimezones match: minuteswest=%d, dsttime=%d\n",
+                      sys_tz.tz_minuteswest, sys_tz.tz_dsttime);
+       } else {
+               printf("[FAIL]\ttimezones do not match\n");
+               nerrs++;
+       }
+
+       /* And make sure that passing NULL for tz doesn't crash. */
+       vdso_gettimeofday(&vdso, NULL);
+}
+
 int main(int argc, char **argv)
 {
        fill_function_pointers();
 
+       test_clock_gettime();
+       test_gettimeofday();
+
+       /*
+        * Test getcpu() last so that, if something goes wrong setting affinity,
+        * we still run the other tests.
+        */
        test_getcpu();
 
        return nerrs ? 1 : 0;