git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
author: David S. Miller <davem@davemloft.net>
Thu, 28 Dec 2017 01:40:32 +0000 (20:40 -0500)
committer: David S. Miller <davem@davemloft.net>
Thu, 28 Dec 2017 01:40:32 +0000 (20:40 -0500)
Daniel Borkmann says:

====================
pull-request: bpf-next 2017-12-28

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) Fix incorrect state pruning related to recognition of zero initialized
   stack slots, where stacksafe exploration would mistakenly return a
   positive pruning verdict too early ignoring other slots, from Gianluca.

2) Various BPF to BPF calls related follow-up fixes. Fix an off-by-one
   in maximum call depth check, and rework maximum stack depth tracking
   logic to fix a bypass of the total stack size check reported by Jann.
   Also fix a bug in arm64 JIT where prog->jited_len was uninitialized.
   Addition of various test cases to BPF selftests, from Alexei.

3) Addition of a BPF selftest to test_verifier that is related to BPF to
   BPF calls which demonstrates a late caller stack size increase and
   thus out of bounds access. Fixed above in 2). Test case from Jann.

4) Addition of correlating BPF helper calls, BPF to BPF calls as well
   as BPF maps to bpftool xlated dump in order to allow for better
   BPF program introspection and debugging, from Daniel.

5) Fixing several bugs in BPF to BPF calls kallsyms handling in order
   to get it actually to work for subprogs, from Daniel.

6) Extending sparc64 JIT support for BPF to BPF calls and fix a couple
   of build errors for libbpf on sparc64, from David.

7) Allow narrower context access for BPF dev cgroup typed programs in
   order to adapt to LLVM code generation. Also adjust memlock rlimit
   in the test_dev_cgroup BPF selftest, from Yonghong.

8) Add netdevsim Kconfig entry to BPF selftests since test_offload.py
   relies on netdevsim device being available, from Jakub.

9) Reduce scope of xdp_do_generic_redirect_map() to being static,
   from Xiongwei.

10) Minor cleanups and spelling fixes in BPF verifier, from Colin.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
595 files changed:
Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
Documentation/devicetree/bindings/net/mediatek-net.txt
Documentation/devicetree/bindings/net/phy.txt
Documentation/devicetree/bindings/net/ti,wilink-st.txt [deleted file]
Documentation/devicetree/bindings/net/ti-bluetooth.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
Documentation/networking/batman-adv.rst
Documentation/networking/netdev-features.txt
Documentation/networking/xfrm_proc.txt
Documentation/sysctl/net.txt
MAINTAINERS
Makefile
arch/arm/lib/csumpartialcopyuser.S
arch/arm64/kvm/hyp/debug-sr.c
arch/mips/bcm63xx/dev-enet.c
arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
arch/parisc/boot/compressed/misc.c
arch/parisc/include/asm/thread_info.h
arch/parisc/kernel/entry.S
arch/parisc/kernel/hpmc.S
arch/parisc/kernel/unwind.c
arch/parisc/lib/delay.c
arch/powerpc/net/bpf_jit_comp64.c
arch/s390/net/bpf_jit_comp.c
arch/sparc/mm/fault_32.c
arch/sparc/mm/fault_64.c
arch/sparc/net/bpf_jit_comp_64.c
arch/um/kernel/trap.c
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/entry/entry_64_compat.S
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/desc.h
arch/x86/include/asm/fixmap.h
arch/x86/include/asm/hypervisor.h
arch/x86/include/asm/irqflags.h
arch/x86/include/asm/kdebug.h
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/stacktrace.h
arch/x86/include/asm/switch_to.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/traps.h
arch/x86/include/asm/unwind.h
arch/x86/kernel/asm-offsets.c
arch/x86/kernel/asm-offsets_32.c
arch/x86/kernel/asm-offsets_64.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/doublefault.c
arch/x86/kernel/dumpstack.c
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/ioport.c
arch/x86/kernel/irq.c
arch/x86/kernel/irq_64.c
arch/x86/kernel/paravirt_patch_64.c
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/traps.c
arch/x86/kernel/unwind_orc.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/kvm/emulate.c
arch/x86/kvm/mmu.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/lib/delay.c
arch/x86/mm/fault.c
arch/x86/mm/kasan_init_64.c
arch/x86/power/cpu.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/mmu_pv.c
block/bio.c
block/blk-map.c
block/blk-throttle.c
block/bounce.c
block/kyber-iosched.c
drivers/acpi/apei/erst.c
drivers/acpi/cppc_acpi.c
drivers/bcma/driver_pcie2.c
drivers/block/null_blk.c
drivers/bluetooth/Kconfig
drivers/bluetooth/bluecard_cs.c
drivers/bluetooth/btqcomsmd.c
drivers/bluetooth/btsdio.c
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_bcm.c
drivers/bluetooth/hci_ll.c
drivers/bluetooth/hci_qca.c
drivers/bluetooth/hci_serdev.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/imx6q-cpufreq.c
drivers/dma/at_hdmac.c
drivers/dma/dma-jz4740.c
drivers/dma/dmatest.c
drivers/dma/fsl-edma.c
drivers/dma/ioat/init.c
drivers/mfd/cros_ec_spi.c
drivers/mfd/twl4030-audio.c
drivers/mfd/twl6040.c
drivers/misc/pti.c
drivers/mtd/mtdcore.c
drivers/mtd/nand/brcmnand/brcmnand.c
drivers/mtd/nand/gpio.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
drivers/net/bonding/bond_options.c
drivers/net/dsa/lan9303-core.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/arc/emac.h
drivers/net/ethernet/arc/emac_main.c
drivers/net/ethernet/broadcom/bcm63xx_enet.c
drivers/net/ethernet/broadcom/bcm63xx_enet.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
drivers/net/ethernet/cisco/enic/enic.h
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/hisilicon/hns3/hnae3.h
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/emac/emac.h
drivers/net/ethernet/ibm/emac/phy.c
drivers/net/ethernet/ibm/emac/rgmii.c
drivers/net/ethernet/ibm/emac/zmii.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/qp.c
drivers/net/ethernet/mellanox/mlx5/core/rl.c
drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/micrel/ks8851.c
drivers/net/ethernet/netronome/nfp/bpf/main.c
drivers/net/ethernet/netronome/nfp/bpf/main.h
drivers/net/ethernet/netronome/nfp/flower/action.c
drivers/net/ethernet/netronome/nfp/flower/cmsg.h
drivers/net/ethernet/netronome/nfp/flower/main.c
drivers/net/ethernet/netronome/nfp/flower/main.h
drivers/net/ethernet/netronome/nfp/flower/match.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/pasemi/pasemi_mac.c
drivers/net/ethernet/qlogic/qede/qede.h
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
drivers/net/ethernet/qlogic/qede/qede_filter.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/qualcomm/emac/emac.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/mcdi_pcol.h
drivers/net/ethernet/sfc/mcdi_port.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/nic.h
drivers/net/ethernet/sfc/siena.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
drivers/net/ethernet/stmicro/stmmac/enh_desc.c
drivers/net/ethernet/stmicro/stmmac/norm_desc.c
drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
drivers/net/geneve.c
drivers/net/netdevsim/bpf.c
drivers/net/netdevsim/netdev.c
drivers/net/phy/marvell.c
drivers/net/phy/mdio-xgene.c
drivers/net/phy/mdio_device.c
drivers/net/phy/phylink.c
drivers/net/vxlan.c
drivers/net/wireless/ath/ath10k/Kconfig
drivers/net/wireless/ath/ath10k/Makefile
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/core.h
drivers/net/wireless/ath/ath10k/debug.c
drivers/net/wireless/ath/ath10k/debug.h
drivers/net/wireless/ath/ath10k/debugfs_sta.c
drivers/net/wireless/ath/ath10k/htt.h
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/hw.c
drivers/net/wireless/ath/ath10k/hw.h
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/spectral.h
drivers/net/wireless/ath/ath10k/wmi-ops.h
drivers/net/wireless/ath/ath10k/wmi-tlv.c
drivers/net/wireless/ath/ath10k/wmi-tlv.h
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/ath/ath6kl/cfg80211.c
drivers/net/wireless/ath/ath6kl/txrx.c
drivers/net/wireless/ath/ath9k/Kconfig
drivers/net/wireless/ath/ath9k/Makefile
drivers/net/wireless/ath/ath9k/common-spectral.h
drivers/net/wireless/ath/ath9k/dfs.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/wcn36xx/hal.h
drivers/net/wireless/ath/wcn36xx/main.c
drivers/net/wireless/ath/wcn36xx/smd.c
drivers/net/wireless/ath/wcn36xx/smd.h
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/ath/wil6210/debugfs.c
drivers/net/wireless/ath/wil6210/ethtool.c
drivers/net/wireless/ath/wil6210/fw_inc.c
drivers/net/wireless/ath/wil6210/interrupt.c
drivers/net/wireless/ath/wil6210/main.c
drivers/net/wireless/ath/wil6210/netdev.c
drivers/net/wireless/ath/wil6210/pcie_bus.c
drivers/net/wireless/ath/wil6210/pm.c
drivers/net/wireless/ath/wil6210/wil6210.h
drivers/net/wireless/ath/wil6210/wmi.c
drivers/net/wireless/ath/wil6210/wmi.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c
drivers/net/wireless/intel/iwlwifi/Makefile
drivers/net/wireless/intel/iwlwifi/cfg/22000.c [new file with mode: 0644]
drivers/net/wireless/intel/iwlwifi/cfg/8000.c
drivers/net/wireless/intel/iwlwifi/cfg/9000.c
drivers/net/wireless/intel/iwlwifi/cfg/a000.c [deleted file]
drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/fw/file.h
drivers/net/wireless/intel/iwlwifi/fw/smem.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/mvm/Makefile
drivers/net/wireless/intel/iwlwifi/mvm/coex.c
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c [new file with mode: 0644]
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.h
drivers/net/wireless/intel/iwlwifi/mvm/rx.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/marvell/mwifiex/cfg80211.c
drivers/net/wireless/marvell/mwl8k.c
drivers/net/wireless/mediatek/Kconfig
drivers/net/wireless/mediatek/Makefile
drivers/net/wireless/mediatek/mt76/Kconfig [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/Makefile [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/debugfs.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/dma.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/dma.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/eeprom.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mac80211.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mmio.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2_core.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2_dma.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2_dma.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2_init.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2_mac.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2_mac.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2_main.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2_pci.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2_phy.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2_regs.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2_trace.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2_trace.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x2_tx.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/trace.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/trace.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/tx.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/util.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/util.h [new file with mode: 0644]
drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
drivers/net/wireless/realtek/rtlwifi/base.c
drivers/net/wireless/realtek/rtlwifi/pci.c
drivers/net/wireless/realtek/rtlwifi/pci.h
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
drivers/net/wireless/realtek/rtlwifi/wifi.h
drivers/net/wireless/ti/wl1251/init.c
drivers/net/wireless/ti/wlcore/acx.c
drivers/net/wireless/ti/wlcore/acx.h
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/ti/wlcore/tx.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/of/of_mdio.c
drivers/parisc/lba_pci.c
drivers/pci/pci-driver.c
drivers/s390/net/Kconfig
drivers/s390/net/lcs.c
drivers/s390/net/lcs.h
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_mpc.h
drivers/s390/net/qeth_l2.h
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3.h
drivers/s390/net/qeth_l3_main.c
drivers/s390/net/qeth_l3_sys.c
drivers/scsi/aacraid/aacraid.h
drivers/scsi/aacraid/linit.c
drivers/scsi/osd/osd_initiator.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_spi.c
drivers/spi/spi-armada-3700.c
drivers/spi/spi-atmel.c
drivers/spi/spi-rspi.c
drivers/spi/spi-sun4i.c
drivers/spi/spi-xilinx.c
drivers/ssb/Kconfig
drivers/target/target_core_pscsi.c
fs/cramfs/Kconfig
fs/exec.c
fs/ext4/extents.c
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/namei.c
fs/namespace.c
fs/super.c
include/kvm/arm_arch_timer.h
include/linux/bio.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/bpf_verifier.h
include/linux/intel-pti.h [new file with mode: 0644]
include/linux/ipv6.h
include/linux/mdio.h
include/linux/mfd/rtsx_pci.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/netdev_features.h
include/linux/netdevice.h
include/linux/pti.h [deleted file]
include/linux/rtnetlink.h
include/linux/spi/spi.h
include/net/cfg80211.h
include/net/inet_sock.h
include/net/netns/core.h
include/net/pkt_cls.h
include/net/pkt_sched.h
include/net/sch_generic.h
include/net/sock.h
include/net/xfrm.h
include/trace/events/kvm.h
include/trace/events/sock.h
include/trace/events/tcp.h
include/uapi/linux/batadv_packet.h [new file with mode: 0644]
include/uapi/linux/batman_adv.h
include/uapi/linux/l2tp.h
kernel/bpf/verifier.c
kernel/time/posix-timers.c
lib/test_bpf.c
mm/backing-dev.c
net/batman-adv/Kconfig
net/batman-adv/Makefile
net/batman-adv/bat_algo.c
net/batman-adv/bat_algo.h
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bat_iv_ogm.h
net/batman-adv/bat_v.c
net/batman-adv/bat_v.h
net/batman-adv/bat_v_elp.c
net/batman-adv/bat_v_elp.h
net/batman-adv/bat_v_ogm.c
net/batman-adv/bat_v_ogm.h
net/batman-adv/bitarray.c
net/batman-adv/bitarray.h
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/bridge_loop_avoidance.h
net/batman-adv/debugfs.c
net/batman-adv/debugfs.h
net/batman-adv/distributed-arp-table.c
net/batman-adv/distributed-arp-table.h
net/batman-adv/fragmentation.c
net/batman-adv/fragmentation.h
net/batman-adv/gateway_client.c
net/batman-adv/gateway_client.h
net/batman-adv/gateway_common.c
net/batman-adv/gateway_common.h
net/batman-adv/hard-interface.c
net/batman-adv/hard-interface.h
net/batman-adv/hash.c
net/batman-adv/hash.h
net/batman-adv/icmp_socket.c
net/batman-adv/icmp_socket.h
net/batman-adv/log.c
net/batman-adv/log.h
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/multicast.c
net/batman-adv/multicast.h
net/batman-adv/netlink.c
net/batman-adv/netlink.h
net/batman-adv/network-coding.c
net/batman-adv/network-coding.h
net/batman-adv/originator.c
net/batman-adv/originator.h
net/batman-adv/packet.h [deleted file]
net/batman-adv/routing.c
net/batman-adv/routing.h
net/batman-adv/send.c
net/batman-adv/send.h
net/batman-adv/soft-interface.c
net/batman-adv/soft-interface.h
net/batman-adv/sysfs.c
net/batman-adv/sysfs.h
net/batman-adv/tp_meter.c
net/batman-adv/tp_meter.h
net/batman-adv/translation-table.c
net/batman-adv/translation-table.h
net/batman-adv/tvlv.c
net/batman-adv/tvlv.h
net/batman-adv/types.h
net/bluetooth/af_bluetooth.c
net/bluetooth/hci_debugfs.c
net/bluetooth/hci_request.c
net/bridge/br_netlink.c
net/bridge/br_sysfs_br.c
net/core/dev.c
net/core/ethtool.c
net/core/flow_dissector.c
net/core/net_namespace.c
net/core/skbuff.c
net/core/sock.c
net/dccp/proto.c
net/ipv4/af_inet.c
net/ipv4/esp4.c
net/ipv4/esp4_offload.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_hashtables.c
net/ipv4/ip_gre.c
net/ipv4/tcp.c
net/ipv4/tcp_diag.c
net/ipv4/tcp_ipv4.c
net/ipv4/xfrm4_mode_tunnel.c
net/ipv6/af_inet6.c
net/ipv6/esp6.c
net/ipv6/esp6_offload.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/ipv6_sockglue.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/xfrm6_mode_tunnel.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_debugfs.c
net/l2tp/l2tp_netlink.c
net/openvswitch/flow.c
net/packet/af_packet.c
net/rds/bind.c
net/rds/tcp.c
net/sched/act_police.c
net/sched/cls_api.c
net/sched/cls_bpf.c
net/sched/sch_api.c
net/sched/sch_atm.c
net/sched/sch_cbq.c
net/sched/sch_cbs.c
net/sched/sch_choke.c
net/sched/sch_codel.c
net/sched/sch_drr.c
net/sched/sch_dsmark.c
net/sched/sch_fifo.c
net/sched/sch_fq.c
net/sched/sch_fq_codel.c
net/sched/sch_generic.c
net/sched/sch_gred.c
net/sched/sch_hfsc.c
net/sched/sch_hhf.c
net/sched/sch_htb.c
net/sched/sch_ingress.c
net/sched/sch_mq.c
net/sched/sch_mqprio.c
net/sched/sch_multiq.c
net/sched/sch_netem.c
net/sched/sch_pie.c
net/sched/sch_plug.c
net/sched/sch_prio.c
net/sched/sch_qfq.c
net/sched/sch_red.c
net/sched/sch_sfb.c
net/sched/sch_sfq.c
net/sched/sch_tbf.c
net/sched/sch_teql.c
net/sctp/debug.c
net/sctp/endpointola.c
net/sctp/sm_sideeffect.c
net/sctp/socket.c
net/sctp/ulpqueue.c
net/socket.c
net/tipc/group.c
net/wireless/Makefile
net/wireless/certs/sforshee.hex [new file with mode: 0644]
net/wireless/certs/sforshee.x509 [deleted file]
net/wireless/nl80211.c
net/xfrm/xfrm_device.c
sound/core/rawmidi.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/usb/mixer.c
sound/usb/quirks.c
tools/arch/s390/include/uapi/asm/bpf_perf_event.h
tools/kvm/kvm_stat/kvm_stat
tools/kvm/kvm_stat/kvm_stat.txt
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/test_offload.py
tools/testing/selftests/bpf/test_progs.c
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/net/config
tools/testing/selftests/net/rtnetlink.sh
virt/kvm/arm/arch_timer.c
virt/kvm/arm/arm.c
virt/kvm/arm/mmio.c
virt/kvm/arm/mmu.c

index 376fa2f50e6bc9b41052928037acd4b3a382d380..956bb046e599d576e3f881b2901e0d369a3c9802 100644 (file)
@@ -13,7 +13,6 @@ Required properties:
                  at25df321a
                  at25df641
                  at26df081a
-                 en25s64
                  mr25h128
                  mr25h256
                  mr25h10
@@ -33,7 +32,6 @@ Required properties:
                  s25fl008k
                  s25fl064k
                  sst25vf040b
-                 sst25wf040b
                  m25p40
                  m25p80
                  m25p16
index 214eaa9a6683861fd529d2af6fb84e361b27d80d..53c13ee384a49bec1163a6dc4239044296dbf067 100644 (file)
@@ -28,7 +28,7 @@ Required properties:
 - mediatek,sgmiisys: phandle to the syscon node that handles the SGMII setup
        which is required for those SoCs equipped with SGMII such as MT7622 SoC.
 - mediatek,pctl: phandle to the syscon node that handles the ports slew rate
-       and driver current
+       and driver current: only for MT2701 and MT7623 SoC
 
 Optional properties:
 - interrupt-parent: Should be the phandle for the interrupt controller
index 72860ce7f61016b6bd91186d2cb7a77103357b27..d2169a56f5e37d1225114e8e66cb6614474335e8 100644 (file)
@@ -55,10 +55,10 @@ Optional Properties:
 
 - reset-gpios: The GPIO phandle and specifier for the PHY reset signal.
 
-- reset-delay-us: Delay after the reset was asserted in microseconds.
+- reset-assert-us: Delay after the reset was asserted in microseconds.
   If this property is missing the delay will be skipped.
 
-- reset-post-delay-us: Delay after the reset was deasserted in microseconds.
+- reset-deassert-us: Delay after the reset was deasserted in microseconds.
   If this property is missing the delay will be skipped.
 
 Example:
@@ -70,6 +70,6 @@ ethernet-phy@0 {
        reg = <0>;
 
        reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
-       reset-delay-us = <1000>;
-       reset-post-delay-us = <2000>;
+       reset-assert-us = <1000>;
+       reset-deassert-us = <2000>;
 };
diff --git a/Documentation/devicetree/bindings/net/ti,wilink-st.txt b/Documentation/devicetree/bindings/net/ti,wilink-st.txt
deleted file mode 100644 (file)
index 1649c1f..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-TI WiLink 7/8 (wl12xx/wl18xx) Shared Transport BT/FM/GPS devices
-
-TI WiLink devices have a UART interface for providing Bluetooth, FM radio,
-and GPS over what's called "shared transport". The shared transport is
-standard BT HCI protocol with additional channels for the other functions.
-
-These devices also have a separate WiFi interface as described in
-wireless/ti,wlcore.txt.
-
-This bindings follows the UART slave device binding in
-../serial/slave-device.txt.
-
-Required properties:
- - compatible: should be one of the following:
-    "ti,wl1271-st"
-    "ti,wl1273-st"
-    "ti,wl1281-st"
-    "ti,wl1283-st"
-    "ti,wl1285-st"
-    "ti,wl1801-st"
-    "ti,wl1805-st"
-    "ti,wl1807-st"
-    "ti,wl1831-st"
-    "ti,wl1835-st"
-    "ti,wl1837-st"
-
-Optional properties:
- - enable-gpios : GPIO signal controlling enabling of BT. Active high.
- - vio-supply : Vio input supply (1.8V)
- - vbat-supply : Vbat input supply (2.9-4.8V)
- - clocks : Must contain an entry, for each entry in clock-names.
-   See ../clocks/clock-bindings.txt for details.
- - clock-names : Must include the following entry:
-   "ext_clock" (External clock provided to the TI combo chip).
-
-Example:
-
-&serial0 {
-       compatible = "ns16550a";
-       ...
-       bluetooth {
-               compatible = "ti,wl1835-st";
-               enable-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
-               clocks = <&clk32k_wl18xx>;
-               clock-names = "ext_clock";
-       };
-};
diff --git a/Documentation/devicetree/bindings/net/ti-bluetooth.txt b/Documentation/devicetree/bindings/net/ti-bluetooth.txt
new file mode 100644 (file)
index 0000000..6d03ff8
--- /dev/null
@@ -0,0 +1,61 @@
+Texas Instruments Bluetooth Chips
+---------------------------------
+
+This documents the binding structure and common properties for serial
+attached TI Bluetooth devices. The following chips are included in this
+binding:
+
+* TI CC256x Bluetooth devices
+* TI WiLink 7/8 (wl12xx/wl18xx) Shared Transport BT/FM/GPS devices
+
+TI WiLink devices have a UART interface for providing Bluetooth, FM radio,
+and GPS over what's called "shared transport". The shared transport is
+standard BT HCI protocol with additional channels for the other functions.
+
+TI WiLink devices also have a separate WiFi interface as described in
+wireless/ti,wlcore.txt.
+
+This bindings follows the UART slave device binding in
+../serial/slave-device.txt.
+
+Required properties:
+ - compatible: should be one of the following:
+    "ti,cc2560"
+    "ti,wl1271-st"
+    "ti,wl1273-st"
+    "ti,wl1281-st"
+    "ti,wl1283-st"
+    "ti,wl1285-st"
+    "ti,wl1801-st"
+    "ti,wl1805-st"
+    "ti,wl1807-st"
+    "ti,wl1831-st"
+    "ti,wl1835-st"
+    "ti,wl1837-st"
+
+Optional properties:
+ - enable-gpios : GPIO signal controlling enabling of BT. Active high.
+ - vio-supply : Vio input supply (1.8V)
+ - vbat-supply : Vbat input supply (2.9-4.8V)
+ - clocks : Must contain an entry, for each entry in clock-names.
+   See ../clocks/clock-bindings.txt for details.
+ - clock-names : Must include the following entry:
+   "ext_clock" (External clock provided to the TI combo chip).
+ - nvmem-cells: phandle to nvmem data cell that contains a 6 byte BD address
+   with the most significant byte first (big-endian).
+ - nvmem-cell-names: "bd-address" (required when nvmem-cells is specified)
+
+Example:
+
+&serial0 {
+       compatible = "ns16550a";
+       ...
+       bluetooth {
+               compatible = "ti,wl1835-st";
+               enable-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
+               clocks = <&clk32k_wl18xx>;
+               clock-names = "ext_clock";
+               nvmem-cells = <&bd_address>;
+               nvmem-cell-names = "bd-address";
+       };
+};
diff --git a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt
new file mode 100644 (file)
index 0000000..0c17a0e
--- /dev/null
@@ -0,0 +1,32 @@
+* MediaTek mt76xx devices
+
+This node provides properties for configuring the MediaTek mt76xx wireless
+device. The node is expected to be specified as a child node of the PCI
+controller to which the wireless chip is connected.
+
+Optional properties:
+
+- mac-address: See ethernet.txt in the parent directory
+- local-mac-address: See ethernet.txt in the parent directory
+- ieee80211-freq-limit: See ieee80211.txt
+- mediatek,mtd-eeprom: Specify a MTD partition + offset containing EEPROM data
+
+Optional nodes:
+- led: Properties for a connected LED
+  Optional properties:
+    - led-sources: See Documentation/devicetree/bindings/leds/common.txt
+
+&pcie {
+       pcie0 {
+               wifi@0,0 {
+                       compatible = "mediatek,mt76";
+                       reg = <0x0000 0 0 0 0>;
+                       ieee80211-freq-limit = <5000000 6000000>;
+                       mediatek,mtd-eeprom = <&factory 0x8000>;
+
+                       led {
+                               led-sources = <2>;
+                       };
+               };
+       };
+};
index 74d7f0af209c8178f078b99a9dd460d44d430f8c..3d2a031217da0851acad1ce8cc1716b376801adc 100644 (file)
@@ -41,6 +41,9 @@ Optional properties:
 - qcom,msi_addr: MSI interrupt address.
 - qcom,msi_base: Base value to add before writing MSI data into
                MSI address register.
+- qcom,ath10k-calibration-variant: string to search for in the board-2.bin
+                                  variant list with the same bus and device
+                                  specific ids
 - qcom,ath10k-calibration-data : calibration data + board specific data
                                 as an array, the length can vary between
                                 hw versions.
index 5bf13960f7f4a3c826c10b1e15a618df82d82403..e3c48b20b1a691b37d0b425251a257c682a38eca 100644 (file)
@@ -12,24 +12,30 @@ Required properties:
   - "fsl,imx53-ecspi" for SPI compatible with the one integrated on i.MX53 and later Soc
 - reg : Offset and length of the register set for the device
 - interrupts : Should contain CSPI/eCSPI interrupt
-- cs-gpios : Specifies the gpio pins to be used for chipselects.
 - clocks : Clock specifiers for both ipg and per clocks.
 - clock-names : Clock names should include both "ipg" and "per"
 See the clock consumer binding,
        Documentation/devicetree/bindings/clock/clock-bindings.txt
-- dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
-               Documentation/devicetree/bindings/dma/dma.txt
-- dma-names: DMA request names should include "tx" and "rx" if present.
 
-Obsolete properties:
-- fsl,spi-num-chipselects : Contains the number of the chipselect
+Recommended properties:
+- cs-gpios : GPIOs to use as chip selects, see spi-bus.txt.  While the native chip
+select lines can be used, they appear to always generate a pulse between each
+word of a transfer.  Most use cases will require GPIO based chip selects to
+generate a valid transaction.
 
 Optional properties:
+- num-cs :  Number of total chip selects, see spi-bus.txt.
+- dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
+Documentation/devicetree/bindings/dma/dma.txt.
+- dma-names: DMA request names, if present, should include "tx" and "rx".
 - fsl,spi-rdy-drctl: Integer, representing the value of DRCTL, the register
 controlling the SPI_READY handling. Note that to enable the DRCTL consideration,
 the SPI_READY mode-flag needs to be set too.
 Valid values are: 0 (disabled), 1 (edge-triggered burst) and 2 (level-triggered burst).
 
+Obsolete properties:
+- fsl,spi-num-chipselects : Contains the number of the chipselect
+
 Example:
 
 ecspi@70010000 {
index a342b2cc3dc64037e4c47a2ea4a2d12f47702c8d..245fb6c0ab6f55c828296c279c8867d47f629d1a 100644 (file)
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 ==========
 batman-adv
 ==========
index 7413eb05223b104f4d8a00d9615dbd694b596cf4..c77f9d57eb91584878d26061450c3e08a5b19030 100644 (file)
@@ -163,3 +163,12 @@ This requests that the NIC receive all possible frames, including errored
 frames (such as bad FCS, etc).  This can be helpful when sniffing a link with
 bad packets on it.  Some NICs may receive more packets if also put into normal
 PROMISC mode.
+
+*  rx-gro-hw
+
+This requests that the NIC enables Hardware GRO (generic receive offload).
+Hardware GRO is basically the exact reverse of TSO, and is generally
+stricter than Hardware LRO.  A packet stream merged by Hardware GRO must
+be re-segmentable by GSO or TSO back to the exact original packet stream.
+Hardware GRO is dependent on RXCSUM since every packet successfully merged
+by hardware must also have the checksum verified by hardware.
index d0d8bafa9016b7f435ac5529ca6d6e1c736b42dc..2eae619ab67b05c29ae95438906ef21e94fe0506 100644 (file)
@@ -5,13 +5,15 @@ Masahide NAKAMURA <nakam@linux-ipv6.org>
 
 Transformation Statistics
 -------------------------
-xfrm_proc is a statistics shown factor dropped by transformation
-for developer.
-It is a counter designed from current transformation source code
-and defined like linux private MIB.
 
-Inbound statistics
-~~~~~~~~~~~~~~~~~~
+The xfrm_proc code is a set of statistics showing numbers of packets
+dropped by the transformation code and why.  These counters are defined
+as part of the linux private MIB.  These counters can be viewed in
+/proc/net/xfrm_stat.
+
+
+Inbound errors
+~~~~~~~~~~~~~~
 XfrmInError:
        All errors which is not matched others
 XfrmInBufferError:
@@ -46,6 +48,10 @@ XfrmInPolBlock:
        Policy discards
 XfrmInPolError:
        Policy error
+XfrmAcquireError:
+       State hasn't been fully acquired before use
+XfrmFwdHdrError:
+       Forward routing of a packet is not allowed
 
 Outbound errors
 ~~~~~~~~~~~~~~~
@@ -72,3 +78,5 @@ XfrmOutPolDead:
        Policy is dead
 XfrmOutPolError:
        Policy error
+XfrmOutStateInvalid:
+       State is invalid, perhaps expired
index b67044a2575f51b9d24f85b1a58a2fe6075b3312..35c62f522754a79f8ecaba74dd8c84a038a8928f 100644 (file)
@@ -95,7 +95,9 @@ dev_weight
 --------------
 
 The maximum number of packets that kernel can handle on a NAPI interrupt,
-it's a Per-CPU variable.
+it's a Per-CPU variable. For drivers that support LRO or GRO_HW, a hardware
+aggregated packet is counted as one packet in this context.
+
 Default: 64
 
 dev_weight_rx_bias
index 810415d10b03b246eddb0015914a9c20f4e1b492..753799d24cd9280e955941920d28377668d096f2 100644 (file)
@@ -2564,6 +2564,7 @@ S:        Maintained
 F:     Documentation/ABI/testing/sysfs-class-net-batman-adv
 F:     Documentation/ABI/testing/sysfs-class-net-mesh
 F:     Documentation/networking/batman-adv.rst
+F:     include/uapi/linux/batadv_packet.h
 F:     include/uapi/linux/batman_adv.h
 F:     net/batman-adv/
 
@@ -2689,7 +2690,6 @@ F:        drivers/mtd/devices/block2mtd.c
 
 BLUETOOTH DRIVERS
 M:     Marcel Holtmann <marcel@holtmann.org>
-M:     Gustavo Padovan <gustavo@padovan.org>
 M:     Johan Hedberg <johan.hedberg@gmail.com>
 L:     linux-bluetooth@vger.kernel.org
 W:     http://www.bluez.org/
@@ -2700,7 +2700,6 @@ F:        drivers/bluetooth/
 
 BLUETOOTH SUBSYSTEM
 M:     Marcel Holtmann <marcel@holtmann.org>
-M:     Gustavo Padovan <gustavo@padovan.org>
 M:     Johan Hedberg <johan.hedberg@gmail.com>
 L:     linux-bluetooth@vger.kernel.org
 W:     http://www.bluez.org/
index 3f4d157add54018fbea707a8129f2326b463dec6..7e02f951b284187d5354c2b7bd39b0ef1bf5d903 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
index 1712f132b80d2402d94d72ea974a0c3326fa2f52..b83fdc06286a64ece150fb7e419bc587e47c3e34 100644 (file)
                .pushsection .text.fixup,"ax"
                .align  4
 9001:          mov     r4, #-EFAULT
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+               ldr     r5, [sp, #9*4]          @ *err_ptr
+#else
                ldr     r5, [sp, #8*4]          @ *err_ptr
+#endif
                str     r4, [r5]
                ldmia   sp, {r1, r2}            @ retrieve dst, len
                add     r2, r2, r1
index 321c9c05dd9e09fc0c745a4543a286b7628f00a4..f4363d40e2cd7fd62d40d826d5296c95f15cde9f 100644 (file)
@@ -74,6 +74,9 @@ static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1)
 {
        u64 reg;
 
+       /* Clear pmscr in case of early return */
+       *pmscr_el1 = 0;
+
        /* SPE present on this CPU? */
        if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
                                                  ID_AA64DFR0_PMSVER_SHIFT))
index e8284771d62036d04c8f1942b80d56bd3bb71d15..07b4c65a88a43708467626b410baf79936bac203 100644 (file)
@@ -265,6 +265,14 @@ int __init bcm63xx_enet_register(int unit,
                dpd->dma_chan_width = ENETDMA_CHAN_WIDTH;
        }
 
+       if (unit == 0) {
+               dpd->rx_chan = 0;
+               dpd->tx_chan = 1;
+       } else {
+               dpd->rx_chan = 2;
+               dpd->tx_chan = 3;
+       }
+
        ret = platform_device_register(pdev);
        if (ret)
                return ret;
index c0bd47444cffdee2bc244efaf6b41a9b990bb01a..da39e4d326baa7aa2b5dc7a20f6ab92956216b55 100644 (file)
@@ -55,6 +55,10 @@ struct bcm63xx_enet_platform_data {
 
        /* DMA descriptor shift */
        unsigned int dma_desc_shift;
+
+       /* dma channel ids */
+       int rx_chan;
+       int tx_chan;
 };
 
 /*
index 9345b44b86f036572e33721eb80e9bbbe4493aa4..f57118e1f6b4265257799ae2cf8ea356077e20b9 100644 (file)
@@ -123,8 +123,8 @@ int puts(const char *s)
        while ((nuline = strchr(s, '\n')) != NULL) {
                if (nuline != s)
                        pdc_iodc_print(s, nuline - s);
-                       pdc_iodc_print("\r\n", 2);
-                       s = nuline + 1;
+               pdc_iodc_print("\r\n", 2);
+               s = nuline + 1;
        }
        if (*s != '\0')
                pdc_iodc_print(s, strlen(s));
index c980a02a52bc0dda0a23b205f59d1d86438553f2..598c8d60fa5e602cc9303e1986ada9680d64feb3 100644 (file)
@@ -35,7 +35,12 @@ struct thread_info {
 
 /* thread information allocation */
 
+#ifdef CONFIG_IRQSTACKS
+#define THREAD_SIZE_ORDER      2 /* PA-RISC requires at least 16k stack */
+#else
 #define THREAD_SIZE_ORDER      3 /* PA-RISC requires at least 32k stack */
+#endif
+
 /* Be sure to hunt all references to this down when you change the size of
  * the kernel stack */
 #define THREAD_SIZE             (PAGE_SIZE << THREAD_SIZE_ORDER)
index a4fd296c958e8e14f13a913aca50510b11eb49b7..f3cecf5117cf8ab14724f0ea3535220c3224d569 100644 (file)
@@ -878,9 +878,6 @@ ENTRY_CFI(syscall_exit_rfi)
        STREG   %r19,PT_SR7(%r16)
 
 intr_return:
-       /* NOTE: Need to enable interrupts incase we schedule. */
-       ssm     PSW_SM_I, %r0
-
        /* check for reschedule */
        mfctl   %cr30,%r1
        LDREG   TI_FLAGS(%r1),%r19      /* sched.h: TIF_NEED_RESCHED */
@@ -907,6 +904,11 @@ intr_check_sig:
        LDREG   PT_IASQ1(%r16), %r20
        cmpib,COND(=),n 0,%r20,intr_restore /* backward */
 
+       /* NOTE: We need to enable interrupts if we have to deliver
+        * signals. We used to do this earlier but it caused kernel
+        * stack overflows. */
+       ssm     PSW_SM_I, %r0
+
        copy    %r0, %r25                       /* long in_syscall = 0 */
 #ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29                  /* Reference param save area */
@@ -958,6 +960,10 @@ intr_do_resched:
        cmpib,COND(=)   0, %r20, intr_do_preempt
        nop
 
+       /* NOTE: We need to enable interrupts if we schedule.  We used
+        * to do this earlier but it caused kernel stack overflows. */
+       ssm     PSW_SM_I, %r0
+
 #ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29          /* Reference param save area */
 #endif
index e3a8e5e4d5de75897adcea4134f87c7246f60646..8d072c44f300c16d45ba8f4ee0c2eee6435e4ddd 100644 (file)
@@ -305,6 +305,7 @@ ENDPROC_CFI(os_hpmc)
 
 
        __INITRODATA
+       .align 4
        .export os_hpmc_size
 os_hpmc_size:
        .word .os_hpmc_end-.os_hpmc
index 5a657986ebbf4bef7beff4e8c8d20f1343872347..143f90e2f9f3c631616d4af52f0fe3fa08f44af9 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/slab.h>
 #include <linux/kallsyms.h>
 #include <linux/sort.h>
-#include <linux/sched.h>
 
 #include <linux/uaccess.h>
 #include <asm/assembly.h>
index 7eab4bb8abe630b14c54c3b457285b4228607dc6..66e506520505d8a3245d49d492831df5e3bbb42a 100644 (file)
@@ -16,9 +16,7 @@
 #include <linux/preempt.h>
 #include <linux/init.h>
 
-#include <asm/processor.h>
 #include <asm/delay.h>
-
 #include <asm/special_insns.h>    /* for mfctl() */
 #include <asm/processor.h> /* for boot_cpu_data */
 
index d5a5bc43cf8f96f1d648457a432d04afa200712b..6771c63b2bec4cafdf69c113d8f05538e6453b54 100644 (file)
@@ -763,7 +763,8 @@ emit_clear:
                        func = (u8 *) __bpf_call_base + imm;
 
                        /* Save skb pointer if we need to re-cache skb data */
-                       if (bpf_helper_changes_pkt_data(func))
+                       if ((ctx->seen & SEEN_SKB) &&
+                           bpf_helper_changes_pkt_data(func))
                                PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx));
 
                        bpf_jit_emit_func_call(image, ctx, (u64)func);
@@ -772,7 +773,8 @@ emit_clear:
                        PPC_MR(b2p[BPF_REG_0], 3);
 
                        /* refresh skb cache */
-                       if (bpf_helper_changes_pkt_data(func)) {
+                       if ((ctx->seen & SEEN_SKB) &&
+                           bpf_helper_changes_pkt_data(func)) {
                                /* reload skb pointer to r3 */
                                PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx));
                                bpf_jit_emit_skb_loads(image, ctx);
index f4baa8c514d345913a1be161df51d3ccd529c340..1dfadbd126f341838a533bc515da9b6d5a56b966 100644 (file)
@@ -55,8 +55,7 @@ struct bpf_jit {
 #define SEEN_LITERAL   8       /* code uses literals */
 #define SEEN_FUNC      16      /* calls C functions */
 #define SEEN_TAIL_CALL 32      /* code uses tail calls */
-#define SEEN_SKB_CHANGE        64      /* code changes skb data */
-#define SEEN_REG_AX    128     /* code uses constant blinding */
+#define SEEN_REG_AX    64      /* code uses constant blinding */
 #define SEEN_STACK     (SEEN_FUNC | SEEN_MEM | SEEN_SKB)
 
 /*
@@ -448,12 +447,12 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
                        EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
                                      REG_15, 152);
        }
-       if (jit->seen & SEEN_SKB)
+       if (jit->seen & SEEN_SKB) {
                emit_load_skb_data_hlen(jit);
-       if (jit->seen & SEEN_SKB_CHANGE)
                /* stg %b1,ST_OFF_SKBP(%r0,%r15) */
                EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
                              STK_OFF_SKBP);
+       }
 }
 
 /*
@@ -983,8 +982,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
                EMIT2(0x0d00, REG_14, REG_W1);
                /* lgr %b0,%r2: load return value into %b0 */
                EMIT4(0xb9040000, BPF_REG_0, REG_2);
-               if (bpf_helper_changes_pkt_data((void *)func)) {
-                       jit->seen |= SEEN_SKB_CHANGE;
+               if ((jit->seen & SEEN_SKB) &&
+                   bpf_helper_changes_pkt_data((void *)func)) {
                        /* lg %b1,ST_OFF_SKBP(%r15) */
                        EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0,
                                      REG_15, STK_OFF_SKBP);
index be3136f142a9993e0c6c8cfa1d651b1685654a73..a8103a84b4ac4a2ec84c44c302862b3aed8b7e7f 100644 (file)
@@ -113,7 +113,7 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
        if (!printk_ratelimit())
                return;
 
-       printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
+       printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
               task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
               tsk->comm, task_pid_nr(tsk), address,
               (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
index 815c03d7a765524424b92866b1567ea2a43695d4..41363f46797bf9f74dd922fadbd2a3f190e8c9bb 100644 (file)
@@ -154,7 +154,7 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
        if (!printk_ratelimit())
                return;
 
-       printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
+       printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
               task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
               tsk->comm, task_pid_nr(tsk), address,
               (void *)regs->tpc, (void *)regs->u_regs[UREG_I7],
index 991a3ab76e7a7f96df2558b36ee5489c55b78cc7..635fdefd4ae2e682f66399f0742967b2e5d56c02 100644 (file)
@@ -1245,14 +1245,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                u8 *func = ((u8 *)__bpf_call_base) + imm;
 
                ctx->saw_call = true;
+               if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func))
+                       emit_reg_move(bpf2sparc[BPF_REG_1], L7, ctx);
 
                emit_call((u32 *)func, ctx);
                emit_nop(ctx);
 
                emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx);
 
-               if (bpf_helper_changes_pkt_data(func) && ctx->saw_ld_abs_ind)
-                       load_skb_regs(ctx, bpf2sparc[BPF_REG_6]);
+               if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func))
+                       load_skb_regs(ctx, L7);
                break;
        }
 
index 4e6fcb32620ffb2125f648622499e5bf7c950e72..428644175956231aad112a0ce221452913736635 100644 (file)
@@ -150,7 +150,7 @@ static void show_segv_info(struct uml_pt_regs *regs)
        if (!printk_ratelimit())
                return;
 
-       printk("%s%s[%d]: segfault at %lx ip %p sp %p error %x",
+       printk("%s%s[%d]: segfault at %lx ip %px sp %px error %x",
                task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
                tsk->comm, task_pid_nr(tsk), FAULT_ADDRESS(*fi),
                (void *)UPT_IP(regs), (void *)UPT_SP(regs),
index 4838037f97f6edffda62b5b045c837fcc29402f0..bd8b57a5c874bc37ab23fb0489f840ba0b17168e 100644 (file)
@@ -941,7 +941,8 @@ ENTRY(debug)
        movl    %esp, %eax                      # pt_regs pointer
 
        /* Are we currently on the SYSENTER stack? */
-       PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
+       movl    PER_CPU_VAR(cpu_entry_area), %ecx
+       addl    $CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
        subl    %eax, %ecx      /* ecx = (end of SYSENTER_stack) - esp */
        cmpl    $SIZEOF_SYSENTER_stack, %ecx
        jb      .Ldebug_from_sysenter_stack
@@ -984,7 +985,8 @@ ENTRY(nmi)
        movl    %esp, %eax                      # pt_regs pointer
 
        /* Are we currently on the SYSENTER stack? */
-       PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
+       movl    PER_CPU_VAR(cpu_entry_area), %ecx
+       addl    $CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
        subl    %eax, %ecx      /* ecx = (end of SYSENTER_stack) - esp */
        cmpl    $SIZEOF_SYSENTER_stack, %ecx
        jb      .Lnmi_from_sysenter_stack
index f81d50d7ceacdefa06d61482687937096c68421c..423885bee398c6c9cb80f3bd4a2ec8317e41062a 100644 (file)
@@ -140,6 +140,64 @@ END(native_usergs_sysret64)
  * with them due to bugs in both AMD and Intel CPUs.
  */
 
+       .pushsection .entry_trampoline, "ax"
+
+/*
+ * The code in here gets remapped into cpu_entry_area's trampoline.  This means
+ * that the assembler and linker have the wrong idea as to where this code
+ * lives (and, in fact, it's mapped more than once, so it's not even at a
+ * fixed address).  So we can't reference any symbols outside the entry
+ * trampoline and expect it to work.
+ *
+ * Instead, we carefully abuse %rip-relative addressing.
+ * _entry_trampoline(%rip) refers to the start of the remapped) entry
+ * trampoline.  We can thus find cpu_entry_area with this macro:
+ */
+
+#define CPU_ENTRY_AREA \
+       _entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip)
+
+/* The top word of the SYSENTER stack is hot and is usable as scratch space. */
+#define RSP_SCRATCH    CPU_ENTRY_AREA_SYSENTER_stack + \
+                       SIZEOF_SYSENTER_stack - 8 + CPU_ENTRY_AREA
+
+ENTRY(entry_SYSCALL_64_trampoline)
+       UNWIND_HINT_EMPTY
+       swapgs
+
+       /* Stash the user RSP. */
+       movq    %rsp, RSP_SCRATCH
+
+       /* Load the top of the task stack into RSP */
+       movq    CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp
+
+       /* Start building the simulated IRET frame. */
+       pushq   $__USER_DS                      /* pt_regs->ss */
+       pushq   RSP_SCRATCH                     /* pt_regs->sp */
+       pushq   %r11                            /* pt_regs->flags */
+       pushq   $__USER_CS                      /* pt_regs->cs */
+       pushq   %rcx                            /* pt_regs->ip */
+
+       /*
+        * x86 lacks a near absolute jump, and we can't jump to the real
+        * entry text with a relative jump.  We could push the target
+        * address and then use retq, but this destroys the pipeline on
+        * many CPUs (wasting over 20 cycles on Sandy Bridge).  Instead,
+        * spill RDI and restore it in a second-stage trampoline.
+        */
+       pushq   %rdi
+       movq    $entry_SYSCALL_64_stage2, %rdi
+       jmp     *%rdi
+END(entry_SYSCALL_64_trampoline)
+
+       .popsection
+
+ENTRY(entry_SYSCALL_64_stage2)
+       UNWIND_HINT_EMPTY
+       popq    %rdi
+       jmp     entry_SYSCALL_64_after_hwframe
+END(entry_SYSCALL_64_stage2)
+
 ENTRY(entry_SYSCALL_64)
        UNWIND_HINT_EMPTY
        /*
@@ -330,8 +388,24 @@ syscall_return_via_sysret:
        popq    %rsi    /* skip rcx */
        popq    %rdx
        popq    %rsi
+
+       /*
+        * Now all regs are restored except RSP and RDI.
+        * Save old stack pointer and switch to trampoline stack.
+        */
+       movq    %rsp, %rdi
+       movq    PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
+
+       pushq   RSP-RDI(%rdi)   /* RSP */
+       pushq   (%rdi)          /* RDI */
+
+       /*
+        * We are on the trampoline stack.  All regs except RDI are live.
+        * We can do future final exit work right here.
+        */
+
        popq    %rdi
-       movq    RSP-ORIG_RAX(%rsp), %rsp
+       popq    %rsp
        USERGS_SYSRET64
 END(entry_SYSCALL_64)
 
@@ -466,12 +540,13 @@ END(irq_entries_start)
 
 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
 #ifdef CONFIG_DEBUG_ENTRY
-       pushfq
-       testl $X86_EFLAGS_IF, (%rsp)
+       pushq %rax
+       SAVE_FLAGS(CLBR_RAX)
+       testl $X86_EFLAGS_IF, %eax
        jz .Lokay_\@
        ud2
 .Lokay_\@:
-       addq $8, %rsp
+       popq %rax
 #endif
 .endm
 
@@ -563,6 +638,13 @@ END(irq_entries_start)
 /* 0(%rsp): ~(interrupt number) */
        .macro interrupt func
        cld
+
+       testb   $3, CS-ORIG_RAX(%rsp)
+       jz      1f
+       SWAPGS
+       call    switch_to_thread_stack
+1:
+
        ALLOC_PT_GPREGS_ON_STACK
        SAVE_C_REGS
        SAVE_EXTRA_REGS
@@ -572,12 +654,8 @@ END(irq_entries_start)
        jz      1f
 
        /*
-        * IRQ from user mode.  Switch to kernel gsbase and inform context
-        * tracking that we're in kernel mode.
-        */
-       SWAPGS
-
-       /*
+        * IRQ from user mode.
+        *
         * We need to tell lockdep that IRQs are off.  We can't do this until
         * we fix gsbase, and we should do it before enter_from_user_mode
         * (which can take locks).  Since TRACE_IRQS_OFF idempotent,
@@ -630,10 +708,41 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
        ud2
 1:
 #endif
-       SWAPGS
        POP_EXTRA_REGS
-       POP_C_REGS
-       addq    $8, %rsp        /* skip regs->orig_ax */
+       popq    %r11
+       popq    %r10
+       popq    %r9
+       popq    %r8
+       popq    %rax
+       popq    %rcx
+       popq    %rdx
+       popq    %rsi
+
+       /*
+        * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
+        * Save old stack pointer and switch to trampoline stack.
+        */
+       movq    %rsp, %rdi
+       movq    PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
+
+       /* Copy the IRET frame to the trampoline stack. */
+       pushq   6*8(%rdi)       /* SS */
+       pushq   5*8(%rdi)       /* RSP */
+       pushq   4*8(%rdi)       /* EFLAGS */
+       pushq   3*8(%rdi)       /* CS */
+       pushq   2*8(%rdi)       /* RIP */
+
+       /* Push user RDI on the trampoline stack. */
+       pushq   (%rdi)
+
+       /*
+        * We are on the trampoline stack.  All regs except RDI are live.
+        * We can do future final exit work right here.
+        */
+
+       /* Restore RDI. */
+       popq    %rdi
+       SWAPGS
        INTERRUPT_RETURN
 
 
@@ -829,7 +938,33 @@ apicinterrupt IRQ_WORK_VECTOR                      irq_work_interrupt              smp_irq_work_interrupt
 /*
  * Exception entry points.
  */
-#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
+#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
+
+/*
+ * Switch to the thread stack.  This is called with the IRET frame and
+ * orig_ax on the stack.  (That is, RDI..R12 are not on the stack and
+ * space has not been allocated for them.)
+ */
+ENTRY(switch_to_thread_stack)
+       UNWIND_HINT_FUNC
+
+       pushq   %rdi
+       movq    %rsp, %rdi
+       movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+       UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI
+
+       pushq   7*8(%rdi)               /* regs->ss */
+       pushq   6*8(%rdi)               /* regs->rsp */
+       pushq   5*8(%rdi)               /* regs->eflags */
+       pushq   4*8(%rdi)               /* regs->cs */
+       pushq   3*8(%rdi)               /* regs->ip */
+       pushq   2*8(%rdi)               /* regs->orig_ax */
+       pushq   8(%rdi)                 /* return address */
+       UNWIND_HINT_FUNC
+
+       movq    (%rdi), %rdi
+       ret
+END(switch_to_thread_stack)
 
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
 ENTRY(\sym)
@@ -848,11 +983,12 @@ ENTRY(\sym)
 
        ALLOC_PT_GPREGS_ON_STACK
 
-       .if \paranoid
-       .if \paranoid == 1
+       .if \paranoid < 2
        testb   $3, CS(%rsp)                    /* If coming from userspace, switch stacks */
-       jnz     1f
+       jnz     .Lfrom_usermode_switch_stack_\@
        .endif
+
+       .if \paranoid
        call    paranoid_entry
        .else
        call    error_entry
@@ -894,20 +1030,15 @@ ENTRY(\sym)
        jmp     error_exit
        .endif
 
-       .if \paranoid == 1
+       .if \paranoid < 2
        /*
-        * Paranoid entry from userspace.  Switch stacks and treat it
+        * Entry from userspace.  Switch stacks and treat it
         * as a normal entry.  This means that paranoid handlers
         * run in real process context if user_mode(regs).
         */
-1:
+.Lfrom_usermode_switch_stack_\@:
        call    error_entry
 
-
-       movq    %rsp, %rdi                      /* pt_regs pointer */
-       call    sync_regs
-       movq    %rax, %rsp                      /* switch stack */
-
        movq    %rsp, %rdi                      /* pt_regs pointer */
 
        .if \has_error_code
@@ -1170,6 +1301,14 @@ ENTRY(error_entry)
        SWAPGS
 
 .Lerror_entry_from_usermode_after_swapgs:
+       /* Put us onto the real thread stack. */
+       popq    %r12                            /* save return addr in %12 */
+       movq    %rsp, %rdi                      /* arg0 = pt_regs pointer */
+       call    sync_regs
+       movq    %rax, %rsp                      /* switch stack */
+       ENCODE_FRAME_POINTER
+       pushq   %r12
+
        /*
         * We need to tell lockdep that IRQs are off.  We can't do this until
         * we fix gsbase, and we should do it before enter_from_user_mode
index 568e130d932cd2a7d44393e5fc52408cffe64f34..95ad40eb7effbdb6f605285df62d1e0bd33a6cac 100644 (file)
@@ -48,7 +48,7 @@
  */
 ENTRY(entry_SYSENTER_compat)
        /* Interrupts are off on entry. */
-       SWAPGS_UNSAFE_STACK
+       SWAPGS
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
        /*
@@ -306,8 +306,11 @@ ENTRY(entry_INT80_compat)
         */
        movl    %eax, %eax
 
-       /* Construct struct pt_regs on stack (iret frame is already on stack) */
        pushq   %rax                    /* pt_regs->orig_ax */
+
+       /* switch to thread stack expects orig_ax to be pushed */
+       call    switch_to_thread_stack
+
        pushq   %rdi                    /* pt_regs->di */
        pushq   %rsi                    /* pt_regs->si */
        pushq   %rdx                    /* pt_regs->dx */
index bf6a76202a779ee131b4df8c89449ab52abd0a79..ea9a7dde62e5c4d551ba89e429f911fb5c6603fd 100644 (file)
@@ -135,6 +135,8 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
        set_bit(bit, (unsigned long *)cpu_caps_set);    \
 } while (0)
 
+#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
+
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_X86_FAST_FEATURE_TESTS)
 /*
  * Static testing of CPU features.  Used the same as boot_cpu_has().
index 4011cb03ef08e52db15f52779ce366c26359a34b..aab4fe9f49f868a03a5c2da5eeb788a6bb80c24d 100644 (file)
@@ -60,17 +60,10 @@ static inline struct desc_struct *get_current_gdt_rw(void)
        return this_cpu_ptr(&gdt_page)->gdt;
 }
 
-/* Get the fixmap index for a specific processor */
-static inline unsigned int get_cpu_gdt_ro_index(int cpu)
-{
-       return FIX_GDT_REMAP_BEGIN + cpu;
-}
-
 /* Provide the fixmap address of the remapped GDT */
 static inline struct desc_struct *get_cpu_gdt_ro(int cpu)
 {
-       unsigned int idx = get_cpu_gdt_ro_index(cpu);
-       return (struct desc_struct *)__fix_to_virt(idx);
+       return (struct desc_struct *)&get_cpu_entry_area(cpu)->gdt;
 }
 
 /* Provide the current read-only GDT */
@@ -185,7 +178,7 @@ static inline void set_tssldt_descriptor(void *d, unsigned long addr,
 #endif
 }
 
-static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
+static inline void __set_tss_desc(unsigned cpu, unsigned int entry, struct x86_hw_tss *addr)
 {
        struct desc_struct *d = get_cpu_gdt_rw(cpu);
        tss_desc tss;
index b0c505fe9a958c701fef6d96f281bb8ab1a773de..94fc4fa141275bcdd6eb43505fc8e5f20352aca4 100644 (file)
@@ -44,6 +44,45 @@ extern unsigned long __FIXADDR_TOP;
                         PAGE_SIZE)
 #endif
 
+/*
+ * cpu_entry_area is a percpu region in the fixmap that contains things
+ * needed by the CPU and early entry/exit code.  Real types aren't used
+ * for all fields here to avoid circular header dependencies.
+ *
+ * Every field is a virtual alias of some other allocated backing store.
+ * There is no direct allocation of a struct cpu_entry_area.
+ */
+struct cpu_entry_area {
+       char gdt[PAGE_SIZE];
+
+       /*
+        * The GDT is just below SYSENTER_stack and thus serves (on x86_64) as
+        * a a read-only guard page.
+        */
+       struct SYSENTER_stack_page SYSENTER_stack_page;
+
+       /*
+        * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
+        * we need task switches to work, and task switches write to the TSS.
+        */
+       struct tss_struct tss;
+
+       char entry_trampoline[PAGE_SIZE];
+
+#ifdef CONFIG_X86_64
+       /*
+        * Exception stacks used for IST entries.
+        *
+        * In the future, this should have a separate slot for each stack
+        * with guard pages between them.
+        */
+       char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
+#endif
+};
+
+#define CPU_ENTRY_AREA_PAGES (sizeof(struct cpu_entry_area) / PAGE_SIZE)
+
+extern void setup_cpu_entry_areas(void);
 
 /*
  * Here we define all the compile-time 'special' virtual
@@ -101,8 +140,8 @@ enum fixed_addresses {
        FIX_LNW_VRTC,
 #endif
        /* Fixmap entries to remap the GDTs, one per processor. */
-       FIX_GDT_REMAP_BEGIN,
-       FIX_GDT_REMAP_END = FIX_GDT_REMAP_BEGIN + NR_CPUS - 1,
+       FIX_CPU_ENTRY_AREA_TOP,
+       FIX_CPU_ENTRY_AREA_BOTTOM = FIX_CPU_ENTRY_AREA_TOP + (CPU_ENTRY_AREA_PAGES * NR_CPUS) - 1,
 
 #ifdef CONFIG_ACPI_APEI_GHES
        /* Used for GHES mapping from assorted contexts */
@@ -191,5 +230,30 @@ void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
 void __early_set_fixmap(enum fixed_addresses idx,
                        phys_addr_t phys, pgprot_t flags);
 
+static inline unsigned int __get_cpu_entry_area_page_index(int cpu, int page)
+{
+       BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
+
+       return FIX_CPU_ENTRY_AREA_BOTTOM - cpu*CPU_ENTRY_AREA_PAGES - page;
+}
+
+#define __get_cpu_entry_area_offset_index(cpu, offset) ({              \
+       BUILD_BUG_ON(offset % PAGE_SIZE != 0);                          \
+       __get_cpu_entry_area_page_index(cpu, offset / PAGE_SIZE);       \
+       })
+
+#define get_cpu_entry_area_index(cpu, field)                           \
+       __get_cpu_entry_area_offset_index((cpu), offsetof(struct cpu_entry_area, field))
+
+static inline struct cpu_entry_area *get_cpu_entry_area(int cpu)
+{
+       return (struct cpu_entry_area *)__fix_to_virt(__get_cpu_entry_area_page_index(cpu, 0));
+}
+
+static inline struct SYSENTER_stack *cpu_SYSENTER_stack(int cpu)
+{
+       return &get_cpu_entry_area(cpu)->SYSENTER_stack_page.stack;
+}
+
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASM_X86_FIXMAP_H */
index 1b0a5abcd8aeb6e700013c5434aaeb0bba7a152f..96aa6b9884dc5b3bc8d54c9ef1c6258eea13a0d0 100644 (file)
 #ifndef _ASM_X86_HYPERVISOR_H
 #define _ASM_X86_HYPERVISOR_H
 
-#ifdef CONFIG_HYPERVISOR_GUEST
-
-#include <asm/kvm_para.h>
-#include <asm/x86_init.h>
-#include <asm/xen/hypervisor.h>
-
-/*
- * x86 hypervisor information
- */
-
+/* x86 hypervisor types  */
 enum x86_hypervisor_type {
        X86_HYPER_NATIVE = 0,
        X86_HYPER_VMWARE,
@@ -39,6 +30,12 @@ enum x86_hypervisor_type {
        X86_HYPER_KVM,
 };
 
+#ifdef CONFIG_HYPERVISOR_GUEST
+
+#include <asm/kvm_para.h>
+#include <asm/x86_init.h>
+#include <asm/xen/hypervisor.h>
+
 struct hypervisor_x86 {
        /* Hypervisor name */
        const char      *name;
@@ -58,7 +55,15 @@ struct hypervisor_x86 {
 
 extern enum x86_hypervisor_type x86_hyper_type;
 extern void init_hypervisor_platform(void);
+static inline bool hypervisor_is_type(enum x86_hypervisor_type type)
+{
+       return x86_hyper_type == type;
+}
 #else
 static inline void init_hypervisor_platform(void) { }
+static inline bool hypervisor_is_type(enum x86_hypervisor_type type)
+{
+       return type == X86_HYPER_NATIVE;
+}
 #endif /* CONFIG_HYPERVISOR_GUEST */
 #endif /* _ASM_X86_HYPERVISOR_H */
index c8ef23f2c28f17c59308b9c41179c47f85e075ad..89f08955fff733c688a5ce4f4a0b8d74050ee617 100644 (file)
@@ -142,6 +142,9 @@ static inline notrace unsigned long arch_local_irq_save(void)
        swapgs;                                 \
        sysretl
 
+#ifdef CONFIG_DEBUG_ENTRY
+#define SAVE_FLAGS(x)          pushfq; popq %rax
+#endif
 #else
 #define INTERRUPT_RETURN               iret
 #define ENABLE_INTERRUPTS_SYSEXIT      sti; sysexit
index f86a8caa561e8873c3f34e6e8b8cd509ebadd819..395c9631e000a3a17aa574c1b25fcc2cafd5b5fb 100644 (file)
@@ -26,6 +26,7 @@ extern void die(const char *, struct pt_regs *,long);
 extern int __must_check __die(const char *, struct pt_regs *, long);
 extern void show_stack_regs(struct pt_regs *regs);
 extern void __show_regs(struct pt_regs *regs, int all);
+extern void show_iret_regs(struct pt_regs *regs);
 extern unsigned long oops_begin(void);
 extern void oops_end(unsigned long, struct pt_regs *, int signr);
 
index 283efcaac8aff86f2c004bc23e4b8642cbf3d527..892df375b6155a51f584760efb9f9e77c3f732e8 100644 (file)
@@ -927,6 +927,15 @@ extern void default_banner(void);
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
+
+#ifdef CONFIG_DEBUG_ENTRY
+#define SAVE_FLAGS(clobbers)                                        \
+       PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \
+                 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);        \
+                 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl);    \
+                 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+#endif
+
 #endif /* CONFIG_X86_32 */
 
 #endif /* __ASSEMBLY__ */
index cc16fa882e3e760a40351cf3e7476ac9f25ffe00..1f2434ee9f806c4355a38599ab4485140a8cd1df 100644 (file)
@@ -163,9 +163,9 @@ enum cpuid_regs_idx {
 extern struct cpuinfo_x86      boot_cpu_data;
 extern struct cpuinfo_x86      new_cpu_data;
 
-extern struct tss_struct       doublefault_tss;
-extern __u32                   cpu_caps_cleared[NCAPINTS];
-extern __u32                   cpu_caps_set[NCAPINTS];
+extern struct x86_hw_tss       doublefault_tss;
+extern __u32                   cpu_caps_cleared[NCAPINTS + NBUGINTS];
+extern __u32                   cpu_caps_set[NCAPINTS + NBUGINTS];
 
 #ifdef CONFIG_SMP
 DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
@@ -253,6 +253,11 @@ static inline void load_cr3(pgd_t *pgdir)
        write_cr3(__sme_pa(pgdir));
 }
 
+/*
+ * Note that while the legacy 'TSS' name comes from 'Task State Segment',
+ * on modern x86 CPUs the TSS also holds information important to 64-bit mode,
+ * unrelated to the task-switch mechanism:
+ */
 #ifdef CONFIG_X86_32
 /* This is the TSS defined by the hardware. */
 struct x86_hw_tss {
@@ -305,7 +310,13 @@ struct x86_hw_tss {
 struct x86_hw_tss {
        u32                     reserved1;
        u64                     sp0;
+
+       /*
+        * We store cpu_current_top_of_stack in sp1 so it's always accessible.
+        * Linux does not use ring 1, so sp1 is not otherwise needed.
+        */
        u64                     sp1;
+
        u64                     sp2;
        u64                     reserved2;
        u64                     ist[7];
@@ -323,12 +334,22 @@ struct x86_hw_tss {
 #define IO_BITMAP_BITS                 65536
 #define IO_BITMAP_BYTES                        (IO_BITMAP_BITS/8)
 #define IO_BITMAP_LONGS                        (IO_BITMAP_BYTES/sizeof(long))
-#define IO_BITMAP_OFFSET               offsetof(struct tss_struct, io_bitmap)
+#define IO_BITMAP_OFFSET               (offsetof(struct tss_struct, io_bitmap) - offsetof(struct tss_struct, x86_tss))
 #define INVALID_IO_BITMAP_OFFSET       0x8000
 
+struct SYSENTER_stack {
+       unsigned long           words[64];
+};
+
+struct SYSENTER_stack_page {
+       struct SYSENTER_stack stack;
+} __aligned(PAGE_SIZE);
+
 struct tss_struct {
        /*
-        * The hardware state:
+        * The fixed hardware portion.  This must not cross a page boundary
+        * at risk of violating the SDM's advice and potentially triggering
+        * errata.
         */
        struct x86_hw_tss       x86_tss;
 
@@ -339,18 +360,9 @@ struct tss_struct {
         * be within the limit.
         */
        unsigned long           io_bitmap[IO_BITMAP_LONGS + 1];
+} __aligned(PAGE_SIZE);
 
-#ifdef CONFIG_X86_32
-       /*
-        * Space for the temporary SYSENTER stack.
-        */
-       unsigned long           SYSENTER_stack_canary;
-       unsigned long           SYSENTER_stack[64];
-#endif
-
-} ____cacheline_aligned;
-
-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
+DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
 
 /*
  * sizeof(unsigned long) coming from an extra "long" at the end
@@ -364,6 +376,9 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
 
 #ifdef CONFIG_X86_32
 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
+#else
+/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */
+#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
 #endif
 
 /*
@@ -523,7 +538,7 @@ static inline void native_set_iopl_mask(unsigned mask)
 static inline void
 native_load_sp0(unsigned long sp0)
 {
-       this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
+       this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 }
 
 static inline void native_swapgs(void)
@@ -535,12 +550,12 @@ static inline void native_swapgs(void)
 
 static inline unsigned long current_top_of_stack(void)
 {
-#ifdef CONFIG_X86_64
-       return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
-#else
-       /* sp0 on x86_32 is special in and around vm86 mode. */
+       /*
+        *  We can't read directly from tss.sp0: sp0 on x86_32 is special in
+        *  and around vm86 mode and sp0 on x86_64 is special because of the
+        *  entry trampoline.
+        */
        return this_cpu_read_stable(cpu_current_top_of_stack);
-#endif
 }
 
 static inline bool on_thread_stack(void)
index 8da111b3c342bbb61a9e630e101c8a83422a15ea..f8062bfd43a072dc950b23f242893b6ca3310652 100644 (file)
@@ -16,6 +16,7 @@ enum stack_type {
        STACK_TYPE_TASK,
        STACK_TYPE_IRQ,
        STACK_TYPE_SOFTIRQ,
+       STACK_TYPE_SYSENTER,
        STACK_TYPE_EXCEPTION,
        STACK_TYPE_EXCEPTION_LAST = STACK_TYPE_EXCEPTION + N_EXCEPTION_STACKS-1,
 };
@@ -28,6 +29,8 @@ struct stack_info {
 bool in_task_stack(unsigned long *stack, struct task_struct *task,
                   struct stack_info *info);
 
+bool in_sysenter_stack(unsigned long *stack, struct stack_info *info);
+
 int get_stack_info(unsigned long *stack, struct task_struct *task,
                   struct stack_info *info, unsigned long *visit_mask);
 
index 8c6bd6863db9d6b737cd0649324c154f9b9798a3..9b6df68d8fd1eba26f3651faa5c8b8f4dcf223f1 100644 (file)
@@ -79,10 +79,10 @@ do {                                                                        \
 static inline void refresh_sysenter_cs(struct thread_struct *thread)
 {
        /* Only happens when SEP is enabled, no need to test "SEP"arately: */
-       if (unlikely(this_cpu_read(cpu_tss.x86_tss.ss1) == thread->sysenter_cs))
+       if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
                return;
 
-       this_cpu_write(cpu_tss.x86_tss.ss1, thread->sysenter_cs);
+       this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
        wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
 }
 #endif
@@ -90,10 +90,12 @@ static inline void refresh_sysenter_cs(struct thread_struct *thread)
 /* This is used when switching tasks or entering/exiting vm86 mode. */
 static inline void update_sp0(struct task_struct *task)
 {
+       /* On x86_64, sp0 always points to the entry trampoline stack, which is constant: */
 #ifdef CONFIG_X86_32
        load_sp0(task->thread.sp0);
 #else
-       load_sp0(task_top_of_stack(task));
+       if (static_cpu_has(X86_FEATURE_XENPV))
+               load_sp0(task_top_of_stack(task));
 #endif
 }
 
index 70f425947dc50f3e99ca639c0ead0d7e1cce636d..00223333821a96616647a9cbb6fe729c4a18b7b6 100644 (file)
@@ -207,7 +207,7 @@ static inline int arch_within_stack_frames(const void * const stack,
 #else /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_X86_64
-# define cpu_current_top_of_stack (cpu_tss + TSS_sp0)
+# define cpu_current_top_of_stack (cpu_tss_rw + TSS_sp1)
 #endif
 
 #endif
index 1fadd310ff680ece697fa65a8db410c380a8547e..31051f35cbb768e452c4f76a60c5415a45f572e7 100644 (file)
@@ -75,7 +75,6 @@ dotraplinkage void do_segment_not_present(struct pt_regs *, long);
 dotraplinkage void do_stack_segment(struct pt_regs *, long);
 #ifdef CONFIG_X86_64
 dotraplinkage void do_double_fault(struct pt_regs *, long);
-asmlinkage struct pt_regs *sync_regs(struct pt_regs *);
 #endif
 dotraplinkage void do_general_protection(struct pt_regs *, long);
 dotraplinkage void do_page_fault(struct pt_regs *, unsigned long);
index e9cc6fe1fc6f953c38ddcc61fcf06fd90d72ab04..c1688c2d0a128f063053697dc60bcbfbca509765 100644 (file)
@@ -7,6 +7,9 @@
 #include <asm/ptrace.h>
 #include <asm/stacktrace.h>
 
+#define IRET_FRAME_OFFSET (offsetof(struct pt_regs, ip))
+#define IRET_FRAME_SIZE   (sizeof(struct pt_regs) - IRET_FRAME_OFFSET)
+
 struct unwind_state {
        struct stack_info stack_info;
        unsigned long stack_mask;
@@ -52,6 +55,10 @@ void unwind_start(struct unwind_state *state, struct task_struct *task,
 }
 
 #if defined(CONFIG_UNWINDER_ORC) || defined(CONFIG_UNWINDER_FRAME_POINTER)
+/*
+ * WARNING: The entire pt_regs may not be safe to dereference.  In some cases,
+ * only the iret frame registers are accessible.  Use with caution!
+ */
 static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
 {
        if (unwind_done(state))
index 8ea78275480dafeb702e11ba73364cd9e7c52f21..cd360a5e0dca30f2f1ad052b197606e55f701db0 100644 (file)
@@ -93,4 +93,10 @@ void common(void) {
 
        BLANK();
        DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
+
+       /* Layout info for cpu_entry_area */
+       OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss);
+       OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
+       OFFSET(CPU_ENTRY_AREA_SYSENTER_stack, cpu_entry_area, SYSENTER_stack_page);
+       DEFINE(SIZEOF_SYSENTER_stack, sizeof(struct SYSENTER_stack));
 }
index dedf428b20b68b0a4748fc1ac3032193c9121362..7d20d9c0b3d69cfaa717233a868218fe9d2cb694 100644 (file)
@@ -47,13 +47,8 @@ void foo(void)
        BLANK();
 
        /* Offset from the sysenter stack to tss.sp0 */
-       DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
-              offsetofend(struct tss_struct, SYSENTER_stack));
-
-       /* Offset from cpu_tss to SYSENTER_stack */
-       OFFSET(CPU_TSS_SYSENTER_stack, tss_struct, SYSENTER_stack);
-       /* Size of SYSENTER_stack */
-       DEFINE(SIZEOF_SYSENTER_stack, sizeof(((struct tss_struct *)0)->SYSENTER_stack));
+       DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) -
+              offsetofend(struct cpu_entry_area, SYSENTER_stack_page.stack));
 
 #ifdef CONFIG_CC_STACKPROTECTOR
        BLANK();
index 630212fa9b9da3f0498fc30d4c193c5926c43abb..bf51e51d808dd8914abd3b4bca69b37ce3ec023b 100644 (file)
@@ -23,6 +23,9 @@ int main(void)
 #ifdef CONFIG_PARAVIRT
        OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64);
        OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
+#ifdef CONFIG_DEBUG_ENTRY
+       OFFSET(PV_IRQ_save_fl, pv_irq_ops, save_fl);
+#endif
        BLANK();
 #endif
 
@@ -63,6 +66,7 @@ int main(void)
 
        OFFSET(TSS_ist, tss_struct, x86_tss.ist);
        OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
+       OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
        BLANK();
 
 #ifdef CONFIG_CC_STACKPROTECTOR
index fa998ca8aa5aa5b4899dbe8a57c5b543f927009e..7416da3ec4dfa0b0f275dd10a5f9bfa12b884022 100644 (file)
@@ -476,8 +476,8 @@ static const char *table_lookup_model(struct cpuinfo_x86 *c)
        return NULL;            /* Not found */
 }
 
-__u32 cpu_caps_cleared[NCAPINTS];
-__u32 cpu_caps_set[NCAPINTS];
+__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
+__u32 cpu_caps_set[NCAPINTS + NBUGINTS];
 
 void load_percpu_segment(int cpu)
 {
@@ -490,27 +490,116 @@ void load_percpu_segment(int cpu)
        load_stack_canary_segment();
 }
 
-/* Setup the fixmap mapping only once per-processor */
-static inline void setup_fixmap_gdt(int cpu)
+#ifdef CONFIG_X86_32
+/* The 32-bit entry code needs to find cpu_entry_area. */
+DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
+#endif
+
+#ifdef CONFIG_X86_64
+/*
+ * Special IST stacks which the CPU switches to when it calls
+ * an IST-marked descriptor entry. Up to 7 stacks (hardware
+ * limit), all of them are 4K, except the debug stack which
+ * is 8K.
+ */
+static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+         [0 ... N_EXCEPTION_STACKS - 1]        = EXCEPTION_STKSZ,
+         [DEBUG_STACK - 1]                     = DEBUG_STKSZ
+};
+
+static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+       [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
+#endif
+
+static DEFINE_PER_CPU_PAGE_ALIGNED(struct SYSENTER_stack_page,
+                                  SYSENTER_stack_storage);
+
+static void __init
+set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot)
+{
+       for ( ; pages; pages--, idx--, ptr += PAGE_SIZE)
+               __set_fixmap(idx, per_cpu_ptr_to_phys(ptr), prot);
+}
+
+/* Setup the fixmap mappings only once per-processor */
+static void __init setup_cpu_entry_area(int cpu)
 {
 #ifdef CONFIG_X86_64
-       /* On 64-bit systems, we use a read-only fixmap GDT. */
-       pgprot_t prot = PAGE_KERNEL_RO;
+       extern char _entry_trampoline[];
+
+       /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
+       pgprot_t gdt_prot = PAGE_KERNEL_RO;
+       pgprot_t tss_prot = PAGE_KERNEL_RO;
 #else
        /*
         * On native 32-bit systems, the GDT cannot be read-only because
         * our double fault handler uses a task gate, and entering through
-        * a task gate needs to change an available TSS to busy.  If the GDT
-        * is read-only, that will triple fault.
+        * a task gate needs to change an available TSS to busy.  If the
+        * GDT is read-only, that will triple fault.  The TSS cannot be
+        * read-only because the CPU writes to it on task switches.
         *
-        * On Xen PV, the GDT must be read-only because the hypervisor requires
-        * it.
+        * On Xen PV, the GDT must be read-only because the hypervisor
+        * requires it.
         */
-       pgprot_t prot = boot_cpu_has(X86_FEATURE_XENPV) ?
+       pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
                PAGE_KERNEL_RO : PAGE_KERNEL;
+       pgprot_t tss_prot = PAGE_KERNEL;
 #endif
 
-       __set_fixmap(get_cpu_gdt_ro_index(cpu), get_cpu_gdt_paddr(cpu), prot);
+       __set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot);
+       set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, SYSENTER_stack_page),
+                               per_cpu_ptr(&SYSENTER_stack_storage, cpu), 1,
+                               PAGE_KERNEL);
+
+       /*
+        * The Intel SDM says (Volume 3, 7.2.1):
+        *
+        *  Avoid placing a page boundary in the part of the TSS that the
+        *  processor reads during a task switch (the first 104 bytes). The
+        *  processor may not correctly perform address translations if a
+        *  boundary occurs in this area. During a task switch, the processor
+        *  reads and writes into the first 104 bytes of each TSS (using
+        *  contiguous physical addresses beginning with the physical address
+        *  of the first byte of the TSS). So, after TSS access begins, if
+        *  part of the 104 bytes is not physically contiguous, the processor
+        *  will access incorrect information without generating a page-fault
+        *  exception.
+        *
+        * There are also a lot of errata involving the TSS spanning a page
+        * boundary.  Assert that we're not doing that.
+        */
+       BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
+                     offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
+       BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
+       set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss),
+                               &per_cpu(cpu_tss_rw, cpu),
+                               sizeof(struct tss_struct) / PAGE_SIZE,
+                               tss_prot);
+
+#ifdef CONFIG_X86_32
+       per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
+#endif
+
+#ifdef CONFIG_X86_64
+       BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
+       BUILD_BUG_ON(sizeof(exception_stacks) !=
+                    sizeof(((struct cpu_entry_area *)0)->exception_stacks));
+       set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, exception_stacks),
+                               &per_cpu(exception_stacks, cpu),
+                               sizeof(exception_stacks) / PAGE_SIZE,
+                               PAGE_KERNEL);
+
+       __set_fixmap(get_cpu_entry_area_index(cpu, entry_trampoline),
+                    __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
+#endif
+}
+
+void __init setup_cpu_entry_areas(void)
+{
+       unsigned int cpu;
+
+       for_each_possible_cpu(cpu)
+               setup_cpu_entry_area(cpu);
 }
 
 /* Load the original GDT from the per-cpu structure */
@@ -747,7 +836,7 @@ static void apply_forced_caps(struct cpuinfo_x86 *c)
 {
        int i;
 
-       for (i = 0; i < NCAPINTS; i++) {
+       for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
                c->x86_capability[i] &= ~cpu_caps_cleared[i];
                c->x86_capability[i] |= cpu_caps_set[i];
        }
@@ -1250,7 +1339,7 @@ void enable_sep_cpu(void)
                return;
 
        cpu = get_cpu();
-       tss = &per_cpu(cpu_tss, cpu);
+       tss = &per_cpu(cpu_tss_rw, cpu);
 
        /*
         * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
@@ -1259,11 +1348,7 @@ void enable_sep_cpu(void)
 
        tss->x86_tss.ss1 = __KERNEL_CS;
        wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
-
-       wrmsr(MSR_IA32_SYSENTER_ESP,
-             (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
-             0);
-
+       wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_SYSENTER_stack(cpu) + 1), 0);
        wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
 
        put_cpu();
@@ -1357,25 +1442,19 @@ DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);
 
-/*
- * Special IST stacks which the CPU switches to when it calls
- * an IST-marked descriptor entry. Up to 7 stacks (hardware
- * limit), all of them are 4K, except the debug stack which
- * is 8K.
- */
-static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
-         [0 ... N_EXCEPTION_STACKS - 1]        = EXCEPTION_STKSZ,
-         [DEBUG_STACK - 1]                     = DEBUG_STKSZ
-};
-
-static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
-       [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
-
 /* May not be marked __init: used by software suspend */
 void syscall_init(void)
 {
+       extern char _entry_trampoline[];
+       extern char entry_SYSCALL_64_trampoline[];
+
+       int cpu = smp_processor_id();
+       unsigned long SYSCALL64_entry_trampoline =
+               (unsigned long)get_cpu_entry_area(cpu)->entry_trampoline +
+               (entry_SYSCALL_64_trampoline - _entry_trampoline);
+
        wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
-       wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
+       wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline);
 
 #ifdef CONFIG_IA32_EMULATION
        wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
@@ -1386,7 +1465,7 @@ void syscall_init(void)
         * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
         */
        wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
-       wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+       wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_SYSENTER_stack(cpu) + 1));
        wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
 #else
        wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
@@ -1530,7 +1609,7 @@ void cpu_init(void)
        if (cpu)
                load_ucode_ap();
 
-       t = &per_cpu(cpu_tss, cpu);
+       t = &per_cpu(cpu_tss_rw, cpu);
        oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
@@ -1569,7 +1648,7 @@ void cpu_init(void)
         * set up and load the per-CPU TSS
         */
        if (!oist->ist[0]) {
-               char *estacks = per_cpu(exception_stacks, cpu);
+               char *estacks = get_cpu_entry_area(cpu)->exception_stacks;
 
                for (v = 0; v < N_EXCEPTION_STACKS; v++) {
                        estacks += exception_stack_sizes[v];
@@ -1580,7 +1659,7 @@ void cpu_init(void)
                }
        }
 
-       t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+       t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
 
        /*
         * <= is required because the CPU will access up to
@@ -1596,11 +1675,12 @@ void cpu_init(void)
        enter_lazy_tlb(&init_mm, me);
 
        /*
-        * Initialize the TSS.  Don't bother initializing sp0, as the initial
-        * task never enters user mode.
+        * Initialize the TSS.  sp0 points to the entry trampoline stack
+        * regardless of what task is running.
         */
-       set_tss_desc(cpu, t);
+       set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
        load_TR_desc();
+       load_sp0((unsigned long)(cpu_SYSENTER_stack(cpu) + 1));
 
        load_mm_ldt(&init_mm);
 
@@ -1612,7 +1692,6 @@ void cpu_init(void)
        if (is_uv_system())
                uv_cpu_init();
 
-       setup_fixmap_gdt(cpu);
        load_fixmap_gdt(cpu);
 }
 
@@ -1622,7 +1701,7 @@ void cpu_init(void)
 {
        int cpu = smp_processor_id();
        struct task_struct *curr = current;
-       struct tss_struct *t = &per_cpu(cpu_tss, cpu);
+       struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu);
 
        wait_for_master_cpu(cpu);
 
@@ -1657,12 +1736,12 @@ void cpu_init(void)
         * Initialize the TSS.  Don't bother initializing sp0, as the initial
         * task never enters user mode.
         */
-       set_tss_desc(cpu, t);
+       set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
        load_TR_desc();
 
        load_mm_ldt(&init_mm);
 
-       t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+       t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
 
 #ifdef CONFIG_DOUBLEFAULT
        /* Set up doublefault TSS pointer in the GDT */
@@ -1674,7 +1753,6 @@ void cpu_init(void)
 
        fpu__init_cpu();
 
-       setup_fixmap_gdt(cpu);
        load_fixmap_gdt(cpu);
 }
 #endif
index 0e662c55ae902fedd5c78c1ed87a972b35a79856..0b8cedb20d6d92f2875a49292680c8cfecd5b044 100644 (file)
@@ -50,25 +50,23 @@ static void doublefault_fn(void)
                cpu_relax();
 }
 
-struct tss_struct doublefault_tss __cacheline_aligned = {
-       .x86_tss = {
-               .sp0            = STACK_START,
-               .ss0            = __KERNEL_DS,
-               .ldt            = 0,
-               .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
-
-               .ip             = (unsigned long) doublefault_fn,
-               /* 0x2 bit is always set */
-               .flags          = X86_EFLAGS_SF | 0x2,
-               .sp             = STACK_START,
-               .es             = __USER_DS,
-               .cs             = __KERNEL_CS,
-               .ss             = __KERNEL_DS,
-               .ds             = __USER_DS,
-               .fs             = __KERNEL_PERCPU,
-
-               .__cr3          = __pa_nodebug(swapper_pg_dir),
-       }
+struct x86_hw_tss doublefault_tss __cacheline_aligned = {
+       .sp0            = STACK_START,
+       .ss0            = __KERNEL_DS,
+       .ldt            = 0,
+       .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
+
+       .ip             = (unsigned long) doublefault_fn,
+       /* 0x2 bit is always set */
+       .flags          = X86_EFLAGS_SF | 0x2,
+       .sp             = STACK_START,
+       .es             = __USER_DS,
+       .cs             = __KERNEL_CS,
+       .ss             = __KERNEL_DS,
+       .ds             = __USER_DS,
+       .fs             = __KERNEL_PERCPU,
+
+       .__cr3          = __pa_nodebug(swapper_pg_dir),
 };
 
 /* dummy for do_double_fault() call */
index f13b4c00a5de4b7a7b36c40d27311672bcc9d05c..bbd6d986e2d0fc22b5b3c23c794ade410b9f9973 100644 (file)
@@ -43,6 +43,24 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task,
        return true;
 }
 
+bool in_sysenter_stack(unsigned long *stack, struct stack_info *info)
+{
+       struct SYSENTER_stack *ss = cpu_SYSENTER_stack(smp_processor_id());
+
+       void *begin = ss;
+       void *end = ss + 1;
+
+       if ((void *)stack < begin || (void *)stack >= end)
+               return false;
+
+       info->type      = STACK_TYPE_SYSENTER;
+       info->begin     = begin;
+       info->end       = end;
+       info->next_sp   = NULL;
+
+       return true;
+}
+
 static void printk_stack_address(unsigned long address, int reliable,
                                 char *log_lvl)
 {
@@ -50,6 +68,28 @@ static void printk_stack_address(unsigned long address, int reliable,
        printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
 }
 
+void show_iret_regs(struct pt_regs *regs)
+{
+       printk(KERN_DEFAULT "RIP: %04x:%pS\n", (int)regs->cs, (void *)regs->ip);
+       printk(KERN_DEFAULT "RSP: %04x:%016lx EFLAGS: %08lx", (int)regs->ss,
+               regs->sp, regs->flags);
+}
+
+static void show_regs_safe(struct stack_info *info, struct pt_regs *regs)
+{
+       if (on_stack(info, regs, sizeof(*regs)))
+               __show_regs(regs, 0);
+       else if (on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
+                         IRET_FRAME_SIZE)) {
+               /*
+                * When an interrupt or exception occurs in entry code, the
+                * full pt_regs might not have been saved yet.  In that case
+                * just print the iret frame.
+                */
+               show_iret_regs(regs);
+       }
+}
+
 void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
                        unsigned long *stack, char *log_lvl)
 {
@@ -71,31 +111,35 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
         * - task stack
         * - interrupt stack
         * - HW exception stacks (double fault, nmi, debug, mce)
+        * - SYSENTER stack
         *
-        * x86-32 can have up to three stacks:
+        * x86-32 can have up to four stacks:
         * - task stack
         * - softirq stack
         * - hardirq stack
+        * - SYSENTER stack
         */
        for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
                const char *stack_name;
 
-               /*
-                * If we overflowed the task stack into a guard page, jump back
-                * to the bottom of the usable stack.
-                */
-               if (task_stack_page(task) - (void *)stack < PAGE_SIZE)
-                       stack = task_stack_page(task);
-
-               if (get_stack_info(stack, task, &stack_info, &visit_mask))
-                       break;
+               if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
+                       /*
+                        * We weren't on a valid stack.  It's possible that
+                        * we overflowed a valid stack into a guard page.
+                        * See if the next page up is valid so that we can
+                        * generate some kind of backtrace if this happens.
+                        */
+                       stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack);
+                       if (get_stack_info(stack, task, &stack_info, &visit_mask))
+                               break;
+               }
 
                stack_name = stack_type_name(stack_info.type);
                if (stack_name)
                        printk("%s <%s>\n", log_lvl, stack_name);
 
-               if (regs && on_stack(&stack_info, regs, sizeof(*regs)))
-                       __show_regs(regs, 0);
+               if (regs)
+                       show_regs_safe(&stack_info, regs);
 
                /*
                 * Scan the stack, printing any text addresses we find.  At the
@@ -119,7 +163,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 
                        /*
                         * Don't print regs->ip again if it was already printed
-                        * by __show_regs() below.
+                        * by show_regs_safe() below.
                         */
                        if (regs && stack == &regs->ip)
                                goto next;
@@ -155,8 +199,8 @@ next:
 
                        /* if the frame has entry regs, print them */
                        regs = unwind_get_entry_regs(&state);
-                       if (regs && on_stack(&stack_info, regs, sizeof(*regs)))
-                               __show_regs(regs, 0);
+                       if (regs)
+                               show_regs_safe(&stack_info, regs);
                }
 
                if (stack_name)
index daefae83a3aa86c59602b75bd3e6734c6e3b1030..5ff13a6b368069f68505099ce94267b8bf0f45b9 100644 (file)
@@ -26,6 +26,9 @@ const char *stack_type_name(enum stack_type type)
        if (type == STACK_TYPE_SOFTIRQ)
                return "SOFTIRQ";
 
+       if (type == STACK_TYPE_SYSENTER)
+               return "SYSENTER";
+
        return NULL;
 }
 
@@ -93,6 +96,9 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
        if (task != current)
                goto unknown;
 
+       if (in_sysenter_stack(stack, info))
+               goto recursion_check;
+
        if (in_hardirq_stack(stack, info))
                goto recursion_check;
 
index 88ce2ffdb110303502ad33e64d357d8af5afd8c6..abc828f8c29785b4fae8398ec19775015447ee22 100644 (file)
@@ -37,6 +37,9 @@ const char *stack_type_name(enum stack_type type)
        if (type == STACK_TYPE_IRQ)
                return "IRQ";
 
+       if (type == STACK_TYPE_SYSENTER)
+               return "SYSENTER";
+
        if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST)
                return exception_stack_names[type - STACK_TYPE_EXCEPTION];
 
@@ -115,6 +118,9 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
        if (in_irq_stack(stack, info))
                goto recursion_check;
 
+       if (in_sysenter_stack(stack, info))
+               goto recursion_check;
+
        goto unknown;
 
 recursion_check:
index 3feb648781c470a7a49ee26749712ba7da891fe9..2f723301eb58fc5ad0d6796b342446ae2ee0c9e6 100644 (file)
@@ -67,7 +67,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
         * because the ->io_bitmap_max value must match the bitmap
         * contents:
         */
-       tss = &per_cpu(cpu_tss, get_cpu());
+       tss = &per_cpu(cpu_tss_rw, get_cpu());
 
        if (turn_on)
                bitmap_clear(t->io_bitmap_ptr, from, num);
index 49cfd9fe7589fa5ef2bef5d4a5d6431b7007836f..68e1867cca8045d0ed728ffc6b75a866c25484ed 100644 (file)
@@ -219,18 +219,6 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
        /* high bit used in ret_from_ code  */
        unsigned vector = ~regs->orig_ax;
 
-       /*
-        * NB: Unlike exception entries, IRQ entries do not reliably
-        * handle context tracking in the low-level entry code.  This is
-        * because syscall entries execute briefly with IRQs on before
-        * updating context tracking state, so we can take an IRQ from
-        * kernel mode with CONTEXT_USER.  The low-level entry code only
-        * updates the context if we came from user mode, so we won't
-        * switch to CONTEXT_KERNEL.  We'll fix that once the syscall
-        * code is cleaned up enough that we can cleanly defer enabling
-        * IRQs.
-        */
-
        entering_irq();
 
        /* entering_irq() tells RCU that we're not quiescent.  Check it. */
index 020efbf5786b35d343a8632cd14ac4f800465d9b..d86e344f5b3debfed504b72a7c0f83f36fe16387 100644 (file)
@@ -57,10 +57,10 @@ static inline void stack_overflow_check(struct pt_regs *regs)
        if (regs->sp >= estack_top && regs->sp <= estack_bottom)
                return;
 
-       WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx)\n",
+       WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx,ip:%pF)\n",
                current->comm, curbase, regs->sp,
                irq_stack_top, irq_stack_bottom,
-               estack_top, estack_bottom);
+               estack_top, estack_bottom, (void *)regs->ip);
 
        if (sysctl_panic_on_stackoverflow)
                panic("low stack detected by irq handler - check messages\n");
index ac0be8283325edfdc2752f862b4c0cef208a931c..9edadabf04f66c657f8a29bb56fe994b2559d5cf 100644 (file)
@@ -10,7 +10,6 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
-DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
 
 DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
@@ -60,7 +59,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                PATCH_SITE(pv_mmu_ops, read_cr2);
                PATCH_SITE(pv_mmu_ops, read_cr3);
                PATCH_SITE(pv_mmu_ops, write_cr3);
-               PATCH_SITE(pv_mmu_ops, flush_tlb_single);
                PATCH_SITE(pv_cpu_ops, wbinvd);
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
                case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
index bb988a24db927d758f9120d45f90d1c160628790..aed9d94bd46f41bb049b8e0153a44a43d97e80b4 100644 (file)
@@ -47,7 +47,7 @@
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
+__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss_rw) = {
        .x86_tss = {
                /*
                 * .sp0 is only used when entering ring 0 from a lower
@@ -56,6 +56,16 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
                 * Poison it.
                 */
                .sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
+
+#ifdef CONFIG_X86_64
+               /*
+                * .sp1 is cpu_current_top_of_stack.  The init task never
+                * runs user code, but cpu_current_top_of_stack should still
+                * be well defined before the first context switch.
+                */
+               .sp1 = TOP_OF_INIT_STACK,
+#endif
+
 #ifdef CONFIG_X86_32
                .ss0 = __KERNEL_DS,
                .ss1 = __KERNEL_CS,
@@ -71,11 +81,8 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
          */
        .io_bitmap              = { [0 ... IO_BITMAP_LONGS] = ~0 },
 #endif
-#ifdef CONFIG_X86_32
-       .SYSENTER_stack_canary  = STACK_END_MAGIC,
-#endif
 };
-EXPORT_PER_CPU_SYMBOL(cpu_tss);
+EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);
 
 DEFINE_PER_CPU(bool, __tss_limit_invalid);
 EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
@@ -104,7 +111,7 @@ void exit_thread(struct task_struct *tsk)
        struct fpu *fpu = &t->fpu;
 
        if (bp) {
-               struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());
+               struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu());
 
                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
index 45bf0c5f93e15103060d67d5245756ab72ce8fe5..5224c609918416337b97440eb2d515d8052463ae 100644 (file)
@@ -234,7 +234,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
-       struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+       struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
 
        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
index eeeb34f85c250e8c01188b6d32cf5a62bd1af8a0..c754662320163107ca3a254362ce0e404a8d3c11 100644 (file)
@@ -69,9 +69,8 @@ void __show_regs(struct pt_regs *regs, int all)
        unsigned int fsindex, gsindex;
        unsigned int ds, cs, es;
 
-       printk(KERN_DEFAULT "RIP: %04lx:%pS\n", regs->cs, (void *)regs->ip);
-       printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx", regs->ss,
-               regs->sp, regs->flags);
+       show_iret_regs(regs);
+
        if (regs->orig_ax != -1)
                pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
        else
@@ -88,6 +87,9 @@ void __show_regs(struct pt_regs *regs, int all)
        printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);
 
+       if (!all)
+               return;
+
        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
@@ -98,9 +100,6 @@ void __show_regs(struct pt_regs *regs, int all)
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
 
-       if (!all)
-               return;
-
        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = __read_cr3();
@@ -400,7 +399,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
-       struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+       struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
 
        WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
                     this_cpu_read(irq_count) != -1);
@@ -462,6 +461,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         * Switch the PDA and FPU contexts.
         */
        this_cpu_write(current_task, next_p);
+       this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
 
        /* Reload sp0. */
        update_sp0(next_p);
index 989514c94a55d8fa93a07192edd199be1a607bf8..e98f8b66a460b98b31d262cff23fa063be33ac5a 100644 (file)
@@ -348,9 +348,15 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 
        /*
         * If IRET takes a non-IST fault on the espfix64 stack, then we
-        * end up promoting it to a doublefault.  In that case, modify
-        * the stack to make it look like we just entered the #GP
-        * handler from user space, similar to bad_iret.
+        * end up promoting it to a doublefault.  In that case, take
+        * advantage of the fact that we're not using the normal (TSS.sp0)
+        * stack right now.  We can write a fake #GP(0) frame at TSS.sp0
+        * and then modify our own IRET frame so that, when we return,
+        * we land directly at the #GP(0) vector with the stack already
+        * set up according to its expectations.
+        *
+        * The net result is that our #GP handler will think that we
+        * entered from usermode with the bad user context.
         *
         * No need for ist_enter here because we don't use RCU.
         */
@@ -358,13 +364,26 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
                regs->cs == __KERNEL_CS &&
                regs->ip == (unsigned long)native_irq_return_iret)
        {
-               struct pt_regs *normal_regs = task_pt_regs(current);
+               struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 
-               /* Fake a #GP(0) from userspace. */
-               memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
-               normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
+               /*
+                * regs->sp points to the failing IRET frame on the
+                * ESPFIX64 stack.  Copy it to the entry stack.  This fills
+                * in gpregs->ss through gpregs->ip.
+                *
+                */
+               memmove(&gpregs->ip, (void *)regs->sp, 5*8);
+               gpregs->orig_ax = 0;  /* Missing (lost) #GP error code */
+
+               /*
+                * Adjust our frame so that we return straight to the #GP
+                * vector with the expected RSP value.  This is safe because
+                * we won't enable interrupts or schedule before we invoke
+                * general_protection, so nothing will clobber the stack
+                * frame we just set up.
+                */
                regs->ip = (unsigned long)general_protection;
-               regs->sp = (unsigned long)&normal_regs->orig_ax;
+               regs->sp = (unsigned long)&gpregs->orig_ax;
 
                return;
        }
@@ -389,7 +408,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
         *
         *   Processors update CR2 whenever a page fault is detected. If a
         *   second page fault occurs while an earlier page fault is being
-        *   delivered, the faulting linear address of the second fault will
+        *   delivered, the faulting linear address of the second fault will
         *   overwrite the contents of CR2 (replacing the previous
         *   address). These updates to CR2 occur even if the page fault
         *   results in a double fault or occurs during the delivery of a
@@ -605,14 +624,15 @@ NOKPROBE_SYMBOL(do_int3);
 
 #ifdef CONFIG_X86_64
 /*
- * Help handler running on IST stack to switch off the IST stack if the
- * interrupted code was in user mode. The actual stack switch is done in
- * entry_64.S
+ * Help handler running on a per-cpu (IST or entry trampoline) stack
+ * to switch to the normal thread stack if the interrupted code was in
+ * user mode. The actual stack switch is done in entry_64.S
  */
 asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
 {
-       struct pt_regs *regs = task_pt_regs(current);
-       *regs = *eregs;
+       struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
+       if (regs != eregs)
+               *regs = *eregs;
        return regs;
 }
 NOKPROBE_SYMBOL(sync_regs);
@@ -628,13 +648,13 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
        /*
         * This is called from entry_64.S early in handling a fault
         * caused by a bad iret to user mode.  To handle the fault
-        * correctly, we want move our stack frame to task_pt_regs
-        * and we want to pretend that the exception came from the
-        * iret target.
+        * correctly, we want to move our stack frame to where it would
+        * be had we entered directly on the entry stack (rather than
+        * just below the IRET frame) and we want to pretend that the
+        * exception came from the IRET target.
         */
        struct bad_iret_stack *new_stack =
-               container_of(task_pt_regs(current),
-                            struct bad_iret_stack, regs);
+               (struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 
        /* Copy the IRET target to the new stack. */
        memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
@@ -795,14 +815,6 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
        debug_stack_usage_dec();
 
 exit:
-#if defined(CONFIG_X86_32)
-       /*
-        * This is the most likely code path that involves non-trivial use
-        * of the SYSENTER stack.  Check that we haven't overrun it.
-        */
-       WARN(this_cpu_read(cpu_tss.SYSENTER_stack_canary) != STACK_END_MAGIC,
-            "Overran or corrupted SYSENTER stack\n");
-#endif
        ist_exit(regs);
 }
 NOKPROBE_SYMBOL(do_debug);
@@ -929,6 +941,9 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
 
 void __init trap_init(void)
 {
+       /* Init cpu_entry_area before IST entries are set up */
+       setup_cpu_entry_areas();
+
        idt_setup_traps();
 
        /*
index a3f973b2c97a03b121fe0173dbdc9298216721e6..be86a865087a6b9dc8e04031dbf2e2fbeeda1ed5 100644 (file)
@@ -253,22 +253,15 @@ unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
        return NULL;
 }
 
-static bool stack_access_ok(struct unwind_state *state, unsigned long addr,
+static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
                            size_t len)
 {
        struct stack_info *info = &state->stack_info;
+       void *addr = (void *)_addr;
 
-       /*
-        * If the address isn't on the current stack, switch to the next one.
-        *
-        * We may have to traverse multiple stacks to deal with the possibility
-        * that info->next_sp could point to an empty stack and the address
-        * could be on a subsequent stack.
-        */
-       while (!on_stack(info, (void *)addr, len))
-               if (get_stack_info(info->next_sp, state->task, info,
-                                  &state->stack_mask))
-                       return false;
+       if (!on_stack(info, addr, len) &&
+           (get_stack_info(addr, state->task, info, &state->stack_mask)))
+               return false;
 
        return true;
 }
@@ -283,42 +276,32 @@ static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
        return true;
 }
 
-#define REGS_SIZE (sizeof(struct pt_regs))
-#define SP_OFFSET (offsetof(struct pt_regs, sp))
-#define IRET_REGS_SIZE (REGS_SIZE - offsetof(struct pt_regs, ip))
-#define IRET_SP_OFFSET (SP_OFFSET - offsetof(struct pt_regs, ip))
-
 static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
-                            unsigned long *ip, unsigned long *sp, bool full)
+                            unsigned long *ip, unsigned long *sp)
 {
-       size_t regs_size = full ? REGS_SIZE : IRET_REGS_SIZE;
-       size_t sp_offset = full ? SP_OFFSET : IRET_SP_OFFSET;
-       struct pt_regs *regs = (struct pt_regs *)(addr + regs_size - REGS_SIZE);
-
-       if (IS_ENABLED(CONFIG_X86_64)) {
-               if (!stack_access_ok(state, addr, regs_size))
-                       return false;
+       struct pt_regs *regs = (struct pt_regs *)addr;
 
-               *ip = regs->ip;
-               *sp = regs->sp;
+       /* x86-32 support will be more complicated due to the &regs->sp hack */
+       BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32));
 
-               return true;
-       }
-
-       if (!stack_access_ok(state, addr, sp_offset))
+       if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
                return false;
 
        *ip = regs->ip;
+       *sp = regs->sp;
+       return true;
+}
 
-       if (user_mode(regs)) {
-               if (!stack_access_ok(state, addr + sp_offset,
-                                    REGS_SIZE - SP_OFFSET))
-                       return false;
+static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr,
+                                 unsigned long *ip, unsigned long *sp)
+{
+       struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET;
 
-               *sp = regs->sp;
-       } else
-               *sp = (unsigned long)&regs->sp;
+       if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
+               return false;
 
+       *ip = regs->ip;
+       *sp = regs->sp;
        return true;
 }
 
@@ -327,7 +310,6 @@ bool unwind_next_frame(struct unwind_state *state)
        unsigned long ip_p, sp, orig_ip, prev_sp = state->sp;
        enum stack_type prev_type = state->stack_info.type;
        struct orc_entry *orc;
-       struct pt_regs *ptregs;
        bool indirect = false;
 
        if (unwind_done(state))
@@ -435,7 +417,7 @@ bool unwind_next_frame(struct unwind_state *state)
                break;
 
        case ORC_TYPE_REGS:
-               if (!deref_stack_regs(state, sp, &state->ip, &state->sp, true)) {
+               if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
                        orc_warn("can't dereference registers at %p for ip %pB\n",
                                 (void *)sp, (void *)orig_ip);
                        goto done;
@@ -447,20 +429,14 @@ bool unwind_next_frame(struct unwind_state *state)
                break;
 
        case ORC_TYPE_REGS_IRET:
-               if (!deref_stack_regs(state, sp, &state->ip, &state->sp, false)) {
+               if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
                        orc_warn("can't dereference iret registers at %p for ip %pB\n",
                                 (void *)sp, (void *)orig_ip);
                        goto done;
                }
 
-               ptregs = container_of((void *)sp, struct pt_regs, ip);
-               if ((unsigned long)ptregs >= prev_sp &&
-                   on_stack(&state->stack_info, ptregs, REGS_SIZE)) {
-                       state->regs = ptregs;
-                       state->full_regs = false;
-               } else
-                       state->regs = NULL;
-
+               state->regs = (void *)sp - IRET_FRAME_OFFSET;
+               state->full_regs = false;
                state->signal = true;
                break;
 
@@ -553,8 +529,18 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
        }
 
        if (get_stack_info((unsigned long *)state->sp, state->task,
-                          &state->stack_info, &state->stack_mask))
-               return;
+                          &state->stack_info, &state->stack_mask)) {
+               /*
+                * We weren't on a valid stack.  It's possible that
+                * we overflowed a valid stack into a guard page.
+                * See if the next page up is valid so that we can
+                * generate some kind of backtrace if this happens.
+                */
+               void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
+               if (get_stack_info(next_page, state->task, &state->stack_info,
+                                  &state->stack_mask))
+                       return;
+       }
 
        /*
         * The caller can provide the address of the first frame directly
index a4009fb9be8725ce7bda96cd5e8160e524903266..d2a8b5a24a44a554e2f81f3b30309ef39aba0d8a 100644 (file)
@@ -107,6 +107,15 @@ SECTIONS
                SOFTIRQENTRY_TEXT
                *(.fixup)
                *(.gnu.warning)
+
+#ifdef CONFIG_X86_64
+               . = ALIGN(PAGE_SIZE);
+               _entry_trampoline = .;
+               *(.entry_trampoline)
+               . = ALIGN(PAGE_SIZE);
+               ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
+#endif
+
                /* End of text section */
                _etext = .;
        } :text = 0x9090
index abe74f779f9d793e9a6c2f19417f23b5aa7ce484..b514b2b2845a334d4b53f28ed0b73c96f12d0e6a 100644 (file)
@@ -2390,9 +2390,21 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 }
 
 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
-                                    u64 cr0, u64 cr4)
+                                   u64 cr0, u64 cr3, u64 cr4)
 {
        int bad;
+       u64 pcid;
+
+       /* In order to later set CR4.PCIDE, CR3[11:0] must be zero.  */
+       pcid = 0;
+       if (cr4 & X86_CR4_PCIDE) {
+               pcid = cr3 & 0xfff;
+               cr3 &= ~0xfff;
+       }
+
+       bad = ctxt->ops->set_cr(ctxt, 3, cr3);
+       if (bad)
+               return X86EMUL_UNHANDLEABLE;
 
        /*
         * First enable PAE, long mode needs it before CR0.PG = 1 is set.
@@ -2411,6 +2423,12 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
                bad = ctxt->ops->set_cr(ctxt, 4, cr4);
                if (bad)
                        return X86EMUL_UNHANDLEABLE;
+               if (pcid) {
+                       bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
+                       if (bad)
+                               return X86EMUL_UNHANDLEABLE;
+               }
+
        }
 
        return X86EMUL_CONTINUE;
@@ -2421,11 +2439,11 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
        struct desc_struct desc;
        struct desc_ptr dt;
        u16 selector;
-       u32 val, cr0, cr4;
+       u32 val, cr0, cr3, cr4;
        int i;
 
        cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
-       ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
+       cr3 =                      GET_SMSTATE(u32, smbase, 0x7ff8);
        ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
        ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);
 
@@ -2467,14 +2485,14 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
 
        ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
 
-       return rsm_enter_protected_mode(ctxt, cr0, cr4);
+       return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
 }
 
 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 {
        struct desc_struct desc;
        struct desc_ptr dt;
-       u64 val, cr0, cr4;
+       u64 val, cr0, cr3, cr4;
        u32 base3;
        u16 selector;
        int i, r;
@@ -2491,7 +2509,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
        ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
 
        cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
-       ctxt->ops->set_cr(ctxt, 3,  GET_SMSTATE(u64, smbase, 0x7f50));
+       cr3 =                       GET_SMSTATE(u64, smbase, 0x7f50);
        cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
        ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
        val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
@@ -2519,7 +2537,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
        dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
        ctxt->ops->set_gdt(ctxt, &dt);
 
-       r = rsm_enter_protected_mode(ctxt, cr0, cr4);
+       r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
        if (r != X86EMUL_CONTINUE)
                return r;
 
index e5e66e5c664057bb5cc5ad2660008ccbf19b69e5..c4deb1f34faa6ce7ffe6bcaaebddc3e87b2a9a69 100644 (file)
@@ -3395,7 +3395,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
                spin_lock(&vcpu->kvm->mmu_lock);
                if(make_mmu_pages_available(vcpu) < 0) {
                        spin_unlock(&vcpu->kvm->mmu_lock);
-                       return 1;
+                       return -ENOSPC;
                }
                sp = kvm_mmu_get_page(vcpu, 0, 0,
                                vcpu->arch.mmu.shadow_root_level, 1, ACC_ALL);
@@ -3410,7 +3410,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
                        spin_lock(&vcpu->kvm->mmu_lock);
                        if (make_mmu_pages_available(vcpu) < 0) {
                                spin_unlock(&vcpu->kvm->mmu_lock);
-                               return 1;
+                               return -ENOSPC;
                        }
                        sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
                                        i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);
@@ -3450,7 +3450,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                spin_lock(&vcpu->kvm->mmu_lock);
                if (make_mmu_pages_available(vcpu) < 0) {
                        spin_unlock(&vcpu->kvm->mmu_lock);
-                       return 1;
+                       return -ENOSPC;
                }
                sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
                                vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL);
@@ -3487,7 +3487,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                spin_lock(&vcpu->kvm->mmu_lock);
                if (make_mmu_pages_available(vcpu) < 0) {
                        spin_unlock(&vcpu->kvm->mmu_lock);
-                       return 1;
+                       return -ENOSPC;
                }
                sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
                                      0, ACC_ALL);
index 8eba631c4dbd509d8687c6135e8dba267042f5e0..023afa0c8887002d6a79a8b121b46996feec1a61 100644 (file)
@@ -2302,7 +2302,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                 * processors.  See 22.2.4.
                 */
                vmcs_writel(HOST_TR_BASE,
-                           (unsigned long)this_cpu_ptr(&cpu_tss));
+                           (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
                vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
 
                /*
index faf843c9b916ead0992d0b155a138c6afdf7ae57..1cec2c62a0b08405d2bd7c8908d6b7f33de3b63c 100644 (file)
@@ -4384,7 +4384,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
                                         addr, n, v))
                    && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
                        break;
-               trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
+               trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v);
                handled += n;
                addr += n;
                len -= n;
@@ -4643,7 +4643,7 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
 {
        if (vcpu->mmio_read_completed) {
                trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
-                              vcpu->mmio_fragments[0].gpa, *(u64 *)val);
+                              vcpu->mmio_fragments[0].gpa, val);
                vcpu->mmio_read_completed = 0;
                return 1;
        }
@@ -4665,14 +4665,14 @@ static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
 {
-       trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
+       trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val);
        return vcpu_mmio_write(vcpu, gpa, bytes, val);
 }
 
 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
                          void *val, int bytes)
 {
-       trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
+       trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL);
        return X86EMUL_IO_NEEDED;
 }
 
@@ -7264,13 +7264,12 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-       struct fpu *fpu = &current->thread.fpu;
        int r;
 
-       fpu__initialize(fpu);
-
        kvm_sigset_activate(vcpu);
 
+       kvm_load_guest_fpu(vcpu);
+
        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                if (kvm_run->immediate_exit) {
                        r = -EINTR;
@@ -7296,14 +7295,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                }
        }
 
-       kvm_load_guest_fpu(vcpu);
-
        if (unlikely(vcpu->arch.complete_userspace_io)) {
                int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
                vcpu->arch.complete_userspace_io = NULL;
                r = cui(vcpu);
                if (r <= 0)
-                       goto out_fpu;
+                       goto out;
        } else
                WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
 
@@ -7312,9 +7309,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        else
                r = vcpu_run(vcpu);
 
-out_fpu:
-       kvm_put_guest_fpu(vcpu);
 out:
+       kvm_put_guest_fpu(vcpu);
        post_kvm_run_save(vcpu);
        kvm_sigset_deactivate(vcpu);
 
@@ -7384,7 +7380,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 #endif
 
        kvm_rip_write(vcpu, regs->rip);
-       kvm_set_rflags(vcpu, regs->rflags);
+       kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);
 
        vcpu->arch.exception.pending = false;
 
@@ -7498,6 +7494,29 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
 }
 EXPORT_SYMBOL_GPL(kvm_task_switch);
 
+int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+       if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG_BIT)) {
+               /*
+                * When EFER.LME and CR0.PG are set, the processor is in
+                * 64-bit mode (though maybe in a 32-bit code segment).
+                * CR4.PAE and EFER.LMA must be set.
+                */
+               if (!(sregs->cr4 & X86_CR4_PAE_BIT)
+                   || !(sregs->efer & EFER_LMA))
+                       return -EINVAL;
+       } else {
+               /*
+                * Not in 64-bit mode: EFER.LMA is clear and the code
+                * segment cannot be 64-bit.
+                */
+               if (sregs->efer & EFER_LMA || sregs->cs.l)
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
 {
@@ -7510,6 +7529,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                        (sregs->cr4 & X86_CR4_OSXSAVE))
                return -EINVAL;
 
+       if (kvm_valid_sregs(vcpu, sregs))
+               return -EINVAL;
+
        apic_base_msr.data = sregs->apic_base;
        apic_base_msr.host_initiated = true;
        if (kvm_set_apic_base(vcpu, &apic_base_msr))
index 553f8fd23cc4733d0edafa862b95446f7a04bab1..4846eff7e4c8b1505501d7f1dcb64127d0a4c67c 100644 (file)
@@ -107,10 +107,10 @@ static void delay_mwaitx(unsigned long __loops)
                delay = min_t(u64, MWAITX_MAX_LOOPS, loops);
 
                /*
-                * Use cpu_tss as a cacheline-aligned, seldomly
+                * Use cpu_tss_rw as a cacheline-aligned, seldomly
                 * accessed per-cpu variable as the monitor target.
                 */
-               __monitorx(raw_cpu_ptr(&cpu_tss), 0, 0);
+               __monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
 
                /*
                 * AMD, like Intel, supports the EAX hint and EAX=0xf
index febf6980e6535572f998cf2fa0ee63d296bdc6f1..06fe3d51d385b88111961c0b5addc673fcd597a2 100644 (file)
@@ -860,7 +860,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
        if (!printk_ratelimit())
                return;
 
-       printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
+       printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
                task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
                tsk->comm, task_pid_nr(tsk), address,
                (void *)regs->ip, (void *)regs->sp, error_code);
index 99dfed6dfef8b2f9028f82b89ab8dc2bde8173c4..9ec70d780f1f4172e3c69068f55722d13f003b06 100644 (file)
@@ -277,6 +277,7 @@ void __init kasan_early_init(void)
 void __init kasan_init(void)
 {
        int i;
+       void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;
 
 #ifdef CONFIG_KASAN_INLINE
        register_die_notifier(&kasan_die_notifier);
@@ -329,8 +330,23 @@ void __init kasan_init(void)
                              (unsigned long)kasan_mem_to_shadow(_end),
                              early_pfn_to_nid(__pa(_stext)));
 
+       shadow_cpu_entry_begin = (void *)__fix_to_virt(FIX_CPU_ENTRY_AREA_BOTTOM);
+       shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
+       shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
+                                               PAGE_SIZE);
+
+       shadow_cpu_entry_end = (void *)(__fix_to_virt(FIX_CPU_ENTRY_AREA_TOP) + PAGE_SIZE);
+       shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
+       shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
+                                       PAGE_SIZE);
+
        kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
-                       (void *)KASAN_SHADOW_END);
+                                  shadow_cpu_entry_begin);
+
+       kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
+                             (unsigned long)shadow_cpu_entry_end, 0);
+
+       kasan_populate_zero_shadow(shadow_cpu_entry_end, (void *)KASAN_SHADOW_END);
 
        load_cr3(init_top_pgt);
        __flush_tlb_all();
index 36a28eddb435e72d2abc5ffbdd1e78a46b56876e..a7d966964c6f20577c927cf5e618bc86b3331977 100644 (file)
@@ -152,17 +152,19 @@ static void do_fpu_end(void)
 static void fix_processor_context(void)
 {
        int cpu = smp_processor_id();
-       struct tss_struct *t = &per_cpu(cpu_tss, cpu);
 #ifdef CONFIG_X86_64
        struct desc_struct *desc = get_cpu_gdt_rw(cpu);
        tss_desc tss;
 #endif
-       set_tss_desc(cpu, t);   /*
-                                * This just modifies memory; should not be
-                                * necessary. But... This is necessary, because
-                                * 386 hardware has concept of busy TSS or some
-                                * similar stupidity.
-                                */
+
+       /*
+        * We need to reload TR, which requires that we change the
+        * GDT entry to indicate "available" first.
+        *
+        * XXX: This could probably all be replaced by a call to
+        * force_reload_TR().
+        */
+       set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
 
 #ifdef CONFIG_X86_64
        memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
index f2414c6c5e7c455b43fc45773fbd1264cf86c24e..7beeee1443b32a3fbcf3ba6ad57594d46c7a359f 100644 (file)
@@ -826,7 +826,7 @@ static void xen_load_sp0(unsigned long sp0)
        mcs = xen_mc_entry(0);
        MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0);
        xen_mc_issue(PARAVIRT_LAZY_CPU);
-       this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
+       this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 }
 
 void xen_set_iopl_mask(unsigned mask)
index fc048ec686e7699b263254c79b482ccf935c21ef..6cf801ca11428fa5fd9c2d3c9931354f28575580 100644 (file)
@@ -2272,7 +2272,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #endif
        case FIX_TEXT_POKE0:
        case FIX_TEXT_POKE1:
-       case FIX_GDT_REMAP_BEGIN ... FIX_GDT_REMAP_END:
+       case FIX_CPU_ENTRY_AREA_TOP ... FIX_CPU_ENTRY_AREA_BOTTOM:
                /* All local page mappings */
                pte = pfn_pte(phys, prot);
                break;
index 8bfdea58159ba9ffd972dd95717e0eee99101e0a..9ef6cf3addb38cae822d0e5c5ef18ba9e98cd2d7 100644 (file)
@@ -599,6 +599,8 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
        bio->bi_disk = bio_src->bi_disk;
        bio->bi_partno = bio_src->bi_partno;
        bio_set_flag(bio, BIO_CLONED);
+       if (bio_flagged(bio_src, BIO_THROTTLED))
+               bio_set_flag(bio, BIO_THROTTLED);
        bio->bi_opf = bio_src->bi_opf;
        bio->bi_write_hint = bio_src->bi_write_hint;
        bio->bi_iter = bio_src->bi_iter;
index b21f8e86f1207f9b76bf3e2083fcf72b5062f0b7..d3a94719f03fb2af81d6270d6fc9ed58f0dde373 100644 (file)
 #include "blk.h"
 
 /*
- * Append a bio to a passthrough request.  Only works can be merged into
- * the request based on the driver constraints.
+ * Append a bio to a passthrough request.  Only works if the bio can be merged
+ * into the request based on the driver constraints.
  */
-int blk_rq_append_bio(struct request *rq, struct bio *bio)
+int blk_rq_append_bio(struct request *rq, struct bio **bio)
 {
-       blk_queue_bounce(rq->q, &bio);
+       struct bio *orig_bio = *bio;
+
+       blk_queue_bounce(rq->q, bio);
 
        if (!rq->bio) {
-               blk_rq_bio_prep(rq->q, rq, bio);
+               blk_rq_bio_prep(rq->q, rq, *bio);
        } else {
-               if (!ll_back_merge_fn(rq->q, rq, bio))
+               if (!ll_back_merge_fn(rq->q, rq, *bio)) {
+                       if (orig_bio != *bio) {
+                               bio_put(*bio);
+                               *bio = orig_bio;
+                       }
                        return -EINVAL;
+               }
 
-               rq->biotail->bi_next = bio;
-               rq->biotail = bio;
-               rq->__data_len += bio->bi_iter.bi_size;
+               rq->biotail->bi_next = *bio;
+               rq->biotail = *bio;
+               rq->__data_len += (*bio)->bi_iter.bi_size;
        }
 
        return 0;
@@ -73,14 +80,12 @@ static int __blk_rq_map_user_iov(struct request *rq,
         * We link the bounce buffer in and could have to traverse it
         * later so we have to get a ref to prevent it from being freed
         */
-       ret = blk_rq_append_bio(rq, bio);
-       bio_get(bio);
+       ret = blk_rq_append_bio(rq, &bio);
        if (ret) {
-               bio_endio(bio);
                __blk_rq_unmap_user(orig_bio);
-               bio_put(bio);
                return ret;
        }
+       bio_get(bio);
 
        return 0;
 }
@@ -213,7 +218,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
        int reading = rq_data_dir(rq) == READ;
        unsigned long addr = (unsigned long) kbuf;
        int do_copy = 0;
-       struct bio *bio;
+       struct bio *bio, *orig_bio;
        int ret;
 
        if (len > (queue_max_hw_sectors(q) << 9))
@@ -236,10 +241,11 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
        if (do_copy)
                rq->rq_flags |= RQF_COPY_USER;
 
-       ret = blk_rq_append_bio(rq, bio);
+       orig_bio = bio;
+       ret = blk_rq_append_bio(rq, &bio);
        if (unlikely(ret)) {
                /* request is too big */
-               bio_put(bio);
+               bio_put(orig_bio);
                return ret;
        }
 
index 825bc29767e6699ac85675d319a9866b70cc9b84..d19f416d61012ac032c49608f0afe463c948e8bc 100644 (file)
@@ -2226,13 +2226,7 @@ again:
 out_unlock:
        spin_unlock_irq(q->queue_lock);
 out:
-       /*
-        * As multiple blk-throtls may stack in the same issue path, we
-        * don't want bios to leave with the flag set.  Clear the flag if
-        * being issued.
-        */
-       if (!throttled)
-               bio_clear_flag(bio, BIO_THROTTLED);
+       bio_set_flag(bio, BIO_THROTTLED);
 
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        if (throttled || !td->track_bio_latency)
index fceb1a96480bfb9600e4664fa2b4992c8bb64210..1d05c422c932ad56d705f94deed6cce0891ff9d3 100644 (file)
@@ -200,6 +200,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
        unsigned i = 0;
        bool bounce = false;
        int sectors = 0;
+       bool passthrough = bio_is_passthrough(*bio_orig);
 
        bio_for_each_segment(from, *bio_orig, iter) {
                if (i++ < BIO_MAX_PAGES)
@@ -210,13 +211,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
        if (!bounce)
                return;
 
-       if (sectors < bio_sectors(*bio_orig)) {
+       if (!passthrough && sectors < bio_sectors(*bio_orig)) {
                bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split);
                bio_chain(bio, *bio_orig);
                generic_make_request(*bio_orig);
                *bio_orig = bio;
        }
-       bio = bio_clone_bioset(*bio_orig, GFP_NOIO, bounce_bio_set);
+       bio = bio_clone_bioset(*bio_orig, GFP_NOIO, passthrough ? NULL :
+                       bounce_bio_set);
 
        bio_for_each_segment_all(to, bio, i) {
                struct page *page = to->bv_page;
index b4df317c291692f01138b91608dc6c80f71bb9aa..f95c60774ce8ca613417d3ccf54bee52010752ee 100644 (file)
@@ -100,9 +100,13 @@ struct kyber_hctx_data {
        unsigned int cur_domain;
        unsigned int batching;
        wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS];
+       struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
        atomic_t wait_index[KYBER_NUM_DOMAINS];
 };
 
+static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
+                            void *key);
+
 static int rq_sched_domain(const struct request *rq)
 {
        unsigned int op = rq->cmd_flags;
@@ -385,6 +389,9 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 
        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                INIT_LIST_HEAD(&khd->rqs[i]);
+               init_waitqueue_func_entry(&khd->domain_wait[i],
+                                         kyber_domain_wake);
+               khd->domain_wait[i].private = hctx;
                INIT_LIST_HEAD(&khd->domain_wait[i].entry);
                atomic_set(&khd->wait_index[i], 0);
        }
@@ -524,35 +531,39 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
        int nr;
 
        nr = __sbitmap_queue_get(domain_tokens);
-       if (nr >= 0)
-               return nr;
 
        /*
         * If we failed to get a domain token, make sure the hardware queue is
         * run when one becomes available. Note that this is serialized on
         * khd->lock, but we still need to be careful about the waker.
         */
-       if (list_empty_careful(&wait->entry)) {
-               init_waitqueue_func_entry(wait, kyber_domain_wake);
-               wait->private = hctx;
+       if (nr < 0 && list_empty_careful(&wait->entry)) {
                ws = sbq_wait_ptr(domain_tokens,
                                  &khd->wait_index[sched_domain]);
+               khd->domain_ws[sched_domain] = ws;
                add_wait_queue(&ws->wait, wait);
 
                /*
                 * Try again in case a token was freed before we got on the wait
-                * queue. The waker may have already removed the entry from the
-                * wait queue, but list_del_init() is okay with that.
+                * queue.
                 */
                nr = __sbitmap_queue_get(domain_tokens);
-               if (nr >= 0) {
-                       unsigned long flags;
+       }
 
-                       spin_lock_irqsave(&ws->wait.lock, flags);
-                       list_del_init(&wait->entry);
-                       spin_unlock_irqrestore(&ws->wait.lock, flags);
-               }
+       /*
+        * If we got a token while we were on the wait queue, remove ourselves
+        * from the wait queue to ensure that all wake ups make forward
+        * progress. It's possible that the waker already deleted the entry
+        * between the !list_empty_careful() check and us grabbing the lock, but
+        * list_del_init() is okay with that.
+        */
+       if (nr >= 0 && !list_empty_careful(&wait->entry)) {
+               ws = khd->domain_ws[sched_domain];
+               spin_lock_irq(&ws->wait.lock);
+               list_del_init(&wait->entry);
+               spin_unlock_irq(&ws->wait.lock);
        }
+
        return nr;
 }
 
index 6742f6c68034c5e833505d294902dd97c274c1b0..9bff853e85f37831d8d053a2aa363f139537c9b5 100644 (file)
@@ -1007,7 +1007,7 @@ skip:
        /* The record may be cleared by others, try read next record */
        if (len == -ENOENT)
                goto skip;
-       else if (len < sizeof(*rcd)) {
+       else if (len < 0 || len < sizeof(*rcd)) {
                rc = -EIO;
                goto out;
        }
index 30e84cc600ae6438c25aec2f2975ae4e3f144553..06ea4749ebd9826a3d7b8b0a9798a1cc797f4d61 100644 (file)
@@ -1171,7 +1171,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
        struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
        struct cpc_register_resource *desired_reg;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
-       struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
+       struct cppc_pcc_data *pcc_ss_data;
        int ret = 0;
 
        if (!cpc_desc || pcc_ss_id < 0) {
index b1a6e327cb23d44d10a2952d9e0aa5f356528b24..cf889fc62ac7fdd65a0995df47575bbb4fbe2df7 100644 (file)
@@ -83,7 +83,8 @@ static void bcma_core_pcie2_hw_ltr_war(struct bcma_drv_pcie2 *pcie2)
                bcma_core_pcie2_set_ltr_vals(pcie2);
 
                /* TODO:
-               si_core_wrapperreg(pcie2, 3, 0x60, 0x8080, 0); */
+                *si_core_wrapperreg(pcie2, 3, 0x60, 0x8080, 0);
+                */
 
                /* enable the LTR */
                devstsctr2 |= PCIE2_CAP_DEVSTSCTRL2_LTRENAB;
index ccb9975a97fa3f214d658776450ab618bae26643..ad0477ae820f040affe54f4368d3a02d9da63350 100644 (file)
@@ -35,13 +35,13 @@ static inline u64 mb_per_tick(int mbps)
 struct nullb_cmd {
        struct list_head list;
        struct llist_node ll_list;
-       call_single_data_t csd;
+       struct __call_single_data csd;
        struct request *rq;
        struct bio *bio;
        unsigned int tag;
+       blk_status_t error;
        struct nullb_queue *nq;
        struct hrtimer timer;
-       blk_status_t error;
 };
 
 struct nullb_queue {
index 60e1c7d6986db52e0640aa5f377f35f505590737..45a2f59cd93552d58efbc3a78eab4bf1c95126ae 100644 (file)
@@ -31,6 +31,16 @@ config BT_HCIBTUSB
          Say Y here to compile support for Bluetooth USB devices into the
          kernel or say M to compile it as module (btusb).
 
+config BT_HCIBTUSB_AUTOSUSPEND
+       bool "Enable USB autosuspend for Bluetooth USB devices by default"
+       depends on BT_HCIBTUSB
+       help
+         Say Y here to enable USB autosuspend for Bluetooth USB devices by
+         default.
+
+         This can be overridden by passing btusb.enable_autosuspend=[y|n]
+         on the kernel commandline.
+
 config BT_HCIBTUSB_BCM
        bool "Broadcom protocol support"
        depends on BT_HCIBTUSB
index d513ef4743dce272634e8012a48305a0e83de3e6..82437a69f99c8dfa562dc2e461a0395680b30d89 100644 (file)
@@ -302,9 +302,7 @@ static void bluecard_write_wakeup(struct bluecard_info *info)
                        }
 
                        /* Wait until the command reaches the baseband */
-                       prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
-                       schedule_timeout(HZ/10);
-                       finish_wait(&wq, &wait);
+                       mdelay(100);
 
                        /* Set baud on baseband */
                        info->ctrl_reg &= ~0x03;
@@ -316,9 +314,7 @@ static void bluecard_write_wakeup(struct bluecard_info *info)
                        outb(info->ctrl_reg, iobase + REG_CONTROL);
 
                        /* Wait before the next HCI packet can be send */
-                       prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
-                       schedule_timeout(HZ);
-                       finish_wait(&wq, &wait);
+                       mdelay(1000);
                }
 
                if (len == skb->len) {
index 663bed63b87159e6c069d084f9654e9e2f872c28..2c9a5fc9137da0d3d48b376c9e3b11ca0c99f265 100644 (file)
@@ -88,7 +88,8 @@ static int btqcomsmd_send(struct hci_dev *hdev, struct sk_buff *skb)
                break;
        }
 
-       kfree_skb(skb);
+       if (!ret)
+               kfree_skb(skb);
 
        return ret;
 }
index c8e945d19ffebc2a819e260b68c8978011eceab0..20142bc77554c0f3400da0506a6158e94722f54b 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/errno.h>
 #include <linux/skbuff.h>
 
+#include <linux/mmc/host.h>
 #include <linux/mmc/sdio_ids.h>
 #include <linux/mmc/sdio_func.h>
 
@@ -292,6 +293,14 @@ static int btsdio_probe(struct sdio_func *func,
                tuple = tuple->next;
        }
 
+       /* BCM43341 devices soldered onto the PCB (non-removable) use an
+        * uart connection for bluetooth, ignore the BT SDIO interface.
+        */
+       if (func->vendor == SDIO_VENDOR_ID_BROADCOM &&
+           func->device == SDIO_DEVICE_ID_BROADCOM_43341 &&
+           !mmc_card_is_removable(func->card->host))
+               return -ENODEV;
+
        data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
index f7120c9eb9bd2cc5bcd90ddf72e513082269ad63..808c249845db681e8a64b8de9b127ed29f25a237 100644 (file)
@@ -40,6 +40,7 @@
 
 static bool disable_scofix;
 static bool force_scofix;
+static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND);
 
 static bool reset = true;
 
@@ -3213,6 +3214,9 @@ static int btusb_probe(struct usb_interface *intf,
        }
 #endif
 
+       if (enable_autosuspend)
+               usb_enable_autosuspend(data->udev);
+
        err = hci_register_dev(hdev);
        if (err < 0)
                goto out_free_dev;
@@ -3425,6 +3429,9 @@ MODULE_PARM_DESC(disable_scofix, "Disable fixup of wrong SCO buffer size");
 module_param(force_scofix, bool, 0644);
 MODULE_PARM_DESC(force_scofix, "Force fixup of wrong SCO buffers size");
 
+module_param(enable_autosuspend, bool, 0644);
+MODULE_PARM_DESC(enable_autosuspend, "Enable USB autosuspend by default");
+
 module_param(reset, bool, 0644);
 MODULE_PARM_DESC(reset, "Send HCI reset command on initialization");
 
index 707c2d1b84c7b70e02fcb7d53117bcf4ef6c6a52..47fc58c9eb49dc5fdd90188f5169d3fccad5b6e7 100644 (file)
@@ -939,6 +939,7 @@ static const struct acpi_device_id bcm_acpi_match[] = {
        { "BCM2E65", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
        { "BCM2E67", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
        { "BCM2E71", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+       { "BCM2E72", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
        { "BCM2E7B", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
        { "BCM2E7C", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
        { "BCM2E7E", (kernel_ulong_t)&acpi_bcm_int_first_gpios },
index e2c078d61730c7c5d9cb6b46bbbe6ef399fecaa7..1b4417a623a412a995da4c800354cd03868815dd 100644 (file)
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <linux/gpio/consumer.h>
+#include <linux/nvmem-consumer.h>
 
 #include "hci_uart.h"
 
+/* Vendor-specific HCI commands */
+#define HCI_VS_WRITE_BD_ADDR                   0xfc06
+#define HCI_VS_UPDATE_UART_HCI_BAUDRATE                0xff36
+
 /* HCILL commands */
 #define HCILL_GO_TO_SLEEP_IND  0x30
 #define HCILL_GO_TO_SLEEP_ACK  0x31
@@ -86,6 +91,7 @@ struct ll_device {
        struct serdev_device *serdev;
        struct gpio_desc *enable_gpio;
        struct clk *ext_clk;
+       bdaddr_t bdaddr;
 };
 
 struct ll_struct {
@@ -620,7 +626,7 @@ static int download_firmware(struct ll_device *lldev)
                case ACTION_SEND_COMMAND:       /* action send */
                        bt_dev_dbg(lldev->hu.hdev, "S");
                        cmd = (struct hci_command *)action_ptr;
-                       if (cmd->opcode == 0xff36) {
+                       if (cmd->opcode == HCI_VS_UPDATE_UART_HCI_BAUDRATE) {
                                /* ignore remote change
                                 * baud rate HCI VS command
                                 */
@@ -628,11 +634,11 @@ static int download_firmware(struct ll_device *lldev)
                                break;
                        }
                        if (cmd->prefix != 1)
-                               bt_dev_dbg(lldev->hu.hdev, "command type %d\n", cmd->prefix);
+                               bt_dev_dbg(lldev->hu.hdev, "command type %d", cmd->prefix);
 
                        skb = __hci_cmd_sync(lldev->hu.hdev, cmd->opcode, cmd->plen, &cmd->speed, HCI_INIT_TIMEOUT);
                        if (IS_ERR(skb)) {
-                               bt_dev_err(lldev->hu.hdev, "send command failed\n");
+                               bt_dev_err(lldev->hu.hdev, "send command failed");
                                err = PTR_ERR(skb);
                                goto out_rel_fw;
                        }
@@ -659,6 +665,24 @@ out_rel_fw:
        return err;
 }
 
+static int ll_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+       bdaddr_t bdaddr_swapped;
+       struct sk_buff *skb;
+
+       /* HCI_VS_WRITE_BD_ADDR (at least on a CC2560A chip) expects the BD
+        * address to be MSB first, but bdaddr_t has the convention of being
+        * LSB first.
+        */
+       baswap(&bdaddr_swapped, bdaddr);
+       skb = __hci_cmd_sync(hdev, HCI_VS_WRITE_BD_ADDR, sizeof(bdaddr_t),
+                            &bdaddr_swapped, HCI_INIT_TIMEOUT);
+       if (!IS_ERR(skb))
+               kfree_skb(skb);
+
+       return PTR_ERR_OR_ZERO(skb);
+}
+
 static int ll_setup(struct hci_uart *hu)
 {
        int err, retry = 3;
@@ -671,14 +695,20 @@ static int ll_setup(struct hci_uart *hu)
 
        lldev = serdev_device_get_drvdata(serdev);
 
+       hu->hdev->set_bdaddr = ll_set_bdaddr;
+
        serdev_device_set_flow_control(serdev, true);
 
        do {
-               /* Configure BT_EN to HIGH state */
+               /* Reset the Bluetooth device */
                gpiod_set_value_cansleep(lldev->enable_gpio, 0);
                msleep(5);
                gpiod_set_value_cansleep(lldev->enable_gpio, 1);
-               msleep(100);
+               err = serdev_device_wait_for_cts(serdev, true, 200);
+               if (err) {
+                       bt_dev_err(hu->hdev, "Failed to get CTS");
+                       return err;
+               }
 
                err = download_firmware(lldev);
                if (!err)
@@ -691,6 +721,18 @@ static int ll_setup(struct hci_uart *hu)
        if (err)
                return err;
 
+       /* Set BD address if one was specified at probe */
+       if (!bacmp(&lldev->bdaddr, BDADDR_NONE)) {
+               /* This means that there was an error getting the BD address
+                * during probe, so mark the device as having a bad address.
+                */
+               set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
+       } else if (bacmp(&lldev->bdaddr, BDADDR_ANY)) {
+               err = ll_set_bdaddr(hu->hdev, &lldev->bdaddr);
+               if (err)
+                       set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
+       }
+
        /* Operational speed if any */
        if (hu->oper_speed)
                speed = hu->oper_speed;
@@ -700,7 +742,12 @@ static int ll_setup(struct hci_uart *hu)
                speed = 0;
 
        if (speed) {
-               struct sk_buff *skb = __hci_cmd_sync(hu->hdev, 0xff36, sizeof(speed), &speed, HCI_INIT_TIMEOUT);
+               __le32 speed_le = cpu_to_le32(speed);
+               struct sk_buff *skb;
+
+               skb = __hci_cmd_sync(hu->hdev, HCI_VS_UPDATE_UART_HCI_BAUDRATE,
+                                    sizeof(speed_le), &speed_le,
+                                    HCI_INIT_TIMEOUT);
                if (!IS_ERR(skb)) {
                        kfree_skb(skb);
                        serdev_device_set_baudrate(serdev, speed);
@@ -716,6 +763,7 @@ static int hci_ti_probe(struct serdev_device *serdev)
 {
        struct hci_uart *hu;
        struct ll_device *lldev;
+       struct nvmem_cell *bdaddr_cell;
        u32 max_speed = 3000000;
 
        lldev = devm_kzalloc(&serdev->dev, sizeof(struct ll_device), GFP_KERNEL);
@@ -737,6 +785,52 @@ static int hci_ti_probe(struct serdev_device *serdev)
        of_property_read_u32(serdev->dev.of_node, "max-speed", &max_speed);
        hci_uart_set_speeds(hu, 115200, max_speed);
 
+       /* optional BD address from nvram */
+       bdaddr_cell = nvmem_cell_get(&serdev->dev, "bd-address");
+       if (IS_ERR(bdaddr_cell)) {
+               int err = PTR_ERR(bdaddr_cell);
+
+               if (err == -EPROBE_DEFER)
+                       return err;
+
+               /* ENOENT means there is no matching nvmem cell and ENOSYS
+                * means that nvmem is not enabled in the kernel configuration.
+                */
+               if (err != -ENOENT && err != -ENOSYS) {
+                       /* If there was some other error, give userspace a
+                        * chance to fix the problem instead of failing to load
+                        * the driver. Using BDADDR_NONE as a flag that is
+                        * tested later in the setup function.
+                        */
+                       dev_warn(&serdev->dev,
+                                "Failed to get \"bd-address\" nvmem cell (%d)\n",
+                                err);
+                       bacpy(&lldev->bdaddr, BDADDR_NONE);
+               }
+       } else {
+               bdaddr_t *bdaddr;
+               size_t len;
+
+               bdaddr = nvmem_cell_read(bdaddr_cell, &len);
+               nvmem_cell_put(bdaddr_cell);
+               if (IS_ERR(bdaddr)) {
+                       dev_err(&serdev->dev, "Failed to read nvmem bd-address\n");
+                       return PTR_ERR(bdaddr);
+               }
+               if (len != sizeof(bdaddr_t)) {
+                       dev_err(&serdev->dev, "Invalid nvmem bd-address length\n");
+                       kfree(bdaddr);
+                       return -EINVAL;
+               }
+
+               /* As per the device tree bindings, the value from nvmem is
+                * expected to be MSB first, but in the kernel it is expected
+                * that bdaddr_t is LSB first.
+                */
+               baswap(&lldev->bdaddr, bdaddr);
+               kfree(bdaddr);
+       }
+
        return hci_uart_register_device(hu, &llp);
 }
 
@@ -748,6 +842,7 @@ static void hci_ti_remove(struct serdev_device *serdev)
 }
 
 static const struct of_device_id hci_ti_of_match[] = {
+       { .compatible = "ti,cc2560" },
        { .compatible = "ti,wl1271-st" },
        { .compatible = "ti,wl1273-st" },
        { .compatible = "ti,wl1281-st" },
index bbd7db7384e6bc9664d829baecdf543721ef678f..05ec530b8a3a70b218918c5f39b94f1666b8b7a8 100644 (file)
@@ -932,6 +932,9 @@ static int qca_setup(struct hci_uart *hu)
        if (!ret) {
                set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
                qca_debugfs_init(hdev);
+       } else if (ret == -ENOENT) {
+               /* No patch/nvm-config found, run with original fw/config */
+               ret = 0;
        }
 
        /* Setup bdaddr */
index 71664b22ec9d71c04305cba182b52e6314c26011..e0e6461b9200c027f7a04d8e577738f3af967864 100644 (file)
@@ -303,6 +303,7 @@ int hci_uart_register_device(struct hci_uart *hu,
        hci_set_drvdata(hdev, hu);
 
        INIT_WORK(&hu->write_work, hci_uart_write_work);
+       percpu_init_rwsem(&hu->proto_lock);
 
        /* Only when vendor specific setup callback is provided, consider
         * the manufacturer information valid. This avoids filling in the
index 58d4f4e1ad6a907991873a03027e6c7aa2f31fc4..ca38229b045ab288a2f250dddaf1b174e8c0572f 100644 (file)
@@ -22,6 +22,8 @@
 
 #include "cpufreq_governor.h"
 
+#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL      (2 * TICK_NSEC / NSEC_PER_USEC)
+
 static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);
 
 static DEFINE_MUTEX(gov_dbs_data_mutex);
@@ -47,11 +49,15 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
 {
        struct dbs_data *dbs_data = to_dbs_data(attr_set);
        struct policy_dbs_info *policy_dbs;
+       unsigned int sampling_interval;
        int ret;
-       ret = sscanf(buf, "%u", &dbs_data->sampling_rate);
-       if (ret != 1)
+
+       ret = sscanf(buf, "%u", &sampling_interval);
+       if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL)
                return -EINVAL;
 
+       dbs_data->sampling_rate = sampling_interval;
+
        /*
         * We are operating under dbs_data->mutex and so the list and its
         * entries can't be freed concurrently.
@@ -430,7 +436,14 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
        if (ret)
                goto free_policy_dbs_info;
 
-       dbs_data->sampling_rate = cpufreq_policy_transition_delay_us(policy);
+       /*
+        * The sampling interval should not be less than the transition latency
+        * of the CPU and it also cannot be too small for dbs_update() to work
+        * correctly.
+        */
+       dbs_data->sampling_rate = max_t(unsigned int,
+                                       CPUFREQ_DBS_MIN_SAMPLING_INTERVAL,
+                                       cpufreq_policy_transition_delay_us(policy));
 
        if (!have_governor_per_policy())
                gov->gdbs_data = dbs_data;
index 628fe899cb483da9dbf0f7661b537734bc82f784..d9b2c2de49c43f125c91b382f818ff81d0ffc6ac 100644 (file)
@@ -226,17 +226,18 @@ static void imx6q_opp_check_speed_grading(struct device *dev)
        val >>= OCOTP_CFG3_SPEED_SHIFT;
        val &= 0x3;
 
-       if ((val != OCOTP_CFG3_SPEED_1P2GHZ) &&
-            of_machine_is_compatible("fsl,imx6q"))
-               if (dev_pm_opp_disable(dev, 1200000000))
-                       dev_warn(dev, "failed to disable 1.2GHz OPP\n");
        if (val < OCOTP_CFG3_SPEED_996MHZ)
                if (dev_pm_opp_disable(dev, 996000000))
                        dev_warn(dev, "failed to disable 996MHz OPP\n");
-       if (of_machine_is_compatible("fsl,imx6q")) {
+
+       if (of_machine_is_compatible("fsl,imx6q") ||
+           of_machine_is_compatible("fsl,imx6qp")) {
                if (val != OCOTP_CFG3_SPEED_852MHZ)
                        if (dev_pm_opp_disable(dev, 852000000))
                                dev_warn(dev, "failed to disable 852MHz OPP\n");
+               if (val != OCOTP_CFG3_SPEED_1P2GHZ)
+                       if (dev_pm_opp_disable(dev, 1200000000))
+                               dev_warn(dev, "failed to disable 1.2GHz OPP\n");
        }
        iounmap(base);
 put_node:
index fbab271b3bf9f9506c86579c75ebe32fc3235228..a861b5b4d4437d6b3be7dcf5e9d0b3475205455d 100644 (file)
@@ -708,7 +708,7 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
                         unsigned long flags)
 {
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
-       struct data_chunk       *first = xt->sgl;
+       struct data_chunk       *first;
        struct at_desc          *desc = NULL;
        size_t                  xfer_count;
        unsigned int            dwidth;
@@ -720,6 +720,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
        if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
                return NULL;
 
+       first = xt->sgl;
+
        dev_info(chan2dev(chan),
                 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
                __func__, &xt->src_start, &xt->dst_start, xt->numf,
index d50273fed715096ac625382f6c511f537da57bf4..afd5e10f8927cb0c5573bb946a48755aad58b0aa 100644 (file)
@@ -555,7 +555,7 @@ static int jz4740_dma_probe(struct platform_device *pdev)
 
        ret = dma_async_device_register(dd);
        if (ret)
-               return ret;
+               goto err_clk;
 
        irq = platform_get_irq(pdev, 0);
        ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev);
@@ -568,6 +568,8 @@ static int jz4740_dma_probe(struct platform_device *pdev)
 
 err_unregister:
        dma_async_device_unregister(dd);
+err_clk:
+       clk_disable_unprepare(dmadev->clk);
        return ret;
 }
 
index 47edc7fbf91f52e5259060824c38eaab69ebdb56..ec5f9d2bc8202f340c615cbe43731016d316d547 100644 (file)
@@ -155,6 +155,12 @@ MODULE_PARM_DESC(run, "Run the test (default: false)");
 #define PATTERN_COUNT_MASK     0x1f
 #define PATTERN_MEMSET_IDX     0x01
 
+/* poor man's completion - we want to use wait_event_freezable() on it */
+struct dmatest_done {
+       bool                    done;
+       wait_queue_head_t       *wait;
+};
+
 struct dmatest_thread {
        struct list_head        node;
        struct dmatest_info     *info;
@@ -165,6 +171,8 @@ struct dmatest_thread {
        u8                      **dsts;
        u8                      **udsts;
        enum dma_transaction_type type;
+       wait_queue_head_t done_wait;
+       struct dmatest_done test_done;
        bool                    done;
 };
 
@@ -342,18 +350,25 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
        return error_count;
 }
 
-/* poor man's completion - we want to use wait_event_freezable() on it */
-struct dmatest_done {
-       bool                    done;
-       wait_queue_head_t       *wait;
-};
 
 static void dmatest_callback(void *arg)
 {
        struct dmatest_done *done = arg;
-
-       done->done = true;
-       wake_up_all(done->wait);
+       struct dmatest_thread *thread =
+               container_of(arg, struct dmatest_thread, done_wait);
+       if (!thread->done) {
+               done->done = true;
+               wake_up_all(done->wait);
+       } else {
+               /*
+                * If thread->done, it means that this callback occurred
+                * after the parent thread has cleaned up. This can
+                * happen in the case that the driver doesn't implement
+                * the terminate_all() functionality and a DMA operation
+                * did not occur within the timeout period.
+                */
+               WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
+       }
 }
 
 static unsigned int min_odd(unsigned int x, unsigned int y)
@@ -424,9 +439,8 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
  */
 static int dmatest_func(void *data)
 {
-       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
        struct dmatest_thread   *thread = data;
-       struct dmatest_done     done = { .wait = &done_wait };
+       struct dmatest_done     *done = &thread->test_done;
        struct dmatest_info     *info;
        struct dmatest_params   *params;
        struct dma_chan         *chan;
@@ -673,9 +687,9 @@ static int dmatest_func(void *data)
                        continue;
                }
 
-               done.done = false;
+               done->done = false;
                tx->callback = dmatest_callback;
-               tx->callback_param = &done;
+               tx->callback_param = done;
                cookie = tx->tx_submit(tx);
 
                if (dma_submit_error(cookie)) {
@@ -688,21 +702,12 @@ static int dmatest_func(void *data)
                }
                dma_async_issue_pending(chan);
 
-               wait_event_freezable_timeout(done_wait, done.done,
+               wait_event_freezable_timeout(thread->done_wait, done->done,
                                             msecs_to_jiffies(params->timeout));
 
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
-               if (!done.done) {
-                       /*
-                        * We're leaving the timed out dma operation with
-                        * dangling pointer to done_wait.  To make this
-                        * correct, we'll need to allocate wait_done for
-                        * each test iteration and perform "who's gonna
-                        * free it this time?" dancing.  For now, just
-                        * leave it dangling.
-                        */
-                       WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
+               if (!done->done) {
                        dmaengine_unmap_put(um);
                        result("test timed out", total_tests, src_off, dst_off,
                               len, 0);
@@ -789,7 +794,7 @@ err_thread_type:
                dmatest_KBs(runtime, total_len), ret);
 
        /* terminate all transfers on specified channels */
-       if (ret)
+       if (ret || failed_tests)
                dmaengine_terminate_all(chan);
 
        thread->done = true;
@@ -849,6 +854,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
                thread->info = info;
                thread->chan = dtc->chan;
                thread->type = type;
+               thread->test_done.wait = &thread->done_wait;
+               init_waitqueue_head(&thread->done_wait);
                smp_wmb();
                thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
                                dma_chan_name(chan), op, i);
index 6775f2c74e25b7269417bbe001adfb03698dea97..c7568869284e17d4b63379b236a0f30391640820 100644 (file)
@@ -863,11 +863,11 @@ static void fsl_edma_irq_exit(
        }
 }
 
-static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma)
+static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
 {
        int i;
 
-       for (i = 0; i < DMAMUX_NR; i++)
+       for (i = 0; i < nr_clocks; i++)
                clk_disable_unprepare(fsl_edma->muxclk[i]);
 }
 
@@ -904,25 +904,25 @@ static int fsl_edma_probe(struct platform_device *pdev)
 
                res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
                fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
-               if (IS_ERR(fsl_edma->muxbase[i]))
+               if (IS_ERR(fsl_edma->muxbase[i])) {
+                       /* on error: disable all previously enabled clks */
+                       fsl_disable_clocks(fsl_edma, i);
                        return PTR_ERR(fsl_edma->muxbase[i]);
+               }
 
                sprintf(clkname, "dmamux%d", i);
                fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
                if (IS_ERR(fsl_edma->muxclk[i])) {
                        dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
+                       /* on error: disable all previously enabled clks */
+                       fsl_disable_clocks(fsl_edma, i);
                        return PTR_ERR(fsl_edma->muxclk[i]);
                }
 
                ret = clk_prepare_enable(fsl_edma->muxclk[i]);
-               if (ret) {
-                       /* disable only clks which were enabled on error */
-                       for (; i >= 0; i--)
-                               clk_disable_unprepare(fsl_edma->muxclk[i]);
-
-                       dev_err(&pdev->dev, "DMAMUX clk block failed.\n");
-                       return ret;
-               }
+               if (ret)
+                       /* on error: disable all previously enabled clks */
+                       fsl_disable_clocks(fsl_edma, i);
 
        }
 
@@ -976,7 +976,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(&pdev->dev,
                        "Can't register Freescale eDMA engine. (%d)\n", ret);
-               fsl_disable_clocks(fsl_edma);
+               fsl_disable_clocks(fsl_edma, DMAMUX_NR);
                return ret;
        }
 
@@ -985,7 +985,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
                dev_err(&pdev->dev,
                        "Can't register Freescale eDMA of_dma. (%d)\n", ret);
                dma_async_device_unregister(&fsl_edma->dma_dev);
-               fsl_disable_clocks(fsl_edma);
+               fsl_disable_clocks(fsl_edma, DMAMUX_NR);
                return ret;
        }
 
@@ -1015,7 +1015,7 @@ static int fsl_edma_remove(struct platform_device *pdev)
        fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
        of_dma_controller_free(np);
        dma_async_device_unregister(&fsl_edma->dma_dev);
-       fsl_disable_clocks(fsl_edma);
+       fsl_disable_clocks(fsl_edma, DMAMUX_NR);
 
        return 0;
 }
index 2f31d3d0caa61821aa08aea360e06709bdb25d48..7792a9186f9cf35bae71792e5e0783cf53364b05 100644 (file)
@@ -390,7 +390,7 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
        if (memcmp(src, dest, IOAT_TEST_SIZE)) {
                dev_err(dev, "Self-test copy failed compare, disabling\n");
                err = -ENODEV;
-               goto free_resources;
+               goto unmap_dma;
        }
 
 unmap_dma:
index c9714072e22465d4b23d8101038f782b084b2dca..59c82cdcf48d8a508613dbc7b1c98654285de28f 100644 (file)
@@ -377,6 +377,7 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
        u8 *ptr;
        u8 *rx_buf;
        u8 sum;
+       u8 rx_byte;
        int ret = 0, final_ret;
 
        len = cros_ec_prepare_tx(ec_dev, ec_msg);
@@ -421,25 +422,22 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
        if (!ret) {
                /* Verify that EC can process command */
                for (i = 0; i < len; i++) {
-                       switch (rx_buf[i]) {
-                       case EC_SPI_PAST_END:
-                       case EC_SPI_RX_BAD_DATA:
-                       case EC_SPI_NOT_READY:
-                               ret = -EAGAIN;
-                               ec_msg->result = EC_RES_IN_PROGRESS;
-                       default:
+                       rx_byte = rx_buf[i];
+                       if (rx_byte == EC_SPI_PAST_END  ||
+                           rx_byte == EC_SPI_RX_BAD_DATA ||
+                           rx_byte == EC_SPI_NOT_READY) {
+                               ret = -EREMOTEIO;
                                break;
                        }
-                       if (ret)
-                               break;
                }
-               if (!ret)
-                       ret = cros_ec_spi_receive_packet(ec_dev,
-                                       ec_msg->insize + sizeof(*response));
-       } else {
-               dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
        }
 
+       if (!ret)
+               ret = cros_ec_spi_receive_packet(ec_dev,
+                               ec_msg->insize + sizeof(*response));
+       else
+               dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+
        final_ret = terminate_request(ec_dev);
 
        spi_bus_unlock(ec_spi->spi->master);
@@ -508,6 +506,7 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
        int i, len;
        u8 *ptr;
        u8 *rx_buf;
+       u8 rx_byte;
        int sum;
        int ret = 0, final_ret;
 
@@ -544,25 +543,22 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
        if (!ret) {
                /* Verify that EC can process command */
                for (i = 0; i < len; i++) {
-                       switch (rx_buf[i]) {
-                       case EC_SPI_PAST_END:
-                       case EC_SPI_RX_BAD_DATA:
-                       case EC_SPI_NOT_READY:
-                               ret = -EAGAIN;
-                               ec_msg->result = EC_RES_IN_PROGRESS;
-                       default:
+                       rx_byte = rx_buf[i];
+                       if (rx_byte == EC_SPI_PAST_END  ||
+                           rx_byte == EC_SPI_RX_BAD_DATA ||
+                           rx_byte == EC_SPI_NOT_READY) {
+                               ret = -EREMOTEIO;
                                break;
                        }
-                       if (ret)
-                               break;
                }
-               if (!ret)
-                       ret = cros_ec_spi_receive_response(ec_dev,
-                                       ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
-       } else {
-               dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
        }
 
+       if (!ret)
+               ret = cros_ec_spi_receive_response(ec_dev,
+                               ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
+       else
+               dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+
        final_ret = terminate_request(ec_dev);
 
        spi_bus_unlock(ec_spi->spi->master);
@@ -667,6 +663,7 @@ static int cros_ec_spi_probe(struct spi_device *spi)
                           sizeof(struct ec_response_get_protocol_info);
        ec_dev->dout_size = sizeof(struct ec_host_request);
 
+       ec_spi->last_transfer_ns = ktime_get_ns();
 
        err = cros_ec_register(ec_dev);
        if (err) {
index da16bf45fab43ee9a946beef340f4cd2a224156e..dc94ffc6321a84dd25ce08d0f1a9374d40d4cead 100644 (file)
@@ -159,13 +159,18 @@ unsigned int twl4030_audio_get_mclk(void)
 EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk);
 
 static bool twl4030_audio_has_codec(struct twl4030_audio_data *pdata,
-                             struct device_node *node)
+                             struct device_node *parent)
 {
+       struct device_node *node;
+
        if (pdata && pdata->codec)
                return true;
 
-       if (of_find_node_by_name(node, "codec"))
+       node = of_get_child_by_name(parent, "codec");
+       if (node) {
+               of_node_put(node);
                return true;
+       }
 
        return false;
 }
index d66502d36ba0b3202d1c15c08540fa8aade42a32..dd19f17a1b637543965dd94e64d0d44b9178f64c 100644 (file)
@@ -97,12 +97,16 @@ static struct reg_sequence twl6040_patch[] = {
 };
 
 
-static bool twl6040_has_vibra(struct device_node *node)
+static bool twl6040_has_vibra(struct device_node *parent)
 {
-#ifdef CONFIG_OF
-       if (of_find_node_by_name(node, "vibra"))
+       struct device_node *node;
+
+       node = of_get_child_by_name(parent, "vibra");
+       if (node) {
+               of_node_put(node);
                return true;
-#endif
+       }
+
        return false;
 }
 
index eda38cbe85307edd67863122e24451d269d66866..41f2a9f6851d9e74a58fd06030cb2f00517ea8d0 100644 (file)
@@ -32,7 +32,7 @@
 #include <linux/pci.h>
 #include <linux/mutex.h>
 #include <linux/miscdevice.h>
-#include <linux/pti.h>
+#include <linux/intel-pti.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 
index f80e911b8843819db8dcd1956c76ce2bf60b5ab8..73b6055774474e322b07cda4144c48b5b235a55c 100644 (file)
@@ -1114,7 +1114,7 @@ static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
        if (!ops->oobbuf)
                ops->ooblen = 0;
 
-       if (offs < 0 || offs + ops->len >= mtd->size)
+       if (offs < 0 || offs + ops->len > mtd->size)
                return -EINVAL;
 
        if (ops->ooblen) {
index e0eb51d8c0129937b35157ccdc107e5ef54c038a..dd56a671ea4285af0f5079bc652ecf4a32410272 100644 (file)
@@ -1763,7 +1763,7 @@ try_dmaread:
                        err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
                                                              addr);
                        /* erased page bitflips corrected */
-                       if (err > 0)
+                       if (err >= 0)
                                return err;
                }
 
index 484f7fbc3f7d2d11cd66fc3416e64ab38d47f852..a8bde6665c24f7e20e6103959ceee16c5d3ec5c8 100644 (file)
@@ -253,9 +253,9 @@ static int gpio_nand_probe(struct platform_device *pdev)
                goto out_ce;
        }
 
-       gpiomtd->nwp = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
-       if (IS_ERR(gpiomtd->nwp)) {
-               ret = PTR_ERR(gpiomtd->nwp);
+       gpiomtd->ale = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
+       if (IS_ERR(gpiomtd->ale)) {
+               ret = PTR_ERR(gpiomtd->ale);
                goto out_ce;
        }
 
index 50f8d4a1b9832326070045d0c294d22393001fbd..d4d824ef64e9fb395af3bc549daae72b96731e16 100644 (file)
@@ -1067,9 +1067,6 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                return ret;
        }
 
-       /* handle the block mark swapping */
-       block_mark_swapping(this, payload_virt, auxiliary_virt);
-
        /* Loop over status bytes, accumulating ECC status. */
        status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
 
@@ -1158,6 +1155,9 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                max_bitflips = max_t(unsigned int, max_bitflips, *status);
        }
 
+       /* handle the block mark swapping */
+       block_mark_swapping(this, buf, auxiliary_virt);
+
        if (oob_required) {
                /*
                 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
index 8a9b085c2a98341e9d4b829ef4cb42daea5ae36c..58c705f24f963f98a4fa9181f18ac04f4af1d616 100644 (file)
@@ -1431,13 +1431,9 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
 {
        u8 macaddr[ETH_ALEN];
        u8 *mac;
-       int i;
 
        if (newval->string) {
-               i = sscanf(newval->string, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
-                          &macaddr[0], &macaddr[1], &macaddr[2],
-                          &macaddr[3], &macaddr[4], &macaddr[5]);
-               if (i != ETH_ALEN)
+               if (!mac_pton(newval->string, macaddr))
                        goto err;
                mac = macaddr;
        } else {
index f412aad58253e28213a413bb8b7110764a3f3e34..944901f03f8be910d4fea76c06abdf56e5dd2374 100644 (file)
@@ -249,7 +249,6 @@ static int lan9303_read(struct regmap *regmap, unsigned int offset, u32 *reg)
        return -EIO;
 }
 
-/* Wait a while until mask & reg == value. Otherwise return timeout. */
 static int lan9303_read_wait(struct lan9303 *chip, int offset, u32 mask)
 {
        int i;
@@ -541,20 +540,19 @@ lan9303_alr_cache_find_mac(struct lan9303 *chip, const u8 *mac_addr)
        return NULL;
 }
 
-/* Wait a while until mask & reg == value. Otherwise return timeout. */
-static int lan9303_csr_reg_wait(struct lan9303 *chip, int regno,
-                               int mask, char value)
+static int lan9303_csr_reg_wait(struct lan9303 *chip, int regno, u32 mask)
 {
        int i;
 
-       for (i = 0; i < 0x1000; i++) {
+       for (i = 0; i < 25; i++) {
                u32 reg;
 
                lan9303_read_switch_reg(chip, regno, &reg);
-               if ((reg & mask) == value)
+               if (!(reg & mask))
                        return 0;
                usleep_range(1000, 2000);
        }
+
        return -ETIMEDOUT;
 }
 
@@ -564,8 +562,7 @@ static int lan9303_alr_make_entry_raw(struct lan9303 *chip, u32 dat0, u32 dat1)
        lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_WR_DAT_1, dat1);
        lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
                                 LAN9303_ALR_CMD_MAKE_ENTRY);
-       lan9303_csr_reg_wait(chip, LAN9303_SWE_ALR_CMD_STS, ALR_STS_MAKE_PEND,
-                            0);
+       lan9303_csr_reg_wait(chip, LAN9303_SWE_ALR_CMD_STS, ALR_STS_MAKE_PEND);
        lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
 
        return 0;
index a74a8fbad53a57f95ac32a980b6d96da136e5972..7a3ebfd236f5ebcd2f462608de3781c7560b0ac5 100644 (file)
@@ -2930,9 +2930,8 @@ void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
 void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
 {
        struct ethhdr *eth = (struct ethhdr *)skb->data;
-       unsigned char *buf = skb->data;
        unsigned char buffer[128];
-       unsigned int i, j;
+       unsigned int i;
 
        netdev_dbg(netdev, "\n************** SKB dump ****************\n");
 
@@ -2943,22 +2942,13 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
        netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
        netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
 
-       for (i = 0, j = 0; i < skb->len;) {
-               j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
-                             buf[i++]);
-
-               if ((i % 32) == 0) {
-                       netdev_dbg(netdev, "  %#06x: %s\n", i - 32, buffer);
-                       j = 0;
-               } else if ((i % 16) == 0) {
-                       buffer[j++] = ' ';
-                       buffer[j++] = ' ';
-               } else if ((i % 4) == 0) {
-                       buffer[j++] = ' ';
-               }
+       for (i = 0; i < skb->len; i += 32) {
+               unsigned int len = min(skb->len - i, 32U);
+
+               hex_dump_to_buffer(&skb->data[i], len, 32, 1,
+                                  buffer, sizeof(buffer), false);
+               netdev_dbg(netdev, "  %#06x: %s\n", i, buffer);
        }
-       if (i % 32)
-               netdev_dbg(netdev, "  %#06x: %s\n", i - (i % 32), buffer);
 
        netdev_dbg(netdev, "\n************** SKB dump ****************\n");
 }
index 3c63b16d485f4bb3a7587e9e6d36dd0e121668d2..d9efbc8d783b84b128379e0ce58a43b005a8ab58 100644 (file)
@@ -159,6 +159,8 @@ struct arc_emac_priv {
        unsigned int link;
        unsigned int duplex;
        unsigned int speed;
+
+       unsigned int rx_missed_errors;
 };
 
 /**
index 3241af1ce7182824c09ee3ad774f122565f6c940..bd277b0dc615118a58b81dfba5b040e26fa667ba 100644 (file)
@@ -26,6 +26,8 @@
 
 #include "emac.h"
 
+static void arc_emac_restart(struct net_device *ndev);
+
 /**
  * arc_emac_tx_avail - Return the number of available slots in the tx ring.
  * @priv: Pointer to ARC EMAC private data structure.
@@ -210,39 +212,48 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
                        continue;
                }
 
-               pktlen = info & LEN_MASK;
-               stats->rx_packets++;
-               stats->rx_bytes += pktlen;
-               skb = rx_buff->skb;
-               skb_put(skb, pktlen);
-               skb->dev = ndev;
-               skb->protocol = eth_type_trans(skb, ndev);
-
-               dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
-                                dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
-
-               /* Prepare the BD for next cycle */
-               rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
-                                                        EMAC_BUFFER_SIZE);
-               if (unlikely(!rx_buff->skb)) {
+               /* Prepare the BD for the next cycle. Call
+                * netif_receive_skb() only if a new skb was allocated and
+                * mapped, to avoid holes in the RX FIFO.
+                */
+               skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
+               if (unlikely(!skb)) {
+                       if (net_ratelimit())
+                               netdev_err(ndev, "cannot allocate skb\n");
+                       /* Return ownership to EMAC */
+                       rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
                        stats->rx_errors++;
-                       /* Because receive_skb is below, increment rx_dropped */
                        stats->rx_dropped++;
                        continue;
                }
 
-               /* receive_skb only if new skb was allocated to avoid holes */
-               netif_receive_skb(skb);
-
-               addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
+               addr = dma_map_single(&ndev->dev, (void *)skb->data,
                                      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
                if (dma_mapping_error(&ndev->dev, addr)) {
                        if (net_ratelimit())
-                               netdev_err(ndev, "cannot dma map\n");
-                       dev_kfree_skb(rx_buff->skb);
+                               netdev_err(ndev, "cannot map dma buffer\n");
+                       dev_kfree_skb(skb);
+                       /* Return ownership to EMAC */
+                       rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
                        stats->rx_errors++;
+                       stats->rx_dropped++;
                        continue;
                }
+
+               /* unmap previously mapped skb */
+               dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+                                dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
+
+               pktlen = info & LEN_MASK;
+               stats->rx_packets++;
+               stats->rx_bytes += pktlen;
+               skb_put(rx_buff->skb, pktlen);
+               rx_buff->skb->dev = ndev;
+               rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);
+
+               netif_receive_skb(rx_buff->skb);
+
+               rx_buff->skb = skb;
                dma_unmap_addr_set(rx_buff, addr, addr);
                dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
 
@@ -258,6 +269,53 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
        return work_done;
 }
 
+/**
+ * arc_emac_rx_miss_handle - handle R_MISS register
+ * @ndev:      Pointer to the net_device structure.
+ */
+static void arc_emac_rx_miss_handle(struct net_device *ndev)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+       unsigned int miss;
+
+       miss = arc_reg_get(priv, R_MISS);
+       if (miss) {
+               stats->rx_errors += miss;
+               stats->rx_missed_errors += miss;
+               priv->rx_missed_errors += miss;
+       }
+}
+
+/**
+ * arc_emac_rx_stall_check - check RX stall
+ * @ndev:      Pointer to the net_device structure.
+ * @budget:    How many BDs requested to process on 1 call.
+ * @work_done: How many BDs processed
+ *
+ * Under certain conditions the EMAC stops reception of incoming packets
+ * and continuously increments the R_MISS register instead of saving data
+ * into the provided buffer. This function detects that condition and
+ * restarts the EMAC.
+ */
+static void arc_emac_rx_stall_check(struct net_device *ndev,
+                                   int budget, unsigned int work_done)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       struct arc_emac_bd *rxbd;
+
+       if (work_done)
+               priv->rx_missed_errors = 0;
+
+       if (priv->rx_missed_errors && budget) {
+               rxbd = &priv->rxbd[priv->last_rx_bd];
+               if (le32_to_cpu(rxbd->info) & FOR_EMAC) {
+                       arc_emac_restart(ndev);
+                       priv->rx_missed_errors = 0;
+               }
+       }
+}
+
 /**
  * arc_emac_poll - NAPI poll handler.
  * @napi:      Pointer to napi_struct structure.
@@ -272,6 +330,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
        unsigned int work_done;
 
        arc_emac_tx_clean(ndev);
+       arc_emac_rx_miss_handle(ndev);
 
        work_done = arc_emac_rx(ndev, budget);
        if (work_done < budget) {
@@ -279,6 +338,8 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
                arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
        }
 
+       arc_emac_rx_stall_check(ndev, budget, work_done);
+
        return work_done;
 }
 
@@ -320,6 +381,8 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
                if (status & MSER_MASK) {
                        stats->rx_missed_errors += 0x100;
                        stats->rx_errors += 0x100;
+                       priv->rx_missed_errors += 0x100;
+                       napi_schedule(&priv->napi);
                }
 
                if (status & RXCR_MASK) {
@@ -732,6 +795,63 @@ static int arc_emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 }
 
 
+/**
+ * arc_emac_restart - Restart EMAC
+ * @ndev:      Pointer to net_device structure.
+ *
+ * This function does a hardware reset of the EMAC in order to restore
+ * reception of network packets.
+ */
+static void arc_emac_restart(struct net_device *ndev)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+       int i;
+
+       if (net_ratelimit())
+               netdev_warn(ndev, "restarting stalled EMAC\n");
+
+       netif_stop_queue(ndev);
+
+       /* Disable interrupts */
+       arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
+
+       /* Disable EMAC */
+       arc_reg_clr(priv, R_CTRL, EN_MASK);
+
+       /* Return the sk_buff to system */
+       arc_free_tx_queue(ndev);
+
+       /* Clean Tx BD's */
+       priv->txbd_curr = 0;
+       priv->txbd_dirty = 0;
+       memset(priv->txbd, 0, TX_RING_SZ);
+
+       for (i = 0; i < RX_BD_NUM; i++) {
+               struct arc_emac_bd *rxbd = &priv->rxbd[i];
+               unsigned int info = le32_to_cpu(rxbd->info);
+
+               if (!(info & FOR_EMAC)) {
+                       stats->rx_errors++;
+                       stats->rx_dropped++;
+               }
+               /* Return ownership to EMAC */
+               rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
+       }
+       priv->last_rx_bd = 0;
+
+       /* Make sure info is visible to EMAC before enable */
+       wmb();
+
+       /* Enable interrupts */
+       arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
+
+       /* Enable EMAC */
+       arc_reg_or(priv, R_CTRL, EN_MASK);
+
+       netif_start_queue(ndev);
+}
+
 static const struct net_device_ops arc_emac_netdev_ops = {
        .ndo_open               = arc_emac_open,
        .ndo_stop               = arc_emac_stop,
index d9346e2ac7205bd680d3cd41b00ae0cbdeefa35d..1fbbbabe758855650c9875f853cb820d753c2066 100644 (file)
@@ -1716,7 +1716,6 @@ static int bcm_enet_probe(struct platform_device *pdev)
        struct bcm63xx_enet_platform_data *pd;
        struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
        struct mii_bus *bus;
-       const char *clk_name;
        int i, ret;
 
        if (!bcm_enet_shared_base[0])
@@ -1751,20 +1750,8 @@ static int bcm_enet_probe(struct platform_device *pdev)
        dev->irq = priv->irq = res_irq->start;
        priv->irq_rx = res_irq_rx->start;
        priv->irq_tx = res_irq_tx->start;
-       priv->mac_id = pdev->id;
 
-       /* get rx & tx dma channel id for this mac */
-       if (priv->mac_id == 0) {
-               priv->rx_chan = 0;
-               priv->tx_chan = 1;
-               clk_name = "enet0";
-       } else {
-               priv->rx_chan = 2;
-               priv->tx_chan = 3;
-               clk_name = "enet1";
-       }
-
-       priv->mac_clk = devm_clk_get(&pdev->dev, clk_name);
+       priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
        if (IS_ERR(priv->mac_clk)) {
                ret = PTR_ERR(priv->mac_clk);
                goto out;
@@ -1795,9 +1782,11 @@ static int bcm_enet_probe(struct platform_device *pdev)
                priv->dma_chan_width = pd->dma_chan_width;
                priv->dma_has_sram = pd->dma_has_sram;
                priv->dma_desc_shift = pd->dma_desc_shift;
+               priv->rx_chan = pd->rx_chan;
+               priv->tx_chan = pd->tx_chan;
        }
 
-       if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
+       if (priv->has_phy && !priv->use_external_mii) {
                /* using internal PHY, enable clock */
                priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
                if (IS_ERR(priv->phy_clk)) {
@@ -1828,7 +1817,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
                bus->priv = priv;
                bus->read = bcm_enet_mdio_read_phylib;
                bus->write = bcm_enet_mdio_write_phylib;
-               sprintf(bus->id, "%s-%d", pdev->name, priv->mac_id);
+               sprintf(bus->id, "%s-%d", pdev->name, pdev->id);
 
                /* only probe bus where we think the PHY is, because
                 * the mdio read operation return 0 instead of 0xffff
index 5a66728d47761d2c28f1371802db34ae44a67936..1d3c917eb8300e4d8a6a858764ae3077674966cd 100644 (file)
@@ -193,9 +193,6 @@ struct bcm_enet_mib_counters {
 
 struct bcm_enet_priv {
 
-       /* mac id (from platform device id) */
-       int mac_id;
-
        /* base remapped address of device */
        void __iomem *base;
 
index 4c739d5355d2279a5f1c4cd23e8ccf3288788e8d..01b7f2fc249c4aecadac24abade94b0896db310d 100644 (file)
@@ -2482,8 +2482,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
         */
        if (bp->dev->features & NETIF_F_LRO)
                fp->mode = TPA_MODE_LRO;
-       else if (bp->dev->features & NETIF_F_GRO &&
-                bnx2x_mtu_allows_gro(bp->dev->mtu))
+       else if (bp->dev->features & NETIF_F_GRO_HW)
                fp->mode = TPA_MODE_GRO;
        else
                fp->mode = TPA_MODE_DISABLED;
@@ -4874,6 +4873,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
         */
        dev->mtu = new_mtu;
 
+       if (!bnx2x_mtu_allows_gro(new_mtu))
+               dev->features &= ~NETIF_F_GRO_HW;
+
        if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
                SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
 
@@ -4903,10 +4905,13 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev,
        }
 
        /* TPA requires Rx CSUM offloading */
-       if (!(features & NETIF_F_RXCSUM)) {
+       if (!(features & NETIF_F_RXCSUM))
+               features &= ~NETIF_F_LRO;
+
+       if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
+               features &= ~NETIF_F_GRO_HW;
+       if (features & NETIF_F_GRO_HW)
                features &= ~NETIF_F_LRO;
-               features &= ~NETIF_F_GRO;
-       }
 
        return features;
 }
@@ -4933,13 +4938,8 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
                }
        }
 
-       /* if GRO is changed while LRO is enabled, don't force a reload */
-       if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
-               changes &= ~NETIF_F_GRO;
-
-       /* if GRO is changed while HW TPA is off, don't force a reload */
-       if ((changes & NETIF_F_GRO) && bp->disable_tpa)
-               changes &= ~NETIF_F_GRO;
+       /* Don't care about GRO changes */
+       changes &= ~NETIF_F_GRO;
 
        if (changes)
                bnx2x_reload = true;
index 91e2a7560b48d572d26e8566c9a3b0667083d45d..4d0654813de1f1a440ba207f820b60ab8e0f7c3c 100644 (file)
@@ -12400,8 +12400,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 
        /* Set TPA flags */
        if (bp->disable_tpa) {
-               bp->dev->hw_features &= ~NETIF_F_LRO;
-               bp->dev->features &= ~NETIF_F_LRO;
+               bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+               bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
        }
 
        if (CHIP_IS_E1(bp))
@@ -13273,7 +13273,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
 
        dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
-               NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
+               NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_GRO_HW |
                NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
        if (!chip_is_e1x) {
                dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
@@ -13309,6 +13309,8 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
 
        dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
        dev->features |= NETIF_F_HIGHDMA;
+       if (dev->features & NETIF_F_LRO)
+               dev->features &= ~NETIF_F_GRO_HW;
 
        /* Add Loopback capability to the device */
        dev->hw_features |= NETIF_F_LOOPBACK;
index 1d865ae201db79c7895e3dcf0af45af695bc4190..9efbdc6f1fcbb36c9bf1b93bc488db442805094a 100644 (file)
@@ -2755,7 +2755,7 @@ void bnxt_set_tpa_flags(struct bnxt *bp)
                return;
        if (bp->dev->features & NETIF_F_LRO)
                bp->flags |= BNXT_FLAG_LRO;
-       if (bp->dev->features & NETIF_F_GRO)
+       else if (bp->dev->features & NETIF_F_GRO_HW)
                bp->flags |= BNXT_FLAG_GRO;
 }
 
@@ -2843,10 +2843,10 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
                        min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
                bp->flags &= ~BNXT_FLAG_AGG_RINGS;
                bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
-               bp->dev->hw_features &= ~NETIF_F_LRO;
-               bp->dev->features &= ~NETIF_F_LRO;
                bp->rx_dir = DMA_BIDIRECTIONAL;
                bp->rx_skb_func = bnxt_rx_page_skb;
+               /* Disable LRO or GRO_HW */
+               netdev_update_features(bp->dev);
        } else {
                bp->dev->max_mtu = bp->max_mtu;
                bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
@@ -6788,6 +6788,15 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
        if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
                features &= ~NETIF_F_NTUPLE;
 
+       if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+               features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+
+       if (!(features & NETIF_F_GRO))
+               features &= ~NETIF_F_GRO_HW;
+
+       if (features & NETIF_F_GRO_HW)
+               features &= ~NETIF_F_LRO;
+
+       /* Both CTAG and STAG VLAN acceleration on the RX side have to be
         * turned on or off together.
         */
@@ -6821,9 +6830,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
        bool update_tpa = false;
 
        flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
-       if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
+       if (features & NETIF_F_GRO_HW)
                flags |= BNXT_FLAG_GRO;
-       if (features & NETIF_F_LRO)
+       else if (features & NETIF_F_LRO)
                flags |= BNXT_FLAG_LRO;
 
        if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
@@ -7924,8 +7933,8 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
                if (rc)
                        return rc;
                bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
-               bp->dev->hw_features &= ~NETIF_F_LRO;
-               bp->dev->features &= ~NETIF_F_LRO;
+               bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+               bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
                bnxt_set_ring_params(bp);
        }
 
@@ -8108,7 +8117,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
        dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
                            NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
+       if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+               dev->hw_features |= NETIF_F_GRO_HW;
        dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
+       if (dev->features & NETIF_F_GRO_HW)
+               dev->features &= ~NETIF_F_LRO;
        dev->priv_flags |= IFF_UNICAST_FLT;
 
 #ifdef CONFIG_BNXT_SRIOV
index de51c2177d03b3cf9e4653226bd8901b1d29834e..d09c5a9c53b502788224102a6ae789cc42cc9b25 100644 (file)
@@ -14225,7 +14225,9 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
        /* Reset PHY, otherwise the read DMA engine will be in a mode that
         * breaks all requests to 256 bytes.
         */
-       if (tg3_asic_rev(tp) == ASIC_REV_57766)
+       if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
+           tg3_asic_rev(tp) == ASIC_REV_5717 ||
+           tg3_asic_rev(tp) == ASIC_REV_5719)
                reset_phy = true;
 
        err = tg3_restart_hw(tp, reset_phy);
index d73fb6a85f8ed0268953ff86a01b82cd9796b81d..336670d00a521207cbe22311d2b04feebc59f1a8 100644 (file)
@@ -1004,9 +1004,10 @@ int cudbg_collect_rss(struct cudbg_init *pdbg_init,
 {
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
-       int rc;
+       int rc, nentries;
 
-       rc = cudbg_get_buff(dbg_buff, RSS_NENTRIES * sizeof(u16), &temp_buff);
+       nentries = t4_chip_rss_size(padap);
+       rc = cudbg_get_buff(dbg_buff, nentries * sizeof(u16), &temp_buff);
        if (rc)
                return rc;
 
index b1df2aa6be945a74e0f550a4d901a179e8ec9806..69d0b64e698673d78c8e66e466572c3d60e7f3e1 100644 (file)
@@ -1528,6 +1528,7 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
                     int port, int pf, int vf, u8 mac[]);
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
 void t4_fatal_err(struct adapter *adapter);
+unsigned int t4_chip_rss_size(struct adapter *adapter);
 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
                        int start, int n, const u16 *rspq, unsigned int nrspq);
 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
index 41c8736314f8e465edec748227d22a7c046e9785..581d628f01cc5ea62b7f6441c56c4cd69285c11e 100644 (file)
@@ -179,7 +179,7 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
                len = cudbg_mbytes_to_bytes(len);
                break;
        case CUDBG_RSS:
-               len = RSS_NENTRIES * sizeof(u16);
+               len = t4_chip_rss_size(adap) * sizeof(u16);
                break;
        case CUDBG_RSS_VF_CONF:
                len = adap->params.arch.vfcount *
index 4956e429ae1dabcbaf58132d627acfca3b814035..d3ced0438474db6afcf3199621bd29b5aa90c9d5 100644 (file)
@@ -2021,11 +2021,12 @@ static int rss_show(struct seq_file *seq, void *v, int idx)
 
 static int rss_open(struct inode *inode, struct file *file)
 {
-       int ret;
-       struct seq_tab *p;
        struct adapter *adap = inode->i_private;
+       int ret, nentries;
+       struct seq_tab *p;
 
-       p = seq_open_tab(file, RSS_NENTRIES / 8, 8 * sizeof(u16), 0, rss_show);
+       nentries = t4_chip_rss_size(adap);
+       p = seq_open_tab(file, nentries / 8, 8 * sizeof(u16), 0, rss_show);
        if (!p)
                return -ENOMEM;
 
@@ -2668,10 +2669,14 @@ static const struct file_operations mem_debugfs_fops = {
 
 static int tid_info_show(struct seq_file *seq, void *v)
 {
+       unsigned int tid_start = 0;
        struct adapter *adap = seq->private;
        const struct tid_info *t = &adap->tids;
        enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
 
+       if (chip > CHELSIO_T5)
+               tid_start = t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
+
        if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
                unsigned int sb;
                seq_printf(seq, "Connections in use: %u\n",
@@ -2683,8 +2688,8 @@ static int tid_info_show(struct seq_file *seq, void *v)
                        sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A);
 
                if (sb) {
-                       seq_printf(seq, "TID range: 0..%u/%u..%u", sb - 1,
-                                  adap->tids.hash_base,
+                       seq_printf(seq, "TID range: %u..%u/%u..%u", tid_start,
+                                  sb - 1, adap->tids.hash_base,
                                   t->ntids - 1);
                        seq_printf(seq, ", in use: %u/%u\n",
                                   atomic_read(&t->tids_in_use),
@@ -2709,7 +2714,8 @@ static int tid_info_show(struct seq_file *seq, void *v)
                seq_printf(seq, "Connections in use: %u\n",
                           atomic_read(&t->conns_in_use));
 
-               seq_printf(seq, "TID range: 0..%u", t->ntids - 1);
+               seq_printf(seq, "TID range: %u..%u", tid_start,
+                          tid_start + t->ntids - 1);
                seq_printf(seq, ", in use: %u\n",
                           atomic_read(&t->tids_in_use));
        }
index d18e708c21c5f2d1c0c9ca6b979f9ff8301ef821..44930ca06c4bd1860609fb69da04224739b59e19 100644 (file)
@@ -4927,6 +4927,14 @@ void t4_intr_disable(struct adapter *adapter)
        t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
 }
 
+unsigned int t4_chip_rss_size(struct adapter *adap)
+{
+       if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+               return RSS_NENTRIES;
+       else
+               return T6_RSS_NENTRIES;
+}
+
 /**
  *     t4_config_rss_range - configure a portion of the RSS mapping table
  *     @adapter: the adapter
@@ -5065,10 +5073,11 @@ static int rd_rss_row(struct adapter *adap, int row, u32 *val)
  */
 int t4_read_rss(struct adapter *adapter, u16 *map)
 {
+       int i, ret, nentries;
        u32 val;
-       int i, ret;
 
-       for (i = 0; i < RSS_NENTRIES / 2; ++i) {
+       nentries = t4_chip_rss_size(adapter);
+       for (i = 0; i < nentries / 2; ++i) {
                ret = rd_rss_row(adapter, i, &val);
                if (ret)
                        return ret;
index 872a91b1930c04302580d31fa40c4c599528bb1e..361d5032c28849ab2fb6350a2e99d7a229eeaa38 100644 (file)
 #include <linux/types.h>
 
 enum {
-       NCHAN          = 4,     /* # of HW channels */
-       MAX_MTU        = 9600,  /* max MAC MTU, excluding header + FCS */
-       EEPROMSIZE     = 17408, /* Serial EEPROM physical size */
-       EEPROMVSIZE    = 32768, /* Serial EEPROM virtual address space size */
-       EEPROMPFSIZE   = 1024,  /* EEPROM writable area size for PFn, n>0 */
-       RSS_NENTRIES   = 2048,  /* # of entries in RSS mapping table */
-       TCB_SIZE       = 128,   /* TCB size */
-       NMTUS          = 16,    /* size of MTU table */
-       NCCTRL_WIN     = 32,    /* # of congestion control windows */
-       NTX_SCHED      = 8,     /* # of HW Tx scheduling queues */
-       PM_NSTATS      = 5,     /* # of PM stats */
-       T6_PM_NSTATS   = 7,     /* # of PM stats in T6 */
-       MBOX_LEN       = 64,    /* mailbox size in bytes */
-       TRACE_LEN      = 112,   /* length of trace data and mask */
-       FILTER_OPT_LEN = 36,    /* filter tuple width for optional components */
+       NCHAN           = 4,    /* # of HW channels */
+       MAX_MTU         = 9600, /* max MAC MTU, excluding header + FCS */
+       EEPROMSIZE      = 17408,/* Serial EEPROM physical size */
+       EEPROMVSIZE     = 32768,/* Serial EEPROM virtual address space size */
+       EEPROMPFSIZE    = 1024, /* EEPROM writable area size for PFn, n>0 */
+       RSS_NENTRIES    = 2048, /* # of entries in RSS mapping table */
+       T6_RSS_NENTRIES = 4096, /* # of entries in RSS mapping table */
+       TCB_SIZE        = 128,  /* TCB size */
+       NMTUS           = 16,   /* size of MTU table */
+       NCCTRL_WIN      = 32,   /* # of congestion control windows */
+       NTX_SCHED       = 8,    /* # of HW Tx scheduling queues */
+       PM_NSTATS       = 5,    /* # of PM stats */
+       T6_PM_NSTATS    = 7,    /* # of PM stats in T6 */
+       MBOX_LEN        = 64,   /* mailbox size in bytes */
+       TRACE_LEN       = 112,  /* length of trace data and mask */
+       FILTER_OPT_LEN  = 36,   /* filter tuple width for optional components */
 };
 
 enum {
index 60cf9e02de5df0b2e6e4d495f9a6e94ca9466817..51b18035d691e68f1622c3a7d0c8dd33295bab2c 100644 (file)
@@ -183,6 +183,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
        CH_PCI_ID_TABLE_FENTRY(0x50a9), /* Custom T580-KR */
        CH_PCI_ID_TABLE_FENTRY(0x50aa), /* Custom T580-CR */
        CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */
+       CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */
 
        /* T6 adapters:
         */
@@ -206,6 +207,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
        CH_PCI_ID_TABLE_FENTRY(0x6084), /* Custom T64100-CR QSFP28 */
        CH_PCI_ID_TABLE_FENTRY(0x6085), /* Custom T6240-SO */
        CH_PCI_ID_TABLE_FENTRY(0x6086), /* Custom T6225-SO-CR */
+       CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */
 CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
 
 #endif /* __T4_PCI_ID_TBL_H__ */
index 6a9527004cb17bbdf77de12fdad162acb487e1b5..9b218f0e5a4cddc285ab89843065de2c0eab9d04 100644 (file)
@@ -43,6 +43,8 @@
 #define ENIC_CQ_MAX            (ENIC_WQ_MAX + ENIC_RQ_MAX)
 #define ENIC_INTR_MAX          (ENIC_CQ_MAX + 2)
 
+#define ENIC_WQ_NAPI_BUDGET    256
+
 #define ENIC_AIC_LARGE_PKT_DIFF        3
 
 struct enic_msix_entry {
index d98676e43e03d390f3e7564f70cfa8b4b1a4fb95..f202ba72a8116af610216d756596747a27741104 100644 (file)
@@ -1500,7 +1500,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
        unsigned int cq_wq = enic_cq_wq(enic, 0);
        unsigned int intr = enic_legacy_io_intr();
        unsigned int rq_work_to_do = budget;
-       unsigned int wq_work_to_do = -1; /* no limit */
+       unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
        unsigned int  work_done, rq_work_done = 0, wq_work_done;
        int err;
 
@@ -1598,7 +1598,7 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
        struct vnic_wq *wq = &enic->wq[wq_index];
        unsigned int cq;
        unsigned int intr;
-       unsigned int wq_work_to_do = -1; /* clean all desc possible */
+       unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
        unsigned int wq_work_done;
        unsigned int wq_irq;
 
index a9e2b32834c58f775475d37a419aa80db88af098..82e9a80345577d4b7465f7c1d387baa5b9a0d699 100644 (file)
@@ -278,6 +278,8 @@ struct hnae3_ae_dev {
  *   Set vlan filter config of Ports
  * set_vf_vlan_filter()
  *   Set vlan filter config of vf
+ * enable_hw_strip_rxvtag()
+ *   Enable/disable hardware strip vlan tag of packets received
  */
 struct hnae3_ae_ops {
        int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -384,8 +386,16 @@ struct hnae3_ae_ops {
                               u16 vlan_id, bool is_kill);
        int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid,
                                  u16 vlan, u8 qos, __be16 proto);
+       int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
        void (*reset_event)(struct hnae3_handle *handle,
                            enum hnae3_reset_type reset);
+       void (*get_channels)(struct hnae3_handle *handle,
+                            struct ethtool_channels *ch);
+       void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
+                                     u16 *free_tqps, u16 *max_rss_size);
+       int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num);
+       void (*get_flowctrl_adv)(struct hnae3_handle *handle,
+                                u32 *flowctrl_adv);
 };
 
 struct hnae3_dcb_ops {
index c2c13238db526c43e67793c925c3c0b71cdc2f97..320ae8892a68ef49d48e04794b2e0bec703e86ff 100644 (file)
@@ -723,6 +723,58 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
        hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
 }
 
+static int hns3_fill_desc_vtags(struct sk_buff *skb,
+                               struct hns3_enet_ring *tx_ring,
+                               u32 *inner_vlan_flag,
+                               u32 *out_vlan_flag,
+                               u16 *inner_vtag,
+                               u16 *out_vtag)
+{
+#define HNS3_TX_VLAN_PRIO_SHIFT 13
+
+       if (skb->protocol == htons(ETH_P_8021Q) &&
+           !(tx_ring->tqp->handle->kinfo.netdev->features &
+           NETIF_F_HW_VLAN_CTAG_TX)) {
+               /* When HW VLAN acceleration is turned off, and the stack
+                * sets the protocol to 802.1q, the driver just need to
+                * set the protocol to the encapsulated ethertype.
+                */
+               skb->protocol = vlan_get_protocol(skb);
+               return 0;
+       }
+
+       if (skb_vlan_tag_present(skb)) {
+               u16 vlan_tag;
+
+               vlan_tag = skb_vlan_tag_get(skb);
+               vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
+
+               /* Based on hw strategy, use out_vtag in two layer tag case,
+                * and use inner_vtag in one tag case.
+                */
+               if (skb->protocol == htons(ETH_P_8021Q)) {
+                       hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
+                       *out_vtag = vlan_tag;
+               } else {
+                       hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
+                       *inner_vtag = vlan_tag;
+               }
+       } else if (skb->protocol == htons(ETH_P_8021Q)) {
+               struct vlan_ethhdr *vhdr;
+               int rc;
+
+               rc = skb_cow_head(skb, 0);
+               if (rc < 0)
+                       return rc;
+               vhdr = (struct vlan_ethhdr *)skb->data;
+               vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
+                                       << HNS3_TX_VLAN_PRIO_SHIFT);
+       }
+
+       skb->protocol = vlan_get_protocol(skb);
+       return 0;
+}
+
 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
                          int size, dma_addr_t dma, int frag_end,
                          enum hns_desc_type type)
@@ -733,6 +785,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
        u16 bdtp_fe_sc_vld_ra_ri = 0;
        u32 type_cs_vlan_tso = 0;
        struct sk_buff *skb;
+       u16 inner_vtag = 0;
+       u16 out_vtag = 0;
        u32 paylen = 0;
        u16 mss = 0;
        __be16 protocol;
@@ -756,15 +810,16 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
                skb = (struct sk_buff *)priv;
                paylen = skb->len;
 
+               ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
+                                          &ol_type_vlan_len_msec,
+                                          &inner_vtag, &out_vtag);
+               if (unlikely(ret))
+                       return ret;
+
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        skb_reset_mac_len(skb);
                        protocol = skb->protocol;
 
-                       /* vlan packet*/
-                       if (protocol == htons(ETH_P_8021Q)) {
-                               protocol = vlan_get_protocol(skb);
-                               skb->protocol = protocol;
-                       }
                        ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
                        if (ret)
                                return ret;
@@ -790,6 +845,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
                        cpu_to_le32(type_cs_vlan_tso);
                desc->tx.paylen = cpu_to_le32(paylen);
                desc->tx.mss = cpu_to_le16(mss);
+               desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
+               desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
        }
 
        /* move ring pointer to next.*/
@@ -1032,6 +1089,9 @@ static int hns3_nic_set_features(struct net_device *netdev,
                                 netdev_features_t features)
 {
        struct hns3_nic_priv *priv = netdev_priv(netdev);
+       struct hnae3_handle *h = priv->ae_handle;
+       netdev_features_t changed;
+       int ret;
 
        if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
                priv->ops.fill_desc = hns3_fill_desc_tso;
@@ -1041,6 +1101,17 @@ static int hns3_nic_set_features(struct net_device *netdev,
                priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
        }
 
+       changed = netdev->features ^ features;
+       if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
+               if (features & NETIF_F_HW_VLAN_CTAG_RX)
+                       ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
+               else
+                       ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
+
+               if (ret)
+                       return ret;
+       }
+
        netdev->features = features;
        return 0;
 }
@@ -1492,6 +1563,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
 
        netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_HW_VLAN_CTAG_FILTER |
+               NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
                NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
@@ -1506,6 +1578,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
 
        netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_HW_VLAN_CTAG_FILTER |
+               NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
                NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
@@ -2085,6 +2158,22 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 
        prefetchw(skb->data);
 
+       /* Based on hw strategy, the tag offloaded will be stored at
+        * ot_vlan_tag in two layer tag case, and stored at vlan_tag
+        * in one layer tag case.
+        */
+       if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+               u16 vlan_tag;
+
+               vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+               if (!(vlan_tag & VLAN_VID_MASK))
+                       vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+               if (vlan_tag & VLAN_VID_MASK)
+                       __vlan_hwaccel_put_tag(skb,
+                                              htons(ETH_P_8021Q),
+                                              vlan_tag);
+       }
+
        bnum = 1;
        if (length <= HNS3_RX_HEAD_SIZE) {
                memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
@@ -2651,6 +2740,19 @@ err:
        return ret;
 }
 
+static void hns3_put_ring_config(struct hns3_nic_priv *priv)
+{
+       struct hnae3_handle *h = priv->ae_handle;
+       int i;
+
+       for (i = 0; i < h->kinfo.num_tqps; i++) {
+               devm_kfree(priv->dev, priv->ring_data[i].ring);
+               devm_kfree(priv->dev,
+                          priv->ring_data[i + h->kinfo.num_tqps].ring);
+       }
+       devm_kfree(priv->dev, priv->ring_data);
+}
+
 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
 {
        int ret;
@@ -2787,8 +2889,12 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
                        h->ae_algo->ops->reset_queue(h, i);
 
                hns3_fini_ring(priv->ring_data[i].ring);
+               devm_kfree(priv->dev, priv->ring_data[i].ring);
                hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
+               devm_kfree(priv->dev,
+                          priv->ring_data[i + h->kinfo.num_tqps].ring);
        }
+       devm_kfree(priv->dev, priv->ring_data);
 
        return 0;
 }
@@ -3162,6 +3268,115 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
        return ret;
 }
 
+static u16 hns3_get_max_available_channels(struct net_device *netdev)
+{
+       struct hnae3_handle *h = hns3_get_handle(netdev);
+       u16 free_tqps, max_rss_size, max_tqps;
+
+       h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
+       max_tqps = h->kinfo.num_tc * max_rss_size;
+
+       return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
+}
+
+static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
+{
+       struct hns3_nic_priv *priv = netdev_priv(netdev);
+       struct hnae3_handle *h = hns3_get_handle(netdev);
+       int ret;
+
+       ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
+       if (ret)
+               return ret;
+
+       ret = hns3_get_ring_config(priv);
+       if (ret)
+               return ret;
+
+       ret = hns3_nic_init_vector_data(priv);
+       if (ret)
+               goto err_uninit_vector;
+
+       ret = hns3_init_all_ring(priv);
+       if (ret)
+               goto err_put_ring;
+
+       return 0;
+
+err_put_ring:
+       hns3_put_ring_config(priv);
+err_uninit_vector:
+       hns3_nic_uninit_vector_data(priv);
+       return ret;
+}
+
+static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
+{
+       return (new_tqp_num / num_tc) * num_tc;
+}
+
+int hns3_set_channels(struct net_device *netdev,
+                     struct ethtool_channels *ch)
+{
+       struct hns3_nic_priv *priv = netdev_priv(netdev);
+       struct hnae3_handle *h = hns3_get_handle(netdev);
+       struct hnae3_knic_private_info *kinfo = &h->kinfo;
+       bool if_running = netif_running(netdev);
+       u32 new_tqp_num = ch->combined_count;
+       u16 org_tqp_num;
+       int ret;
+
+       if (ch->rx_count || ch->tx_count)
+               return -EINVAL;
+
+       if (new_tqp_num > hns3_get_max_available_channels(netdev) ||
+           new_tqp_num < kinfo->num_tc) {
+               dev_err(&netdev->dev,
+                       "Change tqps fail, the tqp range is from %d to %d",
+                       kinfo->num_tc,
+                       hns3_get_max_available_channels(netdev));
+               return -EINVAL;
+       }
+
+       new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
+       if (kinfo->num_tqps == new_tqp_num)
+               return 0;
+
+       if (if_running)
+               dev_close(netdev);
+
+       hns3_clear_all_ring(h);
+
+       ret = hns3_nic_uninit_vector_data(priv);
+       if (ret) {
+               dev_err(&netdev->dev,
+                       "Unbind vector with tqp fail, nothing is changed");
+               goto open_netdev;
+       }
+
+       hns3_uninit_all_ring(priv);
+
+       org_tqp_num = h->kinfo.num_tqps;
+       ret = hns3_modify_tqp_num(netdev, new_tqp_num);
+       if (ret) {
+               ret = hns3_modify_tqp_num(netdev, org_tqp_num);
+               if (ret) {
+                       /* If revert to old tqp failed, fatal error occurred */
+                       dev_err(&netdev->dev,
+                               "Revert to old tqp num fail, ret=%d", ret);
+                       return ret;
+               }
+               dev_info(&netdev->dev,
+                        "Change tqp num fail, Revert to old tqp num");
+       }
+
+open_netdev:
+       if (if_running)
+               dev_open(netdev);
+
+       return ret;
+}
+
 static const struct hnae3_client_ops client_ops = {
        .init_instance = hns3_client_init,
        .uninit_instance = hns3_client_uninit,
index 8a9de759957bb694ceff04f57fc2822f29865b6f..a2a7ea3e9a3a2bd2a8c4faf5dc8c070fa3502c51 100644 (file)
@@ -595,6 +595,8 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
        (((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
 
 void hns3_ethtool_set_ops(struct net_device *netdev);
+int hns3_set_channels(struct net_device *netdev,
+                     struct ethtool_channels *ch);
 
 bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
 int hns3_init_all_ring(struct hns3_nic_priv *priv);
index 65a69b4394570a666a2e62be821edc92e5964aa1..2ae4d39798c104a04c831c632a79b25e2c0797ba 100644 (file)
@@ -559,10 +559,23 @@ static void hns3_get_pauseparam(struct net_device *netdev,
                        &param->rx_pause, &param->tx_pause);
 }
 
+static int hns3_set_pauseparam(struct net_device *netdev,
+                              struct ethtool_pauseparam *param)
+{
+       struct hnae3_handle *h = hns3_get_handle(netdev);
+
+       if (h->ae_algo->ops->set_pauseparam)
+               return h->ae_algo->ops->set_pauseparam(h, param->autoneg,
+                                                      param->rx_pause,
+                                                      param->tx_pause);
+       return -EOPNOTSUPP;
+}
+
 static int hns3_get_link_ksettings(struct net_device *netdev,
                                   struct ethtool_link_ksettings *cmd)
 {
        struct hnae3_handle *h = hns3_get_handle(netdev);
+       u32 flowctrl_adv = 0;
        u32 supported_caps;
        u32 advertised_caps;
        u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
@@ -638,6 +651,8 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
                if (!cmd->base.autoneg)
                        advertised_caps &= ~HNS3_LM_AUTONEG_BIT;
 
+               advertised_caps &= ~HNS3_LM_PAUSE_BIT;
+
                /* now, map driver link modes to ethtool link modes */
                hns3_driv_to_eth_caps(supported_caps, cmd, false);
                hns3_driv_to_eth_caps(advertised_caps, cmd, true);
@@ -650,6 +665,18 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
        /* 4.mdio_support */
        cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;
 
+       /* 5.get flow control setttings */
+       if (h->ae_algo->ops->get_flowctrl_adv)
+               h->ae_algo->ops->get_flowctrl_adv(h, &flowctrl_adv);
+
+       if (flowctrl_adv & ADVERTISED_Pause)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    Pause);
+
+       if (flowctrl_adv & ADVERTISED_Asym_Pause)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    Asym_Pause);
+
        return 0;
 }
 
@@ -730,7 +757,7 @@ static int hns3_get_rxnfc(struct net_device *netdev,
 
        switch (cmd->cmd) {
        case ETHTOOL_GRXRINGS:
-               cmd->data = h->kinfo.num_tc * h->kinfo.rss_size;
+               cmd->data = h->kinfo.rss_size;
                break;
        case ETHTOOL_GRXFH:
                return h->ae_algo->ops->get_rss_tuple(h, cmd);
@@ -849,6 +876,15 @@ static int hns3_nway_reset(struct net_device *netdev)
        return genphy_restart_aneg(phy);
 }
 
+void hns3_get_channels(struct net_device *netdev,
+                      struct ethtool_channels *ch)
+{
+       struct hnae3_handle *h = hns3_get_handle(netdev);
+
+       if (h->ae_algo->ops->get_channels)
+               h->ae_algo->ops->get_channels(h, ch);
+}
+
 static const struct ethtool_ops hns3vf_ethtool_ops = {
        .get_drvinfo = hns3_get_drvinfo,
        .get_ringparam = hns3_get_ringparam,
@@ -871,6 +907,7 @@ static const struct ethtool_ops hns3_ethtool_ops = {
        .get_ringparam = hns3_get_ringparam,
        .set_ringparam = hns3_set_ringparam,
        .get_pauseparam = hns3_get_pauseparam,
+       .set_pauseparam = hns3_set_pauseparam,
        .get_strings = hns3_get_strings,
        .get_ethtool_stats = hns3_get_stats,
        .get_sset_count = hns3_get_sset_count,
@@ -883,6 +920,8 @@ static const struct ethtool_ops hns3_ethtool_ops = {
        .get_link_ksettings = hns3_get_link_ksettings,
        .set_link_ksettings = hns3_set_link_ksettings,
        .nway_reset = hns3_nway_reset,
+       .get_channels = hns3_get_channels,
+       .set_channels = hns3_set_channels,
 };
 
 void hns3_ethtool_set_ops(struct net_device *netdev)
index ce5ed88450427c02fb110edde5dd4db7bc5cdb8e..f5baba216e48b2a53756725acb19fb5a5704374f 100644 (file)
@@ -180,6 +180,10 @@ enum hclge_opcode_type {
        /* Promisuous mode command */
        HCLGE_OPC_CFG_PROMISC_MODE      = 0x0E01,
 
+       /* Vlan offload command */
+       HCLGE_OPC_VLAN_PORT_TX_CFG      = 0x0F01,
+       HCLGE_OPC_VLAN_PORT_RX_CFG      = 0x0F02,
+
        /* Interrupts cmd */
        HCLGE_OPC_ADD_RING_TO_VECTOR    = 0x1503,
        HCLGE_OPC_DEL_RING_TO_VECTOR    = 0x1504,
@@ -191,6 +195,7 @@ enum hclge_opcode_type {
        HCLGE_OPC_MAC_VLAN_INSERT           = 0x1003,
        HCLGE_OPC_MAC_ETHTYPE_ADD           = 0x1010,
        HCLGE_OPC_MAC_ETHTYPE_REMOVE    = 0x1011,
+       HCLGE_OPC_MAC_VLAN_MASK_SET     = 0x1012,
 
        /* Multicast linear table cmd */
        HCLGE_OPC_MTA_MAC_MODE_CFG          = 0x1020,
@@ -399,6 +404,8 @@ struct hclge_pf_res_cmd {
 #define HCLGE_CFG_MAC_ADDR_H_M GENMASK(15, 0)
 #define HCLGE_CFG_DEFAULT_SPEED_S      16
 #define HCLGE_CFG_DEFAULT_SPEED_M      GENMASK(23, 16)
+#define HCLGE_CFG_RSS_SIZE_S   24
+#define HCLGE_CFG_RSS_SIZE_M   GENMASK(31, 24)
 
 struct hclge_cfg_param_cmd {
        __le32 offset;
@@ -587,6 +594,15 @@ struct hclge_mac_vlan_tbl_entry_cmd {
        u8      rsv2[6];
 };
 
+#define HCLGE_VLAN_MASK_EN_B           0x0
+struct hclge_mac_vlan_mask_entry_cmd {
+       u8 rsv0[2];
+       u8 vlan_mask;
+       u8 rsv1;
+       u8 mac_mask[6];
+       u8 rsv2[14];
+};
+
 #define HCLGE_CFG_MTA_MAC_SEL_S                0x0
 #define HCLGE_CFG_MTA_MAC_SEL_M                GENMASK(1, 0)
 #define HCLGE_CFG_MTA_MAC_EN_B         0x7
@@ -658,6 +674,47 @@ struct hclge_vlan_filter_vf_cfg_cmd {
        u8  vf_bitmap[16];
 };
 
+#define HCLGE_ACCEPT_TAG_B             0
+#define HCLGE_ACCEPT_UNTAG_B           1
+#define HCLGE_PORT_INS_TAG1_EN_B       2
+#define HCLGE_PORT_INS_TAG2_EN_B       3
+#define HCLGE_CFG_NIC_ROCE_SEL_B       4
+struct hclge_vport_vtag_tx_cfg_cmd {
+       u8 vport_vlan_cfg;
+       u8 vf_offset;
+       u8 rsv1[2];
+       __le16 def_vlan_tag1;
+       __le16 def_vlan_tag2;
+       u8 vf_bitmap[8];
+       u8 rsv2[8];
+};
+
+#define HCLGE_REM_TAG1_EN_B            0
+#define HCLGE_REM_TAG2_EN_B            1
+#define HCLGE_SHOW_TAG1_EN_B           2
+#define HCLGE_SHOW_TAG2_EN_B           3
+struct hclge_vport_vtag_rx_cfg_cmd {
+       u8 vport_vlan_cfg;
+       u8 vf_offset;
+       u8 rsv1[6];
+       u8 vf_bitmap[8];
+       u8 rsv2[8];
+};
+
+struct hclge_tx_vlan_type_cfg_cmd {
+       __le16 ot_vlan_type;
+       __le16 in_vlan_type;
+       u8 rsv[20];
+};
+
+struct hclge_rx_vlan_type_cfg_cmd {
+       __le16 ot_fst_vlan_type;
+       __le16 ot_sec_vlan_type;
+       __le16 in_fst_vlan_type;
+       __le16 in_sec_vlan_type;
+       u8 rsv[16];
+};
+
 struct hclge_cfg_com_tqp_queue_cmd {
        __le16 tqp_id;
        __le16 stream_id;
index e97fd6654e5e46ae9ca25c8e7a10de2a2f22f3f6..0874acf5ef39b050e41311950b650f670a479f07 100644 (file)
@@ -982,6 +982,10 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
        cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
                                            HCLGE_CFG_DEFAULT_SPEED_M,
                                            HCLGE_CFG_DEFAULT_SPEED_S);
+       cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
+                                          HCLGE_CFG_RSS_SIZE_M,
+                                          HCLGE_CFG_RSS_SIZE_S);
+
        for (i = 0; i < ETH_ALEN; i++)
                cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
 
@@ -1059,7 +1063,7 @@ static int hclge_configure(struct hclge_dev *hdev)
 
        hdev->num_vmdq_vport = cfg.vmdq_vport_num;
        hdev->base_tqp_pid = 0;
-       hdev->rss_size_max = 1;
+       hdev->rss_size_max = cfg.rss_size_max;
        hdev->rx_buf_len = cfg.rx_buf_len;
        ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
        hdev->hw.mac.media_type = cfg.media_type;
@@ -1096,10 +1100,7 @@ static int hclge_configure(struct hclge_dev *hdev)
        for (i = 0; i < hdev->tm_info.num_tc; i++)
                hnae_set_bit(hdev->hw_tc_map, i, 1);
 
-       if (!hdev->num_vmdq_vport && !hdev->num_req_vfs)
-               hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
-       else
-               hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE;
+       hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
 
        return ret;
 }
@@ -2133,28 +2134,6 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
        return 0;
 }
 
-static int hclge_query_autoneg_result(struct hclge_dev *hdev)
-{
-       struct hclge_mac *mac = &hdev->hw.mac;
-       struct hclge_query_an_speed_dup_cmd *req;
-       struct hclge_desc desc;
-       int ret;
-
-       req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
-
-       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
-       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-       if (ret) {
-               dev_err(&hdev->pdev->dev,
-                       "autoneg result query cmd failed %d.\n", ret);
-               return ret;
-       }
-
-       mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B);
-
-       return 0;
-}
-
 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
 {
        struct hclge_config_auto_neg_cmd *req;
@@ -2190,15 +2169,42 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
 {
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
+       struct phy_device *phydev = hdev->hw.mac.phydev;
 
-       hclge_query_autoneg_result(hdev);
+       if (phydev)
+               return phydev->autoneg;
 
        return hdev->hw.mac.autoneg;
 }
 
+static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
+                                          bool mask_vlan,
+                                          u8 *mac_mask)
+{
+       struct hclge_mac_vlan_mask_entry_cmd *req;
+       struct hclge_desc desc;
+       int status;
+
+       req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
+
+       hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
+                    mask_vlan ? 1 : 0);
+       ether_addr_copy(req->mac_mask, mac_mask);
+
+       status = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (status)
+               dev_err(&hdev->pdev->dev,
+                       "Config mac_vlan_mask failed for cmd_send, ret =%d\n",
+                       status);
+
+       return status;
+}
+
 static int hclge_mac_init(struct hclge_dev *hdev)
 {
        struct hclge_mac *mac = &hdev->hw.mac;
+       u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
        int ret;
 
        ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
@@ -2224,7 +2230,19 @@ static int hclge_mac_init(struct hclge_dev *hdev)
                return ret;
        }
 
-       return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
+       ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
+       if (ret) {
+               dev_err(&hdev->pdev->dev,
+                       "set mta filter mode fail ret=%d\n", ret);
+               return ret;
+       }
+
+       ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
+       if (ret)
+               dev_err(&hdev->pdev->dev,
+                       "set default mac_vlan_mask fail ret=%d\n", ret);
+
+       return ret;
 }
 
 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
@@ -4339,27 +4357,185 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
        return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
 }
 
+static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
+{
+       struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
+       struct hclge_vport_vtag_tx_cfg_cmd *req;
+       struct hclge_dev *hdev = vport->back;
+       struct hclge_desc desc;
+       int status;
+
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
+
+       req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
+       req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
+       req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
+       hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B,
+                    vcfg->accept_tag ? 1 : 0);
+       hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B,
+                    vcfg->accept_untag ? 1 : 0);
+       hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
+                    vcfg->insert_tag1_en ? 1 : 0);
+       hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
+                    vcfg->insert_tag2_en ? 1 : 0);
+       hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
+
+       req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+       req->vf_bitmap[req->vf_offset] =
+               1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+       status = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (status)
+               dev_err(&hdev->pdev->dev,
+                       "Send port txvlan cfg command fail, ret =%d\n",
+                       status);
+
+       return status;
+}
+
+static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
+{
+       struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
+       struct hclge_vport_vtag_rx_cfg_cmd *req;
+       struct hclge_dev *hdev = vport->back;
+       struct hclge_desc desc;
+       int status;
+
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
+
+       req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
+       hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
+                    vcfg->strip_tag1_en ? 1 : 0);
+       hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
+                    vcfg->strip_tag2_en ? 1 : 0);
+       hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
+                    vcfg->vlan1_vlan_prionly ? 1 : 0);
+       hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
+                    vcfg->vlan2_vlan_prionly ? 1 : 0);
+
+       req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+       req->vf_bitmap[req->vf_offset] =
+               1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+       status = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (status)
+               dev_err(&hdev->pdev->dev,
+                       "Send port rxvlan cfg command fail, ret =%d\n",
+                       status);
+
+       return status;
+}
+
+static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
+{
+       struct hclge_rx_vlan_type_cfg_cmd *rx_req;
+       struct hclge_tx_vlan_type_cfg_cmd *tx_req;
+       struct hclge_desc desc;
+       int status;
+
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
+       rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
+       rx_req->ot_fst_vlan_type =
+               cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
+       rx_req->ot_sec_vlan_type =
+               cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
+       rx_req->in_fst_vlan_type =
+               cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
+       rx_req->in_sec_vlan_type =
+               cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
+
+       status = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (status) {
+               dev_err(&hdev->pdev->dev,
+                       "Send rxvlan protocol type command fail, ret =%d\n",
+                       status);
+               return status;
+       }
+
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
+
+       tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
+       tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
+       tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
+
+       status = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (status)
+               dev_err(&hdev->pdev->dev,
+                       "Send txvlan protocol type command fail, ret =%d\n",
+                       status);
+
+       return status;
+}
+
 static int hclge_init_vlan_config(struct hclge_dev *hdev)
 {
-#define HCLGE_VLAN_TYPE_VF_TABLE   0
-#define HCLGE_VLAN_TYPE_PORT_TABLE 1
+#define HCLGE_FILTER_TYPE_VF           0
+#define HCLGE_FILTER_TYPE_PORT         1
+#define HCLGE_DEF_VLAN_TYPE            0x8100
+
        struct hnae3_handle *handle;
+       struct hclge_vport *vport;
        int ret;
+       int i;
 
-       ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
-                                        true);
+       ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
        if (ret)
                return ret;
 
-       ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
-                                        true);
+       ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
        if (ret)
                return ret;
 
+       hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
+       hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
+       hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
+       hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
+       hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
+       hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
+
+       ret = hclge_set_vlan_protocol_type(hdev);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < hdev->num_alloc_vport; i++) {
+               vport = &hdev->vport[i];
+               vport->txvlan_cfg.accept_tag = true;
+               vport->txvlan_cfg.accept_untag = true;
+               vport->txvlan_cfg.insert_tag1_en = false;
+               vport->txvlan_cfg.insert_tag2_en = false;
+               vport->txvlan_cfg.default_tag1 = 0;
+               vport->txvlan_cfg.default_tag2 = 0;
+
+               ret = hclge_set_vlan_tx_offload_cfg(vport);
+               if (ret)
+                       return ret;
+
+               vport->rxvlan_cfg.strip_tag1_en = false;
+               vport->rxvlan_cfg.strip_tag2_en = true;
+               vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+               vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+               ret = hclge_set_vlan_rx_offload_cfg(vport);
+               if (ret)
+                       return ret;
+       }
+
        handle = &hdev->vport[0].nic;
        return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
 }
 
+static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+
+       vport->rxvlan_cfg.strip_tag1_en = false;
+       vport->rxvlan_cfg.strip_tag2_en = enable;
+       vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+       vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+       return hclge_set_vlan_rx_offload_cfg(vport);
+}
+
 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
 {
        struct hclge_vport *vport = hclge_get_vport(handle);
@@ -4481,6 +4657,103 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle)
        return hdev->fw_version;
 }
 
+static void hclge_get_flowctrl_adv(struct hnae3_handle *handle,
+                                  u32 *flowctrl_adv)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_dev *hdev = vport->back;
+       struct phy_device *phydev = hdev->hw.mac.phydev;
+
+       if (!phydev)
+               return;
+
+       *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) |
+                        (phydev->advertising & ADVERTISED_Asym_Pause);
+}
+
+static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
+{
+       struct phy_device *phydev = hdev->hw.mac.phydev;
+
+       if (!phydev)
+               return;
+
+       phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+       if (rx_en)
+               phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+
+       if (tx_en)
+               phydev->advertising ^= ADVERTISED_Asym_Pause;
+}
+
+static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
+{
+       enum hclge_fc_mode fc_mode;
+       int ret;
+
+       if (rx_en && tx_en)
+               fc_mode = HCLGE_FC_FULL;
+       else if (rx_en && !tx_en)
+               fc_mode = HCLGE_FC_RX_PAUSE;
+       else if (!rx_en && tx_en)
+               fc_mode = HCLGE_FC_TX_PAUSE;
+       else
+               fc_mode = HCLGE_FC_NONE;
+
+       if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
+               hdev->fc_mode_last_time = fc_mode;
+               return 0;
+       }
+
+       ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
+       if (ret) {
+               dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
+                       ret);
+               return ret;
+       }
+
+       hdev->tm_info.fc_mode = fc_mode;
+
+       return 0;
+}
+
+int hclge_cfg_flowctrl(struct hclge_dev *hdev)
+{
+       struct phy_device *phydev = hdev->hw.mac.phydev;
+       u16 remote_advertising = 0;
+       u16 local_advertising = 0;
+       u32 rx_pause, tx_pause;
+       u8 flowctl;
+
+       if (!phydev->link || !phydev->autoneg)
+               return 0;
+
+       if (phydev->advertising & ADVERTISED_Pause)
+               local_advertising = ADVERTISE_PAUSE_CAP;
+
+       if (phydev->advertising & ADVERTISED_Asym_Pause)
+               local_advertising |= ADVERTISE_PAUSE_ASYM;
+
+       if (phydev->pause)
+               remote_advertising = LPA_PAUSE_CAP;
+
+       if (phydev->asym_pause)
+               remote_advertising |= LPA_PAUSE_ASYM;
+
+       flowctl = mii_resolve_flowctrl_fdx(local_advertising,
+                                          remote_advertising);
+       tx_pause = flowctl & FLOW_CTRL_TX;
+       rx_pause = flowctl & FLOW_CTRL_RX;
+
+       if (phydev->duplex == HCLGE_MAC_HALF) {
+               tx_pause = 0;
+               rx_pause = 0;
+       }
+
+       return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
+}
+
 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
                                 u32 *rx_en, u32 *tx_en)
 {
@@ -4510,6 +4783,41 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
        }
 }
 
+static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
+                               u32 rx_en, u32 tx_en)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_dev *hdev = vport->back;
+       struct phy_device *phydev = hdev->hw.mac.phydev;
+       u32 fc_autoneg;
+
+       /* Only support flow control negotiation for netdev with
+        * phy attached for now.
+        */
+       if (!phydev)
+               return -EOPNOTSUPP;
+
+       fc_autoneg = hclge_get_autoneg(handle);
+       if (auto_neg != fc_autoneg) {
+               dev_info(&hdev->pdev->dev,
+                        "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
+               return -EOPNOTSUPP;
+       }
+
+       if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
+               dev_info(&hdev->pdev->dev,
+                        "Priority flow control enabled. Cannot set link flow control.\n");
+               return -EOPNOTSUPP;
+       }
+
+       hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
+
+       if (!fc_autoneg)
+               return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
+
+       return phy_start_aneg(phydev);
+}
+
 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
                                          u8 *auto_neg, u32 *speed, u8 *duplex)
 {
@@ -5002,6 +5310,136 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
        ae_dev->priv = NULL;
 }
 
+static u32 hclge_get_max_channels(struct hnae3_handle *handle)
+{
+       struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_dev *hdev = vport->back;
+
+       return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
+}
+
+static void hclge_get_channels(struct hnae3_handle *handle,
+                              struct ethtool_channels *ch)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+
+       ch->max_combined = hclge_get_max_channels(handle);
+       ch->other_count = 1;
+       ch->max_other = 1;
+       ch->combined_count = vport->alloc_tqps;
+}
+
+static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
+                                       u16 *free_tqps, u16 *max_rss_size)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_dev *hdev = vport->back;
+       u16 temp_tqps = 0;
+       int i;
+
+       for (i = 0; i < hdev->num_tqps; i++) {
+               if (!hdev->htqp[i].alloced)
+                       temp_tqps++;
+       }
+       *free_tqps = temp_tqps;
+       *max_rss_size = hdev->rss_size_max;
+}
+
+static void hclge_release_tqp(struct hclge_vport *vport)
+{
+       struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+       struct hclge_dev *hdev = vport->back;
+       int i;
+
+       for (i = 0; i < kinfo->num_tqps; i++) {
+               struct hclge_tqp *tqp =
+                       container_of(kinfo->tqp[i], struct hclge_tqp, q);
+
+               tqp->q.handle = NULL;
+               tqp->q.tqp_index = 0;
+               tqp->alloced = false;
+       }
+
+       devm_kfree(&hdev->pdev->dev, kinfo->tqp);
+       kinfo->tqp = NULL;
+}
+
+static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+       struct hclge_dev *hdev = vport->back;
+       int cur_rss_size = kinfo->rss_size;
+       int cur_tqps = kinfo->num_tqps;
+       u16 tc_offset[HCLGE_MAX_TC_NUM];
+       u16 tc_valid[HCLGE_MAX_TC_NUM];
+       u16 tc_size[HCLGE_MAX_TC_NUM];
+       u16 roundup_size;
+       u32 *rss_indir;
+       int ret, i;
+
+       hclge_release_tqp(vport);
+
+       ret = hclge_knic_setup(vport, new_tqps_num);
+       if (ret) {
+               dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
+               return ret;
+       }
+
+       ret = hclge_map_tqp_to_vport(hdev, vport);
+       if (ret) {
+               dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
+               return ret;
+       }
+
+       ret = hclge_tm_schd_init(hdev);
+       if (ret) {
+               dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
+               return ret;
+       }
+
+       roundup_size = roundup_pow_of_two(kinfo->rss_size);
+       roundup_size = ilog2(roundup_size);
+       /* Set the RSS TC mode according to the new RSS size */
+       for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+               tc_valid[i] = 0;
+
+               if (!(hdev->hw_tc_map & BIT(i)))
+                       continue;
+
+               tc_valid[i] = 1;
+               tc_size[i] = roundup_size;
+               tc_offset[i] = kinfo->rss_size * i;
+       }
+       ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+       if (ret)
+               return ret;
+
+       /* Reinitializes the rss indirect table according to the new RSS size */
+       rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+       if (!rss_indir)
+               return -ENOMEM;
+
+       for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+               rss_indir[i] = i % kinfo->rss_size;
+
+       ret = hclge_set_rss(handle, rss_indir, NULL, 0);
+       if (ret)
+               dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
+                       ret);
+
+       kfree(rss_indir);
+
+       if (!ret)
+               dev_info(&hdev->pdev->dev,
+                        "Channels changed, rss_size from %d to %d, tqps from %d to %d",
+                        cur_rss_size, kinfo->rss_size,
+                        cur_tqps, kinfo->rss_size * kinfo->num_tc);
+
+       return ret;
+}
+
 static const struct hnae3_ae_ops hclge_ops = {
        .init_ae_dev = hclge_init_ae_dev,
        .uninit_ae_dev = hclge_uninit_ae_dev,
@@ -5035,6 +5473,7 @@ static const struct hnae3_ae_ops hclge_ops = {
        .set_autoneg = hclge_set_autoneg,
        .get_autoneg = hclge_get_autoneg,
        .get_pauseparam = hclge_get_pauseparam,
+       .set_pauseparam = hclge_set_pauseparam,
        .set_mtu = hclge_set_mtu,
        .reset_queue = hclge_reset_tqp,
        .get_stats = hclge_get_stats,
@@ -5045,7 +5484,12 @@ static const struct hnae3_ae_ops hclge_ops = {
        .get_mdix_mode = hclge_get_mdix_mode,
        .set_vlan_filter = hclge_set_port_vlan_filter,
        .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
+       .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
        .reset_event = hclge_reset_event,
+       .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
+       .set_channels = hclge_set_channels,
+       .get_channels = hclge_get_channels,
+       .get_flowctrl_adv = hclge_get_flowctrl_adv,
 };
 
 static struct hnae3_ae_algo ae_algo = {
index fb043b54583d3a9aef01d4049f18a2dfc55fd0ac..28cc063306c4b493c9eb7feb58b65928fd636268 100644 (file)
 #define HCLGE_PHY_MDIX_STATUS_B        (6)
 #define HCLGE_PHY_SPEED_DUP_RESOLVE_B  (11)
 
+/* Factor used to calculate offset and bitmap of VF num */
+#define HCLGE_VF_NUM_PER_CMD           64
+#define HCLGE_VF_NUM_PER_BYTE          8
+
 /* Reset related Registers */
 #define HCLGE_MISC_RESET_STS_REG       0x20700
 #define HCLGE_GLOBAL_RESET_REG         0x20A00
@@ -220,6 +224,7 @@ struct hclge_cfg {
        u8 tc_num;
        u16 tqp_desc_num;
        u16 rx_buf_len;
+       u16 rss_size_max;
        u8 phy_addr;
        u8 media_type;
        u8 mac_addr[ETH_ALEN];
@@ -423,6 +428,15 @@ struct hclge_hw_stats {
        struct hclge_32_bit_stats   all_32_bit_stats;
 };
 
+struct hclge_vlan_type_cfg {
+       u16 rx_ot_fst_vlan_type;
+       u16 rx_ot_sec_vlan_type;
+       u16 rx_in_fst_vlan_type;
+       u16 rx_in_sec_vlan_type;
+       u16 tx_ot_vlan_type;
+       u16 tx_in_vlan_type;
+};
+
 struct hclge_dev {
        struct pci_dev *pdev;
        struct hnae3_ae_dev *ae_dev;
@@ -509,6 +523,26 @@ struct hclge_dev {
        enum hclge_mta_dmac_sel_type mta_mac_sel_type;
        bool enable_mta; /* Mutilcast filter enable */
        bool accept_mta_mc; /* Whether accept mta filter multicast */
+
+       struct hclge_vlan_type_cfg vlan_type_cfg;
+};
+
+/* VPort level vlan tag configuration for TX direction */
+struct hclge_tx_vtag_cfg {
+       bool accept_tag;        /* Whether accept tagged packet from host */
+       bool accept_untag;      /* Whether accept untagged packet from host */
+       bool insert_tag1_en;    /* Whether insert inner vlan tag */
+       bool insert_tag2_en;    /* Whether insert outer vlan tag */
+       u16  default_tag1;      /* The default inner vlan tag to insert */
+       u16  default_tag2;      /* The default outer vlan tag to insert */
+};
+
+/* VPort level vlan tag configuration for RX direction */
+struct hclge_rx_vtag_cfg {
+       bool strip_tag1_en;     /* Whether strip inner vlan tag */
+       bool strip_tag2_en;     /* Whether strip outer vlan tag */
+       bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */
+       bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */
 };
 
 struct hclge_vport {
@@ -523,6 +557,9 @@ struct hclge_vport {
        u16 bw_limit;           /* VSI BW Limit (0 = disabled) */
        u8  dwrr;
 
+       struct hclge_tx_vtag_cfg  txvlan_cfg;
+       struct hclge_rx_vtag_cfg  rxvlan_cfg;
+
        int vport_id;
        struct hclge_dev *back;  /* Back reference to associated dev */
        struct hnae3_handle nic;
@@ -565,4 +602,5 @@ int hclge_rss_init_hw(struct hclge_dev *hdev);
 
 void hclge_mbx_handler(struct hclge_dev *hdev);
 void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
+int hclge_cfg_flowctrl(struct hclge_dev *hdev);
 #endif
index 7069e9408d7de58b164293480c9bd75fa5eae3cf..c1dea3a47bdd24fc30a3b608065aef18f4b3da1f 100644 (file)
@@ -17,6 +17,7 @@
 #define HCLGE_PHY_SUPPORTED_FEATURES   (SUPPORTED_Autoneg | \
                                         SUPPORTED_TP | \
                                         SUPPORTED_Pause | \
+                                        SUPPORTED_Asym_Pause | \
                                         PHY_10BT_FEATURES | \
                                         PHY_100BT_FEATURES | \
                                         PHY_1000BT_FEATURES)
@@ -183,6 +184,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
        ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
        if (ret)
                netdev_err(netdev, "failed to adjust link.\n");
+
+       ret = hclge_cfg_flowctrl(hdev);
+       if (ret)
+               netdev_err(netdev, "failed to configure flow control.\n");
 }
 
 int hclge_mac_start_phy(struct hclge_dev *hdev)
index 7bfa2e5497cb768fe180f2e5956e00ee5f595e64..ea9355d82560c8e71aea1a68ee4ddfa90a4354bd 100644 (file)
@@ -23,8 +23,8 @@ enum hclge_shaper_level {
        HCLGE_SHAPER_LVL_PF     = 1,
 };
 
-#define HCLGE_SHAPER_BS_U_DEF  1
-#define HCLGE_SHAPER_BS_S_DEF  4
+#define HCLGE_SHAPER_BS_U_DEF  5
+#define HCLGE_SHAPER_BS_S_DEF  20
 
 #define HCLGE_ETHER_MAX_RATE   100000
 
@@ -112,7 +112,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
        return 0;
 }
 
-static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
+int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
 {
        struct hclge_desc desc;
 
index bf59961918ab589c0502cb4cd5ad815222663f00..16f413956f17815b8e447330a96025860bb5d96e 100644 (file)
@@ -118,4 +118,5 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
 int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
 int hclge_tm_map_cfg(struct hclge_dev *hdev);
 int hclge_tm_init_hw(struct hclge_dev *hdev);
+int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
 #endif
index 7feff2450ed68717240c1793279c45ee5bef525f..71ddad13baf48499a7ec5c0472921f352d143586 100644 (file)
@@ -199,18 +199,18 @@ static void __emac_set_multicast_list(struct emac_instance *dev);
 
 static inline int emac_phy_supports_gige(int phy_mode)
 {
-       return  phy_mode == PHY_MODE_GMII ||
-               phy_mode == PHY_MODE_RGMII ||
-               phy_mode == PHY_MODE_SGMII ||
-               phy_mode == PHY_MODE_TBI ||
-               phy_mode == PHY_MODE_RTBI;
+       return  phy_interface_mode_is_rgmii(phy_mode) ||
+               phy_mode == PHY_INTERFACE_MODE_GMII ||
+               phy_mode == PHY_INTERFACE_MODE_SGMII ||
+               phy_mode == PHY_INTERFACE_MODE_TBI ||
+               phy_mode == PHY_INTERFACE_MODE_RTBI;
 }
 
 static inline int emac_phy_gpcs(int phy_mode)
 {
-       return  phy_mode == PHY_MODE_SGMII ||
-               phy_mode == PHY_MODE_TBI ||
-               phy_mode == PHY_MODE_RTBI;
+       return  phy_mode == PHY_INTERFACE_MODE_SGMII ||
+               phy_mode == PHY_INTERFACE_MODE_TBI ||
+               phy_mode == PHY_INTERFACE_MODE_RTBI;
 }
 
 static inline void emac_tx_enable(struct emac_instance *dev)
@@ -2865,7 +2865,7 @@ static int emac_init_config(struct emac_instance *dev)
        /* PHY mode needs some decoding */
        dev->phy_mode = of_get_phy_mode(np);
        if (dev->phy_mode < 0)
-               dev->phy_mode = PHY_MODE_NA;
+               dev->phy_mode = PHY_INTERFACE_MODE_NA;
 
        /* Check EMAC version */
        if (of_device_is_compatible(np, "ibm,emac4sync")) {
@@ -3168,7 +3168,7 @@ static int emac_probe(struct platform_device *ofdev)
        printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n",
               ndev->name, dev->cell_index, np, ndev->dev_addr);
 
-       if (dev->phy_mode == PHY_MODE_SGMII)
+       if (dev->phy_mode == PHY_INTERFACE_MODE_SGMII)
                printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
 
        if (dev->phy.address >= 0)
index 5afcc27ceebb6872bfb9cd2cda8abda2fcdae934..bc14dcf27b6b45a686fe7af63b4d3222c7425273 100644 (file)
@@ -104,19 +104,6 @@ struct emac_regs {
        } u1;
 };
 
-/*
- * PHY mode settings (EMAC <-> ZMII/RGMII bridge <-> PHY)
- */
-#define PHY_MODE_NA    PHY_INTERFACE_MODE_NA
-#define PHY_MODE_MII   PHY_INTERFACE_MODE_MII
-#define PHY_MODE_RMII  PHY_INTERFACE_MODE_RMII
-#define PHY_MODE_SMII  PHY_INTERFACE_MODE_SMII
-#define PHY_MODE_RGMII PHY_INTERFACE_MODE_RGMII
-#define PHY_MODE_TBI   PHY_INTERFACE_MODE_TBI
-#define PHY_MODE_GMII  PHY_INTERFACE_MODE_GMII
-#define PHY_MODE_RTBI  PHY_INTERFACE_MODE_RTBI
-#define PHY_MODE_SGMII PHY_INTERFACE_MODE_SGMII
-
 /* EMACx_MR0 */
 #define EMAC_MR0_RXI                   0x80000000
 #define EMAC_MR0_TXI                   0x40000000
index 35865d05fccd3709fad96d41fa3a6b83edd213f8..aa070c063e48cea3cf77556fcbe333e0ebf2a4f4 100644 (file)
@@ -96,7 +96,7 @@ int emac_mii_reset_gpcs(struct mii_phy *phy)
        if ((val & BMCR_ISOLATE) && limit > 0)
                gpcs_phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
 
-       if (limit > 0 && phy->mode == PHY_MODE_SGMII) {
+       if (limit > 0 && phy->mode == PHY_INTERFACE_MODE_SGMII) {
                /* Configure GPCS interface to recommended setting for SGMII */
                gpcs_phy_write(phy, 0x04, 0x8120); /* AsymPause, FDX */
                gpcs_phy_write(phy, 0x07, 0x2801); /* msg_pg, toggle */
@@ -313,16 +313,16 @@ static int cis8201_init(struct mii_phy *phy)
        epcr &= ~EPCR_MODE_MASK;
 
        switch (phy->mode) {
-       case PHY_MODE_TBI:
+       case PHY_INTERFACE_MODE_TBI:
                epcr |= EPCR_TBI_MODE;
                break;
-       case PHY_MODE_RTBI:
+       case PHY_INTERFACE_MODE_RTBI:
                epcr |= EPCR_RTBI_MODE;
                break;
-       case PHY_MODE_GMII:
+       case PHY_INTERFACE_MODE_GMII:
                epcr |= EPCR_GMII_MODE;
                break;
-       case PHY_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII:
        default:
                epcr |= EPCR_RGMII_MODE;
        }
index c4a1ac38bba8b0193d5ce15093023087bc8a4337..00f5999de3cf2801070e391da62008dbaa3f776f 100644 (file)
 /* RGMII bridge supports only GMII/TBI and RGMII/RTBI PHYs */
 static inline int rgmii_valid_mode(int phy_mode)
 {
-       return  phy_mode == PHY_MODE_GMII ||
-               phy_mode == PHY_MODE_MII ||
-               phy_mode == PHY_MODE_RGMII ||
-               phy_mode == PHY_MODE_TBI ||
-               phy_mode == PHY_MODE_RTBI;
-}
-
-static inline const char *rgmii_mode_name(int mode)
-{
-       switch (mode) {
-       case PHY_MODE_RGMII:
-               return "RGMII";
-       case PHY_MODE_TBI:
-               return "TBI";
-       case PHY_MODE_GMII:
-               return "GMII";
-       case PHY_MODE_MII:
-               return "MII";
-       case PHY_MODE_RTBI:
-               return "RTBI";
-       default:
-               BUG();
-       }
+       return  phy_interface_mode_is_rgmii(phy_mode) ||
+               phy_mode == PHY_INTERFACE_MODE_GMII ||
+               phy_mode == PHY_INTERFACE_MODE_MII ||
+               phy_mode == PHY_INTERFACE_MODE_TBI ||
+               phy_mode == PHY_INTERFACE_MODE_RTBI;
 }
 
 static inline u32 rgmii_mode_mask(int mode, int input)
 {
        switch (mode) {
-       case PHY_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
                return RGMII_FER_RGMII(input);
-       case PHY_MODE_TBI:
+       case PHY_INTERFACE_MODE_TBI:
                return RGMII_FER_TBI(input);
-       case PHY_MODE_GMII:
+       case PHY_INTERFACE_MODE_GMII:
                return RGMII_FER_GMII(input);
-       case PHY_MODE_MII:
+       case PHY_INTERFACE_MODE_MII:
                return RGMII_FER_MII(input);
-       case PHY_MODE_RTBI:
+       case PHY_INTERFACE_MODE_RTBI:
                return RGMII_FER_RTBI(input);
        default:
                BUG();
@@ -115,7 +100,7 @@ int rgmii_attach(struct platform_device *ofdev, int input, int mode)
        out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input));
 
        printk(KERN_NOTICE "%pOF: input %d in %s mode\n",
-              ofdev->dev.of_node, input, rgmii_mode_name(mode));
+              ofdev->dev.of_node, input, phy_modes(mode));
 
        ++dev->users;
 
index 89c42d362292fbd61f19ecb2a70a27a65eb6cefa..fdcc734541feaa288d5bb1cc4ec5333c470d7658 100644 (file)
  */
 static inline int zmii_valid_mode(int mode)
 {
-       return  mode == PHY_MODE_MII ||
-               mode == PHY_MODE_RMII ||
-               mode == PHY_MODE_SMII ||
-               mode == PHY_MODE_NA;
+       return  mode == PHY_INTERFACE_MODE_MII ||
+               mode == PHY_INTERFACE_MODE_RMII ||
+               mode == PHY_INTERFACE_MODE_SMII ||
+               mode == PHY_INTERFACE_MODE_NA;
 }
 
 static inline const char *zmii_mode_name(int mode)
 {
        switch (mode) {
-       case PHY_MODE_MII:
+       case PHY_INTERFACE_MODE_MII:
                return "MII";
-       case PHY_MODE_RMII:
+       case PHY_INTERFACE_MODE_RMII:
                return "RMII";
-       case PHY_MODE_SMII:
+       case PHY_INTERFACE_MODE_SMII:
                return "SMII";
        default:
                BUG();
@@ -72,11 +72,11 @@ static inline const char *zmii_mode_name(int mode)
 static inline u32 zmii_mode_mask(int mode, int input)
 {
        switch (mode) {
-       case PHY_MODE_MII:
+       case PHY_INTERFACE_MODE_MII:
                return ZMII_FER_MII(input);
-       case PHY_MODE_RMII:
+       case PHY_INTERFACE_MODE_RMII:
                return ZMII_FER_RMII(input);
-       case PHY_MODE_SMII:
+       case PHY_INTERFACE_MODE_SMII:
                return ZMII_FER_SMII(input);
        default:
                return 0;
@@ -106,27 +106,27 @@ int zmii_attach(struct platform_device *ofdev, int input, int *mode)
         * Please, always specify PHY mode in your board port to avoid
         * any surprises.
         */
-       if (dev->mode == PHY_MODE_NA) {
-               if (*mode == PHY_MODE_NA) {
+       if (dev->mode == PHY_INTERFACE_MODE_NA) {
+               if (*mode == PHY_INTERFACE_MODE_NA) {
                        u32 r = dev->fer_save;
 
                        ZMII_DBG(dev, "autodetecting mode, FER = 0x%08x" NL, r);
 
                        if (r & (ZMII_FER_MII(0) | ZMII_FER_MII(1)))
-                               dev->mode = PHY_MODE_MII;
+                               dev->mode = PHY_INTERFACE_MODE_MII;
                        else if (r & (ZMII_FER_RMII(0) | ZMII_FER_RMII(1)))
-                               dev->mode = PHY_MODE_RMII;
+                               dev->mode = PHY_INTERFACE_MODE_RMII;
                        else
-                               dev->mode = PHY_MODE_SMII;
-               } else
+                               dev->mode = PHY_INTERFACE_MODE_SMII;
+               } else {
                        dev->mode = *mode;
-
+               }
                printk(KERN_NOTICE "%pOF: bridge in %s mode\n",
                       ofdev->dev.of_node,
                       zmii_mode_name(dev->mode));
        } else {
                /* All inputs must use the same mode */
-               if (*mode != PHY_MODE_NA && *mode != dev->mode) {
+               if (*mode != PHY_INTERFACE_MODE_NA && *mode != dev->mode) {
                        printk(KERN_ERR
                               "%pOF: invalid mode %d specified for input %d\n",
                               ofdev->dev.of_node, *mode, input);
@@ -246,7 +246,7 @@ static int zmii_probe(struct platform_device *ofdev)
 
        mutex_init(&dev->lock);
        dev->ofdev = ofdev;
-       dev->mode = PHY_MODE_NA;
+       dev->mode = PHY_INTERFACE_MODE_NA;
 
        rc = -ENXIO;
        if (of_address_to_resource(np, 0, &regs)) {
index 1dc4aef37d3a4faf3592ab0dede92b802a831824..6911b7cc06c527429acdf094782a23c35445c647 100644 (file)
@@ -59,6 +59,7 @@
 #include <linux/mm.h>
 #include <linux/ethtool.h>
 #include <linux/proc_fs.h>
+#include <linux/if_arp.h>
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
@@ -1153,6 +1154,9 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
                        hdr_len[2] = tcp_hdrlen(skb);
                else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
                        hdr_len[2] = sizeof(struct udphdr);
+       } else if (skb->protocol == htons(ETH_P_ARP)) {
+               hdr_len[1] = arp_hdr_len(skb->dev);
+               hdr_len[2] = 0;
        }
 
        memset(hdr_data, 0, 120);
@@ -1386,7 +1390,8 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
        /* determine if l2/3/4 headers are sent to firmware */
        if ((*hdrs >> 7) & 1 &&
            (skb->protocol == htons(ETH_P_IP) ||
-            skb->protocol == htons(ETH_P_IPV6))) {
+            skb->protocol == htons(ETH_P_IPV6) ||
+            skb->protocol == htons(ETH_P_ARP))) {
                build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
                tx_crq.v1.n_crq_elem = num_entries;
                tx_buff->indir_arr[0] = tx_crq;
@@ -4285,7 +4290,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        }
 
        netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
-                                  IBMVNIC_MAX_TX_QUEUES);
+                                  IBMVNIC_MAX_QUEUES);
        if (!netdev)
                return -ENOMEM;
 
index 4487f1e2c2668daf41030c6f0d39c8a327ab904c..2df79fdd800b69b75b34faeaa7279cb21b7fc56b 100644 (file)
@@ -39,7 +39,7 @@
 #define IBMVNIC_RX_WEIGHT              16
 /* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
 #define IBMVNIC_BUFFS_PER_POOL 100
-#define IBMVNIC_MAX_TX_QUEUES  5
+#define IBMVNIC_MAX_QUEUES     10
 
 #define IBMVNIC_TSO_BUF_SZ     65536
 #define IBMVNIC_TSO_BUFS       64
index bc93b69cfd1edcf62d11cd24d41a9ca74b8f0dcc..a539263cd79ce4be8fcc0cbfe6bfdd196336cd38 100644 (file)
@@ -1214,6 +1214,10 @@ static void mvneta_port_disable(struct mvneta_port *pp)
        val &= ~MVNETA_GMAC0_PORT_ENABLE;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 
+       pp->link = 0;
+       pp->duplex = -1;
+       pp->speed = 0;
+
        udelay(200);
 }
 
@@ -1958,9 +1962,9 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
 
                if (!mvneta_rxq_desc_is_first_last(rx_status) ||
                    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+                       mvneta_rx_error(pp, rx_desc);
 err_drop_frame:
                        dev->stats.rx_errors++;
-                       mvneta_rx_error(pp, rx_desc);
                        /* leave the descriptor untouched */
                        continue;
                }
@@ -3011,7 +3015,7 @@ static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
 {
        int queue;
 
-       for (queue = 0; queue < txq_number; queue++)
+       for (queue = 0; queue < rxq_number; queue++)
                mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
 }
 
index 54adfd96785846f9e60a2ded11ab96bc0c196c7e..29826dd152044fffb4b3658311f0ec54a7e3deaf 100644 (file)
@@ -1952,20 +1952,23 @@ static int mtk_hw_init(struct mtk_eth *eth)
        }
        regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
 
-       /* Set GE2 driving and slew rate */
-       regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
+       if (eth->pctl) {
+               /* Set GE2 driving and slew rate */
+               regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
 
-       /* set GE2 TDSEL */
-       regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
+               /* set GE2 TDSEL */
+               regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
 
-       /* set GE2 TUNE */
-       regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
-
-       /* GE1, Force 1000M/FD, FC ON */
-       mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));
+               /* set GE2 TUNE */
+               regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
+       }
 
-       /* GE2, Force 1000M/FD, FC ON */
-       mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));
+       /* Set linkdown as the default for each GMAC. Its own MCR would be set
+        * up with the more appropriate value when mtk_phy_link_adjust call is
+        * being invoked.
+        */
+       for (i = 0; i < MTK_MAC_COUNT; i++)
+               mtk_w32(eth, 0, MTK_MAC_MCR(i));
 
        /* Indicates CDM to parse the MTK special tag from CPU
         * which also is working out for untag packets.
@@ -2537,11 +2540,13 @@ static int mtk_probe(struct platform_device *pdev)
                }
        }
 
-       eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
-                                                   "mediatek,pctl");
-       if (IS_ERR(eth->pctl)) {
-               dev_err(&pdev->dev, "no pctl regmap found\n");
-               return PTR_ERR(eth->pctl);
+       if (eth->soc->required_pctl) {
+               eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+                                                           "mediatek,pctl");
+               if (IS_ERR(eth->pctl)) {
+                       dev_err(&pdev->dev, "no pctl regmap found\n");
+                       return PTR_ERR(eth->pctl);
+               }
        }
 
        for (i = 0; i < 3; i++) {
@@ -2667,17 +2672,20 @@ static int mtk_remove(struct platform_device *pdev)
 
 static const struct mtk_soc_data mt2701_data = {
        .caps = MTK_GMAC1_TRGMII,
-       .required_clks = MT7623_CLKS_BITMAP
+       .required_clks = MT7623_CLKS_BITMAP,
+       .required_pctl = true,
 };
 
 static const struct mtk_soc_data mt7622_data = {
        .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW,
-       .required_clks = MT7622_CLKS_BITMAP
+       .required_clks = MT7622_CLKS_BITMAP,
+       .required_pctl = false,
 };
 
 static const struct mtk_soc_data mt7623_data = {
        .caps = MTK_GMAC1_TRGMII,
-       .required_clks = MT7623_CLKS_BITMAP
+       .required_clks = MT7623_CLKS_BITMAP,
+       .required_pctl = true,
 };
 
 const struct of_device_id of_mtk_match[] = {
index a3af4660de81cc1f3379d18e714eabdcdd595af9..672b8c353c47d92357722d728d7f0aa922c310b6 100644 (file)
@@ -573,10 +573,13 @@ struct mtk_rx_ring {
  * @caps                       Flags shown the extra capability for the SoC
  * @required_clks              Flags shown the bitmap for required clocks on
  *                             the target SoC
+ * @required_pctl              A bool value to show whether the SoC requires
+ *                             the extra setup for those pins used by GMAC.
  */
 struct mtk_soc_data {
        u32             caps;
        u32             required_clks;
+       bool            required_pctl;
 };
 
 /* currently no SoC has more than 2 macs */
index 1fffdebbc9e8994c70a19f4982f26d1de98be5f2..e9a1fbcc4adfa6e692902b551d0c535bfe019a9a 100644 (file)
@@ -362,7 +362,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
        case MLX5_CMD_OP_ALLOC_Q_COUNTER:
        case MLX5_CMD_OP_QUERY_Q_COUNTER:
-       case MLX5_CMD_OP_SET_RATE_LIMIT:
+       case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
        case MLX5_CMD_OP_QUERY_RATE_LIMIT:
        case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
        case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
@@ -505,7 +505,7 @@ const char *mlx5_command_str(int command)
        MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
        MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
        MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
-       MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
+       MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
        MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
        MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
        MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
index c0872b3284cb405583642d71a0e2e540d4804b6f..543060c305a073c0457cc31ae7318f425a0e7c49 100644 (file)
@@ -82,6 +82,9 @@
        max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
 #define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev)       MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
 #define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
+#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \
+       (cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \
+       MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev))
 
 #define MLX5_MPWRQ_LOG_WQE_SZ                  18
 #define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
@@ -590,6 +593,7 @@ struct mlx5e_channel {
        struct mlx5_core_dev      *mdev;
        struct hwtstamp_config    *tstamp;
        int                        ix;
+       int                        cpu;
 };
 
 struct mlx5e_channels {
@@ -935,8 +939,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
                                 u8 cq_period_mode);
 void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
                                 u8 cq_period_mode);
-void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
-                             struct mlx5e_params *params, u8 rq_type);
+void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
+                              struct mlx5e_params *params,
+                              u8 rq_type);
 
 static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
 {
index c6d90b6dd80efa9a1ee82958adf5ef4dd3b4522d..9bcf38f4123b504637c080413078c23304d9e49e 100644 (file)
@@ -274,6 +274,7 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
                                    struct ieee_ets *ets)
 {
+       bool have_ets_tc = false;
        int bw_sum = 0;
        int i;
 
@@ -288,11 +289,14 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
        }
 
        /* Validate Bandwidth Sum */
-       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-               if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+               if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+                       have_ets_tc = true;
                        bw_sum += ets->tc_tx_bw[i];
+               }
+       }
 
-       if (bw_sum != 0 && bw_sum != 100) {
+       if (have_ets_tc && bw_sum != 100) {
                netdev_err(netdev,
                           "Failed to validate ETS: BW sum is illegal\n");
                return -EINVAL;
index 23425f02840581f6be591bc48cf8cccc8cc26443..8f05efa5c829bccb67ddd8b24dc2997adfe4a6c8 100644 (file)
@@ -1523,8 +1523,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
        new_channels.params = priv->channels.params;
        MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
 
-       mlx5e_set_rq_type_params(priv->mdev, &new_channels.params,
-                                new_channels.params.rq_wq_type);
+       new_channels.params.mpwqe_log_stride_sz =
+               MLX5E_MPWQE_STRIDE_SZ(priv->mdev, new_val);
+       new_channels.params.mpwqe_log_num_strides =
+               MLX5_MPWRQ_LOG_WQE_SZ - new_channels.params.mpwqe_log_stride_sz;
 
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                priv->channels.params = new_channels.params;
@@ -1536,6 +1538,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
                return err;
 
        mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+       mlx5e_dbg(DRV, priv, "MLX5E: RxCqeCmprss was turned %s\n",
+                 MLX5E_GET_PFLAG(&priv->channels.params,
+                                 MLX5E_PFLAG_RX_CQE_COMPRESS) ? "ON" : "OFF");
+
        return 0;
 }
 
index 0f5c012de52eaa723f2e53f29328ed30a6f5d77b..3aa1c90e7c86582013d8d19c3daee2386f7cc051 100644 (file)
@@ -71,11 +71,6 @@ struct mlx5e_channel_param {
        struct mlx5e_cq_param      icosq_cq;
 };
 
-static int mlx5e_get_node(struct mlx5e_priv *priv, int ix)
-{
-       return pci_irq_get_node(priv->mdev->pdev, MLX5_EQ_VEC_COMP_BASE + ix);
-}
-
 static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
 {
        return MLX5_CAP_GEN(mdev, striding_rq) &&
@@ -83,8 +78,8 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
                MLX5_CAP_ETH(mdev, reg_umr_sq);
 }
 
-void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
-                             struct mlx5e_params *params, u8 rq_type)
+void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
+                              struct mlx5e_params *params, u8 rq_type)
 {
        params->rq_wq_type = rq_type;
        params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
@@ -93,10 +88,8 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
                params->log_rq_size = is_kdump_kernel() ?
                        MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
                        MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
-               params->mpwqe_log_stride_sz =
-                       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
-                       MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) :
-                       MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
+               params->mpwqe_log_stride_sz = MLX5E_MPWQE_STRIDE_SZ(mdev,
+                       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
                params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
                        params->mpwqe_log_stride_sz;
                break;
@@ -120,13 +113,14 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
                       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
 }
 
-static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
+static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev,
+                               struct mlx5e_params *params)
 {
        u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
                    !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
                    MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
                    MLX5_WQ_TYPE_LINKED_LIST;
-       mlx5e_set_rq_type_params(mdev, params, rq_type);
+       mlx5e_init_rq_type_params(mdev, params, rq_type);
 }
 
 static void mlx5e_update_carrier(struct mlx5e_priv *priv)
@@ -444,17 +438,16 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
        int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
        int mtt_sz = mlx5e_get_wqe_mtt_sz();
        int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
-       int node = mlx5e_get_node(c->priv, c->ix);
        int i;
 
        rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
-                                       GFP_KERNEL, node);
+                                     GFP_KERNEL, cpu_to_node(c->cpu));
        if (!rq->mpwqe.info)
                goto err_out;
 
        /* We allocate more than mtt_sz as we will align the pointer */
-       rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz,
-                                       GFP_KERNEL, node);
+       rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
+                                       cpu_to_node(c->cpu));
        if (unlikely(!rq->mpwqe.mtt_no_align))
                goto err_free_wqe_info;
 
@@ -562,7 +555,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
        int err;
        int i;
 
-       rqp->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+       rqp->wq.db_numa_node = cpu_to_node(c->cpu);
 
        err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
@@ -629,8 +622,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                rq->wqe.frag_info =
                        kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
-                                    GFP_KERNEL,
-                                    mlx5e_get_node(c->priv, c->ix));
+                                    GFP_KERNEL, cpu_to_node(c->cpu));
                if (!rq->wqe.frag_info) {
                        err = -ENOMEM;
                        goto err_rq_wq_destroy;
@@ -1000,13 +992,13 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
        sq->min_inline_mode = params->tx_min_inline_mode;
 
-       param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+       param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
        if (err)
                return err;
        sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
 
-       err = mlx5e_alloc_xdpsq_db(sq, mlx5e_get_node(c->priv, c->ix));
+       err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;
 
@@ -1053,13 +1045,13 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
        sq->channel   = c;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
 
-       param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+       param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
        if (err)
                return err;
        sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
 
-       err = mlx5e_alloc_icosq_db(sq, mlx5e_get_node(c->priv, c->ix));
+       err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;
 
@@ -1126,13 +1118,13 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
        if (MLX5_IPSEC_DEV(c->priv->mdev))
                set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
 
-       param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+       param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
        if (err)
                return err;
        sq->wq.db    = &sq->wq.db[MLX5_SND_DBR];
 
-       err = mlx5e_alloc_txqsq_db(sq, mlx5e_get_node(c->priv, c->ix));
+       err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;
 
@@ -1504,8 +1496,8 @@ static int mlx5e_alloc_cq(struct mlx5e_channel *c,
        struct mlx5_core_dev *mdev = c->priv->mdev;
        int err;
 
-       param->wq.buf_numa_node = mlx5e_get_node(c->priv, c->ix);
-       param->wq.db_numa_node  = mlx5e_get_node(c->priv, c->ix);
+       param->wq.buf_numa_node = cpu_to_node(c->cpu);
+       param->wq.db_numa_node  = cpu_to_node(c->cpu);
        param->eq_ix   = c->ix;
 
        err = mlx5e_alloc_cq_common(mdev, param, cq);
@@ -1604,6 +1596,11 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq)
        mlx5e_free_cq(cq);
 }
 
+static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
+{
+       return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
+}
+
 static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
                             struct mlx5e_params *params,
                             struct mlx5e_channel_param *cparam)
@@ -1752,12 +1749,13 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 {
        struct mlx5e_cq_moder icocq_moder = {0, 0};
        struct net_device *netdev = priv->netdev;
+       int cpu = mlx5e_get_cpu(priv, ix);
        struct mlx5e_channel *c;
        unsigned int irq;
        int err;
        int eqn;
 
-       c = kzalloc_node(sizeof(*c), GFP_KERNEL, mlx5e_get_node(priv, ix));
+       c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
        if (!c)
                return -ENOMEM;
 
@@ -1765,6 +1763,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
        c->mdev     = priv->mdev;
        c->tstamp   = &priv->tstamp;
        c->ix       = ix;
+       c->cpu      = cpu;
        c->pdev     = &priv->mdev->pdev->dev;
        c->netdev   = priv->netdev;
        c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
@@ -1853,8 +1852,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_activate_txqsq(&c->sq[tc]);
        mlx5e_activate_rq(&c->rq);
-       netif_set_xps_queue(c->netdev,
-               mlx5_get_vector_affinity(c->priv->mdev, c->ix), c->ix);
+       netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
 }
 
 static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@@ -3679,6 +3677,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
                                                     struct sk_buff *skb,
                                                     netdev_features_t features)
 {
+       unsigned int offset = 0;
        struct udphdr *udph;
        u8 proto;
        u16 port;
@@ -3688,7 +3687,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
                proto = ip_hdr(skb)->protocol;
                break;
        case htons(ETH_P_IPV6):
-               proto = ipv6_hdr(skb)->nexthdr;
+               proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
                break;
        default:
                goto out;
index 60771865c99c9bf4402d042a760887c4497e0036..e7e7cef2bde402be23b191873a5790ed23fd7843 100644 (file)
@@ -466,7 +466,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
                        break;
                case MLX5_EVENT_TYPE_CQ_ERROR:
                        cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
-                       mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n",
+                       mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
                                       cqn, eqe->data.cq_err.syndrome);
                        mlx5_cq_event(dev, cqn, eqe->type);
                        break;
@@ -775,7 +775,7 @@ err1:
        return err;
 }
 
-int mlx5_stop_eqs(struct mlx5_core_dev *dev)
+void mlx5_stop_eqs(struct mlx5_core_dev *dev)
 {
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        int err;
@@ -784,22 +784,26 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev)
        if (MLX5_CAP_GEN(dev, pg)) {
                err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
                if (err)
-                       return err;
+                       mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n",
+                                     err);
        }
 #endif
 
        err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
        if (err)
-               return err;
+               mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
+                             err);
 
-       mlx5_destroy_unmap_eq(dev, &table->async_eq);
+       err = mlx5_destroy_unmap_eq(dev, &table->async_eq);
+       if (err)
+               mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
+                             err);
        mlx5_cmd_use_polling(dev);
 
        err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
        if (err)
-               mlx5_cmd_use_events(dev);
-
-       return err;
+               mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
+                             err);
 }
 
 int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
index 3c11d6e2160abeef5a893b7b81274a7ce315368c..14962969c5ba8c4462662eeb30ef10cbe1c27fa6 100644 (file)
@@ -66,6 +66,9 @@ static int mlx5_fpga_mem_read_i2c(struct mlx5_fpga_device *fdev, size_t size,
        u8 actual_size;
        int err;
 
+       if (!size)
+               return -EINVAL;
+
        if (!fdev->mdev)
                return -ENOTCONN;
 
@@ -95,6 +98,9 @@ static int mlx5_fpga_mem_write_i2c(struct mlx5_fpga_device *fdev, size_t size,
        u8 actual_size;
        int err;
 
+       if (!size)
+               return -EINVAL;
+
        if (!fdev->mdev)
                return -ENOTCONN;
 
index c70fd663a63301e7e89ef9ee00d37c7075fe1a0b..dfaad9ecb2b8f155c5cdf30451c572b2d10f1d37 100644 (file)
@@ -174,6 +174,8 @@ static void del_hw_fte(struct fs_node *node);
 static void del_sw_flow_table(struct fs_node *node);
 static void del_sw_flow_group(struct fs_node *node);
 static void del_sw_fte(struct fs_node *node);
+static void del_sw_prio(struct fs_node *node);
+static void del_sw_ns(struct fs_node *node);
 /* Delete rule (destination) is special case that 
  * requires to lock the FTE for all the deletion process.
  */
@@ -408,6 +410,16 @@ static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
        return NULL;
 }
 
+static void del_sw_ns(struct fs_node *node)
+{
+       kfree(node);
+}
+
+static void del_sw_prio(struct fs_node *node)
+{
+       kfree(node);
+}
+
 static void del_hw_flow_table(struct fs_node *node)
 {
        struct mlx5_flow_table *ft;
@@ -2064,7 +2076,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
                return ERR_PTR(-ENOMEM);
 
        fs_prio->node.type = FS_TYPE_PRIO;
-       tree_init_node(&fs_prio->node, NULL, NULL);
+       tree_init_node(&fs_prio->node, NULL, del_sw_prio);
        tree_add_node(&fs_prio->node, &ns->node);
        fs_prio->num_levels = num_levels;
        fs_prio->prio = prio;
@@ -2090,7 +2102,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
                return ERR_PTR(-ENOMEM);
 
        fs_init_namespace(ns);
-       tree_init_node(&ns->node, NULL, NULL);
+       tree_init_node(&ns->node, NULL, del_sw_ns);
        tree_add_node(&ns->node, &prio->node);
        list_add_tail(&ns->node.list, &prio->node.children);
 
index 1a0e797ad001ad672c954c228350ff9bcdea125b..21d29f7936f6c5d1e26c6e0d3f10644fd0f096c8 100644 (file)
@@ -241,7 +241,7 @@ static void print_health_info(struct mlx5_core_dev *dev)
        u32 fw;
        int i;
 
-       /* If the syndrom is 0, the device is OK and no need to print buffer */
+       /* If the syndrome is 0, the device is OK and no need to print buffer */
        if (!ioread8(&h->synd))
                return;
 
index d2a66dc4adc6d2933cfbc60c28cd49c67716a010..8812d7208e8f3522500b3f3e971b4a7341b22c8f 100644 (file)
@@ -57,7 +57,7 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
                                   struct mlx5e_params *params)
 {
        /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
-       mlx5e_set_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
+       mlx5e_init_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
 
        /* RQ size in ipoib by default is 512 */
        params->log_rq_size = is_kdump_kernel() ?
index 5f323442cc5ac009d5006438d93183e96b85d0d9..8a89c7e8cd631f2e14cb7cbac99a8983964b7eda 100644 (file)
@@ -317,9 +317,6 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
 {
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_eq_table *table = &priv->eq_table;
-       struct irq_affinity irqdesc = {
-               .pre_vectors = MLX5_EQ_VEC_COMP_BASE,
-       };
        int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
        int nvec;
 
@@ -333,10 +330,9 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
        if (!priv->irq_info)
                goto err_free_msix;
 
-       nvec = pci_alloc_irq_vectors_affinity(dev->pdev,
+       nvec = pci_alloc_irq_vectors(dev->pdev,
                        MLX5_EQ_VEC_COMP_BASE + 1, nvec,
-                       PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
-                       &irqdesc);
+                       PCI_IRQ_MSIX);
        if (nvec < 0)
                return nvec;
 
@@ -622,6 +618,63 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
        return (u64)timer_l | (u64)timer_h1 << 32;
 }
 
+static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+       struct mlx5_priv *priv  = &mdev->priv;
+       int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
+
+       if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
+               mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
+               return -ENOMEM;
+       }
+
+       cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
+                       priv->irq_info[i].mask);
+
+       if (IS_ENABLED(CONFIG_SMP) &&
+           irq_set_affinity_hint(irq, priv->irq_info[i].mask))
+               mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
+
+       return 0;
+}
+
+static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+       struct mlx5_priv *priv  = &mdev->priv;
+       int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
+
+       irq_set_affinity_hint(irq, NULL);
+       free_cpumask_var(priv->irq_info[i].mask);
+}
+
+static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
+{
+       int err;
+       int i;
+
+       for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
+               err = mlx5_irq_set_affinity_hint(mdev, i);
+               if (err)
+                       goto err_out;
+       }
+
+       return 0;
+
+err_out:
+       for (i--; i >= 0; i--)
+               mlx5_irq_clear_affinity_hint(mdev, i);
+
+       return err;
+}
+
+static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
+{
+       int i;
+
+       for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
+               mlx5_irq_clear_affinity_hint(mdev, i);
+}
+
 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
                    unsigned int *irqn)
 {
@@ -1097,6 +1150,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
                goto err_stop_eqs;
        }
 
+       err = mlx5_irq_set_affinity_hints(dev);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
+               goto err_affinity_hints;
+       }
+
        err = mlx5_init_fs(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed to init flow steering\n");
@@ -1154,6 +1213,9 @@ err_sriov:
        mlx5_cleanup_fs(dev);
 
 err_fs:
+       mlx5_irq_clear_affinity_hints(dev);
+
+err_affinity_hints:
        free_comp_eqs(dev);
 
 err_stop_eqs:
@@ -1222,6 +1284,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 
        mlx5_sriov_detach(dev);
        mlx5_cleanup_fs(dev);
+       mlx5_irq_clear_affinity_hints(dev);
        free_comp_eqs(dev);
        mlx5_stop_eqs(dev);
        mlx5_put_uars_page(dev, priv->uar);
index db9e665ab10474f934131b5c2a8fa2173fa5feb6..889130edb71525ecd1f46e88a11b2d3fa0ef843f 100644 (file)
@@ -213,8 +213,8 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
 err_cmd:
        memset(din, 0, sizeof(din));
        memset(dout, 0, sizeof(dout));
-       MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
-       MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+       MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+       MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
        mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
        return err;
 }
index e651e4c02867740d35c07bfcf485860f26ad6409..d3c33e9eea7292412974802c4c38ded8898ed55c 100644 (file)
@@ -125,16 +125,16 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
        return ret_entry;
 }
 
-static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
+static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
                                   u32 rate, u16 index)
 {
-       u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)]   = {0};
-       u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0};
+       u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)]   = {0};
+       u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};
 
-       MLX5_SET(set_rate_limit_in, in, opcode,
-                MLX5_CMD_OP_SET_RATE_LIMIT);
-       MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
-       MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
+       MLX5_SET(set_pp_rate_limit_in, in, opcode,
+                MLX5_CMD_OP_SET_PP_RATE_LIMIT);
+       MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
+       MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rate);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
@@ -173,7 +173,7 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index)
                entry->refcount++;
        } else {
                /* new rate limit */
-               err = mlx5_set_rate_limit_cmd(dev, rate, entry->index);
+               err = mlx5_set_pp_rate_limit_cmd(dev, rate, entry->index);
                if (err) {
                        mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n",
                                      rate, err);
@@ -209,7 +209,7 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate)
        entry->refcount--;
        if (!entry->refcount) {
                /* need to remove rate */
-               mlx5_set_rate_limit_cmd(dev, 0, entry->index);
+               mlx5_set_pp_rate_limit_cmd(dev, 0, entry->index);
                entry->rate = 0;
        }
 
@@ -262,8 +262,8 @@ void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
        /* Clear all configured rates */
        for (i = 0; i < table->max_size; i++)
                if (table->rl_entry[i].rate)
-                       mlx5_set_rate_limit_cmd(dev, 0,
-                                               table->rl_entry[i].index);
+                       mlx5_set_pp_rate_limit_cmd(dev, 0,
+                                                  table->rl_entry[i].index);
 
        kfree(dev->priv.rl_table.rl_entry);
 }
index 07a9ba6cfc70a11f7b4c05c73c1b32a704b7e6ba..2f74953e4561511e23d8fe3219db89104e3dd9e3 100644 (file)
@@ -71,9 +71,9 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
        struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
        struct mlx5e_vxlan *vxlan;
 
-       spin_lock(&vxlan_db->lock);
+       spin_lock_bh(&vxlan_db->lock);
        vxlan = radix_tree_lookup(&vxlan_db->tree, port);
-       spin_unlock(&vxlan_db->lock);
+       spin_unlock_bh(&vxlan_db->lock);
 
        return vxlan;
 }
@@ -88,8 +88,12 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
        struct mlx5e_vxlan *vxlan;
        int err;
 
-       if (mlx5e_vxlan_lookup_port(priv, port))
+       mutex_lock(&priv->state_lock);
+       vxlan = mlx5e_vxlan_lookup_port(priv, port);
+       if (vxlan) {
+               atomic_inc(&vxlan->refcount);
                goto free_work;
+       }
 
        if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
                goto free_work;
@@ -99,10 +103,11 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
                goto err_delete_port;
 
        vxlan->udp_port = port;
+       atomic_set(&vxlan->refcount, 1);
 
-       spin_lock_irq(&vxlan_db->lock);
+       spin_lock_bh(&vxlan_db->lock);
        err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
-       spin_unlock_irq(&vxlan_db->lock);
+       spin_unlock_bh(&vxlan_db->lock);
        if (err)
                goto err_free;
 
@@ -113,35 +118,39 @@ err_free:
 err_delete_port:
        mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
 free_work:
+       mutex_unlock(&priv->state_lock);
        kfree(vxlan_work);
 }
 
-static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_del_port(struct work_struct *work)
 {
+       struct mlx5e_vxlan_work *vxlan_work =
+               container_of(work, struct mlx5e_vxlan_work, work);
+       struct mlx5e_priv *priv         = vxlan_work->priv;
        struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+       u16 port = vxlan_work->port;
        struct mlx5e_vxlan *vxlan;
+       bool remove = false;
 
-       spin_lock_irq(&vxlan_db->lock);
-       vxlan = radix_tree_delete(&vxlan_db->tree, port);
-       spin_unlock_irq(&vxlan_db->lock);
-
+       mutex_lock(&priv->state_lock);
+       spin_lock_bh(&vxlan_db->lock);
+       vxlan = radix_tree_lookup(&vxlan_db->tree, port);
        if (!vxlan)
-               return;
-
-       mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port);
-
-       kfree(vxlan);
-}
+               goto out_unlock;
 
-static void mlx5e_vxlan_del_port(struct work_struct *work)
-{
-       struct mlx5e_vxlan_work *vxlan_work =
-               container_of(work, struct mlx5e_vxlan_work, work);
-       struct mlx5e_priv *priv = vxlan_work->priv;
-       u16 port = vxlan_work->port;
+       if (atomic_dec_and_test(&vxlan->refcount)) {
+               radix_tree_delete(&vxlan_db->tree, port);
+               remove = true;
+       }
 
-       __mlx5e_vxlan_core_del_port(priv, port);
+out_unlock:
+       spin_unlock_bh(&vxlan_db->lock);
 
+       if (remove) {
+               mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+               kfree(vxlan);
+       }
+       mutex_unlock(&priv->state_lock);
        kfree(vxlan_work);
 }
 
@@ -171,12 +180,11 @@ void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
        struct mlx5e_vxlan *vxlan;
        unsigned int port = 0;
 
-       spin_lock_irq(&vxlan_db->lock);
+       /* Lockless since we are the only radix-tree consumers, wq is disabled */
        while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
                port = vxlan->udp_port;
-               spin_unlock_irq(&vxlan_db->lock);
-               __mlx5e_vxlan_core_del_port(priv, (u16)port);
-               spin_lock_irq(&vxlan_db->lock);
+               radix_tree_delete(&vxlan_db->tree, port);
+               mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+               kfree(vxlan);
        }
-       spin_unlock_irq(&vxlan_db->lock);
 }
index 5def12c048e38992e7edd9f870233e369ef4580e..5ef6ae7d568abcd1410bc403b526628a634799cb 100644 (file)
@@ -36,6 +36,7 @@
 #include "en.h"
 
 struct mlx5e_vxlan {
+       atomic_t refcount;
        u16 udp_port;
 };
 
index 72ef4f8025f00ff8810c2955b25b7f3baec49be1..be657b8533f04922a61a2f3a4b1aeddf3137cdf5 100644 (file)
@@ -2436,25 +2436,16 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
        rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
 }
 
-static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
-                                   const struct mlxsw_sp_rif *rif)
-{
-       char rauht_pl[MLXSW_REG_RAUHT_LEN];
-
-       mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
-                            rif->rif_index, rif->addr);
-       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
-}
-
 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
                                         struct mlxsw_sp_rif *rif)
 {
        struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
 
-       mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
        list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
-                                rif_list_node)
+                                rif_list_node) {
+               mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
                mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
+       }
 }
 
 enum mlxsw_sp_nexthop_type {
index 2fe96f1f3fe5cae8438cf23dada52c0a616a7a37..bd6e9014bc74794b9a8a7e680f5b59ea7048382f 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/spi/spi.h>
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
+#include <linux/of_net.h>
 
 #include "ks8851.h"
 
@@ -407,15 +408,23 @@ static void ks8851_read_mac_addr(struct net_device *dev)
  * @ks: The device structure
  *
  * Get or create the initial mac address for the device and then set that
- * into the station address register. If there is an EEPROM present, then
+ * into the station address register. A mac address supplied in the device
+ * tree takes precedence. Otherwise, if there is an EEPROM present, then
  * we try that. If no valid mac address is found we use eth_random_addr()
  * to create a new one.
  */
 static void ks8851_init_mac(struct ks8851_net *ks)
 {
        struct net_device *dev = ks->netdev;
+       const u8 *mac_addr;
+
+       mac_addr = of_get_mac_address(ks->spidev->dev.of_node);
+       if (mac_addr) {
+               memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+               ks8851_write_mac_addr(dev);
+               return;
+       }
 
-       /* first, try reading what we've got already */
        if (ks->rc_ccr & CCR_EEPROM) {
                ks8851_read_mac_addr(dev);
                if (is_valid_ether_addr(dev->dev_addr))
index 4f6553f0117866bc8e742ca57ff8d8fc23cf6b29..4b63167906ca04f1c22f4a2f1aaa1b56a39e0593 100644 (file)
@@ -84,11 +84,41 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
        return nfp_net_ebpf_capable(nn) ? "BPF" : "";
 }
 
+static int
+nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
+{
+       int err;
+
+       nn->app_priv = kzalloc(sizeof(struct nfp_bpf_vnic), GFP_KERNEL);
+       if (!nn->app_priv)
+               return -ENOMEM;
+
+       err = nfp_app_nic_vnic_alloc(app, nn, id);
+       if (err)
+               goto err_free_priv;
+
+       return 0;
+err_free_priv:
+       kfree(nn->app_priv);
+       return err;
+}
+
+static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
+{
+       struct nfp_bpf_vnic *bv = nn->app_priv;
+
+       WARN_ON(bv->tc_prog);
+       kfree(bv);
+}
+
 static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
                                     void *type_data, void *cb_priv)
 {
        struct tc_cls_bpf_offload *cls_bpf = type_data;
        struct nfp_net *nn = cb_priv;
+       struct bpf_prog *oldprog;
+       struct nfp_bpf_vnic *bv;
+       int err;
 
        if (type != TC_SETUP_CLSBPF ||
            !tc_can_offload(nn->dp.netdev) ||
@@ -96,8 +126,6 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
            cls_bpf->common.protocol != htons(ETH_P_ALL) ||
            cls_bpf->common.chain_index)
                return -EOPNOTSUPP;
-       if (nn->dp.bpf_offload_xdp)
-               return -EBUSY;
 
        /* Only support TC direct action */
        if (!cls_bpf->exts_integrated ||
@@ -106,16 +134,25 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
                return -EOPNOTSUPP;
        }
 
-       switch (cls_bpf->command) {
-       case TC_CLSBPF_REPLACE:
-               return nfp_net_bpf_offload(nn, cls_bpf->prog, true);
-       case TC_CLSBPF_ADD:
-               return nfp_net_bpf_offload(nn, cls_bpf->prog, false);
-       case TC_CLSBPF_DESTROY:
-               return nfp_net_bpf_offload(nn, NULL, true);
-       default:
+       if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
                return -EOPNOTSUPP;
+
+       bv = nn->app_priv;
+       oldprog = cls_bpf->oldprog;
+
+       /* Don't remove if oldprog doesn't match driver's state */
+       if (bv->tc_prog != oldprog) {
+               oldprog = NULL;
+               if (!cls_bpf->prog)
+                       return 0;
        }
+
+       err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog);
+       if (err)
+               return err;
+
+       bv->tc_prog = cls_bpf->prog;
+       return 0;
 }
 
 static int nfp_bpf_setup_tc_block(struct net_device *netdev,
@@ -276,7 +313,8 @@ const struct nfp_app_type app_bpf = {
 
        .extra_cap      = nfp_bpf_extra_cap,
 
-       .vnic_alloc     = nfp_app_nic_vnic_alloc,
+       .vnic_alloc     = nfp_bpf_vnic_alloc,
+       .vnic_free      = nfp_bpf_vnic_free,
 
        .setup_tc       = nfp_bpf_setup_tc,
        .tc_busy        = nfp_bpf_tc_busy,
index f49669bf6b44564cd7ed8344c53bd89a4ffe7c6d..aae1be9ed056038af454dc108bb56ea0d3969686 100644 (file)
@@ -228,6 +228,14 @@ struct nfp_prog {
        struct list_head insns;
 };
 
+/**
+ * struct nfp_bpf_vnic - per-vNIC BPF priv structure
+ * @tc_prog:   currently loaded cls_bpf program
+ */
+struct nfp_bpf_vnic {
+       struct bpf_prog *tc_prog;
+};
+
 int nfp_bpf_jit(struct nfp_prog *prog);
 
 extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
index ca74c517f6261ca89ed4bb881c4e04db07b4b5ff..b3567a596fc1427c395ce18ea5f3541d65c0ce78 100644 (file)
@@ -81,6 +81,9 @@ static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
        if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
                return tun_type == NFP_FL_TUNNEL_VXLAN;
 
+       if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
+               return tun_type == NFP_FL_TUNNEL_GENEVE;
+
        return false;
 }
 
@@ -136,11 +139,23 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
        return 0;
 }
 
-static bool nfp_fl_supported_tun_port(const struct tc_action *action)
+static enum nfp_flower_tun_type
+nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
+                               const struct tc_action *action)
 {
        struct ip_tunnel_info *tun = tcf_tunnel_info(action);
-
-       return tun->key.tp_dst == htons(NFP_FL_VXLAN_PORT);
+       struct nfp_flower_priv *priv = app->priv;
+
+       switch (tun->key.tp_dst) {
+       case htons(NFP_FL_VXLAN_PORT):
+               return NFP_FL_TUNNEL_VXLAN;
+       case htons(NFP_FL_GENEVE_PORT):
+               if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
+                       return NFP_FL_TUNNEL_GENEVE;
+               /* FALLTHROUGH */
+       default:
+               return NFP_FL_TUNNEL_NONE;
+       }
 }
 
 static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
@@ -165,38 +180,33 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
 }
 
 static int
-nfp_fl_set_vxlan(struct nfp_fl_set_vxlan *set_vxlan,
-                const struct tc_action *action,
-                struct nfp_fl_pre_tunnel *pre_tun)
+nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
+                       const struct tc_action *action,
+                       struct nfp_fl_pre_tunnel *pre_tun,
+                       enum nfp_flower_tun_type tun_type)
 {
-       struct ip_tunnel_info *vxlan = tcf_tunnel_info(action);
-       size_t act_size = sizeof(struct nfp_fl_set_vxlan);
-       u32 tmp_set_vxlan_type_index = 0;
+       size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
+       struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
+       u32 tmp_set_ip_tun_type_index = 0;
        /* Currently support one pre-tunnel so index is always 0. */
        int pretun_idx = 0;
 
-       if (vxlan->options_len) {
-               /* Do not support options e.g. vxlan gpe. */
+       if (ip_tun->options_len)
                return -EOPNOTSUPP;
-       }
 
-       set_vxlan->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
-       set_vxlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+       set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
+       set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
 
        /* Set tunnel type and pre-tunnel index. */
-       tmp_set_vxlan_type_index |=
-               FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, NFP_FL_TUNNEL_VXLAN) |
+       tmp_set_ip_tun_type_index |=
+               FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
                FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);
 
-       set_vxlan->tun_type_index = cpu_to_be32(tmp_set_vxlan_type_index);
-
-       set_vxlan->tun_id = vxlan->key.tun_id;
-       set_vxlan->tun_flags = vxlan->key.tun_flags;
-       set_vxlan->ipv4_ttl = vxlan->key.ttl;
-       set_vxlan->ipv4_tos = vxlan->key.tos;
+       set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
+       set_tun->tun_id = ip_tun->key.tun_id;
 
        /* Complete pre_tunnel action. */
-       pre_tun->ipv4_dst = vxlan->key.u.ipv4.dst;
+       pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
 
        return 0;
 }
@@ -433,8 +443,8 @@ nfp_flower_loop_action(const struct tc_action *a,
                       struct net_device *netdev,
                       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt)
 {
+       struct nfp_fl_set_ipv4_udp_tun *set_tun;
        struct nfp_fl_pre_tunnel *pre_tun;
-       struct nfp_fl_set_vxlan *s_vxl;
        struct nfp_fl_push_vlan *psh_v;
        struct nfp_fl_pop_vlan *pop_v;
        struct nfp_fl_output *output;
@@ -482,26 +492,29 @@ nfp_flower_loop_action(const struct tc_action *a,
 
                nfp_fl_push_vlan(psh_v, a);
                *a_len += sizeof(struct nfp_fl_push_vlan);
-       } else if (is_tcf_tunnel_set(a) && nfp_fl_supported_tun_port(a)) {
+       } else if (is_tcf_tunnel_set(a)) {
+               struct nfp_repr *repr = netdev_priv(netdev);
+               *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
+               if (*tun_type == NFP_FL_TUNNEL_NONE)
+                       return -EOPNOTSUPP;
+
                /* Pre-tunnel action is required for tunnel encap.
                 * This checks for next hop entries on NFP.
                 * If none, the packet falls back before applying other actions.
                 */
                if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
-                   sizeof(struct nfp_fl_set_vxlan) > NFP_FL_MAX_A_SIZ)
+                   sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
                        return -EOPNOTSUPP;
 
-               *tun_type = NFP_FL_TUNNEL_VXLAN;
                pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
                *a_len += sizeof(struct nfp_fl_pre_tunnel);
 
-               s_vxl = (struct nfp_fl_set_vxlan *)&nfp_fl->action_data[*a_len];
-               err = nfp_fl_set_vxlan(s_vxl, a, pre_tun);
+               set_tun = (void *)&nfp_fl->action_data[*a_len];
+               err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type);
                if (err)
                        return err;
-
-               *a_len += sizeof(struct nfp_fl_set_vxlan);
+               *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
        } else if (is_tcf_tunnel_release(a)) {
                /* Tunnel decap is handled by default so accept action. */
                return 0;
index d6b63c8f14da80555cfb98ad335640542f35c49d..992d2eec1019217ad1ccc59f0f1dfcaa5b8de73e 100644 (file)
@@ -41,7 +41,7 @@
 #include "../nfp_app.h"
 #include "../nfpcore/nfp_cpp.h"
 
-#define NFP_FLOWER_LAYER_META          BIT(0)
+#define NFP_FLOWER_LAYER_EXT_META      BIT(0)
 #define NFP_FLOWER_LAYER_PORT          BIT(1)
 #define NFP_FLOWER_LAYER_MAC           BIT(2)
 #define NFP_FLOWER_LAYER_TP            BIT(3)
@@ -50,6 +50,8 @@
 #define NFP_FLOWER_LAYER_CT            BIT(6)
 #define NFP_FLOWER_LAYER_VXLAN         BIT(7)
 
+#define NFP_FLOWER_LAYER2_GENEVE       BIT(5)
+
 #define NFP_FLOWER_MASK_VLAN_PRIO      GENMASK(15, 13)
 #define NFP_FLOWER_MASK_VLAN_CFI       BIT(12)
 #define NFP_FLOWER_MASK_VLAN_VID       GENMASK(11, 0)
 enum nfp_flower_tun_type {
        NFP_FL_TUNNEL_NONE =    0,
        NFP_FL_TUNNEL_VXLAN =   2,
+       NFP_FL_TUNNEL_GENEVE =  4,
 };
 
 struct nfp_fl_act_head {
@@ -170,16 +173,13 @@ struct nfp_fl_pre_tunnel {
        __be32 extra[3];
 };
 
-struct nfp_fl_set_vxlan {
+struct nfp_fl_set_ipv4_udp_tun {
        struct nfp_fl_act_head head;
        __be16 reserved;
-       __be64 tun_id;
+       __be64 tun_id __packed;
        __be32 tun_type_index;
-       __be16 tun_flags;
-       u8 ipv4_ttl;
-       u8 ipv4_tos;
-       __be32 extra[2];
-} __packed;
+       __be32 extra[3];
+};
 
 /* Metadata with L2 (1W/4B)
  * ----------------------------------------------------------------
@@ -198,6 +198,18 @@ struct nfp_flower_meta_tci {
        __be16 tci;
 };
 
+/* Extended metadata for additional key_layers (1W/4B)
+ * ----------------------------------------------------------------
+ *    3                   2                   1
+ *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                      nfp_flow_key_layer2                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ext_meta {
+       __be32 nfp_flow_key_layer2;
+};
+
 /* Port details (1W/4B)
  * ----------------------------------------------------------------
  *    3                   2                   1
@@ -296,7 +308,7 @@ struct nfp_flower_ipv6 {
        struct in6_addr ipv6_dst;
 };
 
-/* Flow Frame VXLAN --> Tunnel details (4W/16B)
+/* Flow Frame IPv4 UDP TUNNEL --> Tunnel details (4W/16B)
  * -----------------------------------------------------------------
  *    3                   2                   1
  *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
@@ -305,22 +317,17 @@ struct nfp_flower_ipv6 {
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  * |                         ipv4_addr_dst                         |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * |           tun_flags           |       tos     |       ttl     |
+ * |                            Reserved                           |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * |   gpe_flags   |            Reserved           | Next Protocol |
+ * |                            Reserved                           |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  * |                     VNI                       |   Reserved    |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  */
-struct nfp_flower_vxlan {
+struct nfp_flower_ipv4_udp_tun {
        __be32 ip_src;
        __be32 ip_dst;
-       __be16 tun_flags;
-       u8 tos;
-       u8 ttl;
-       u8 gpe_flags;
-       u8 reserved[2];
-       u8 nxt_proto;
+       __be32 reserved[2];
        __be32 tun_id;
 };
 
index 8fcc90c0d2d3ad1a3f48348ca2d9c02e190133cb..63160e9754d4afb478f742bfee5364af9326a144 100644 (file)
@@ -381,7 +381,7 @@ static int nfp_flower_init(struct nfp_app *app)
 {
        const struct nfp_pf *pf = app->pf;
        struct nfp_flower_priv *app_priv;
-       u64 version;
+       u64 version, features;
        int err;
 
        if (!pf->eth_tbl) {
@@ -424,6 +424,14 @@ static int nfp_flower_init(struct nfp_app *app)
        if (err)
                goto err_free_app_priv;
 
+       /* Extract the extra features supported by the firmware. */
+       features = nfp_rtsym_read_le(app->pf->rtbl,
+                                    "_abi_flower_extra_features", &err);
+       if (err)
+               app_priv->flower_ext_feats = 0;
+       else
+               app_priv->flower_ext_feats = features;
+
        return 0;
 
 err_free_app_priv:
index e6b26c5ae6e0f11b8bdbe5593603e117bd1d5a4a..6e3937a0b708b57394b395dc303cb427f3033118 100644 (file)
@@ -34,6 +34,8 @@
 #ifndef __NFP_FLOWER_H__
 #define __NFP_FLOWER_H__ 1
 
+#include "cmsg.h"
+
 #include <linux/circ_buf.h>
 #include <linux/hashtable.h>
 #include <linux/time64.h>
@@ -58,6 +60,10 @@ struct nfp_app;
 #define NFP_FL_MASK_ID_LOCATION                1
 
 #define NFP_FL_VXLAN_PORT              4789
+#define NFP_FL_GENEVE_PORT             6081
+
+/* Extra features bitmap. */
+#define NFP_FL_FEATS_GENEVE            BIT(0)
 
 struct nfp_fl_mask_id {
        struct circ_buf mask_id_free_list;
@@ -77,6 +83,7 @@ struct nfp_fl_stats_id {
  * @nn:                        Pointer to vNIC
  * @mask_id_seed:      Seed used for mask hash table
  * @flower_version:    HW version of flower
+ * @flower_ext_feats:  Bitmap of extra features the HW supports
  * @stats_ids:         List of free stats ids
  * @mask_ids:          List of free mask ids
  * @mask_table:                Hash table used to store masks
@@ -101,6 +108,7 @@ struct nfp_flower_priv {
        struct nfp_net *nn;
        u32 mask_id_seed;
        u64 flower_version;
+       u64 flower_ext_feats;
        struct nfp_fl_stats_id stats_ids;
        struct nfp_fl_mask_id mask_ids;
        DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
@@ -172,7 +180,8 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
 int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
                                  struct nfp_fl_key_ls *key_ls,
                                  struct net_device *netdev,
-                                 struct nfp_fl_payload *nfp_flow);
+                                 struct nfp_fl_payload *nfp_flow,
+                                 enum nfp_flower_tun_type tun_type);
 int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
                              struct net_device *netdev,
                              struct nfp_fl_payload *nfp_flow);
index 1f2b879e12d4530bd74eff18daf53119be6fd7e5..37c2ecae2a7a53487374f5e06e278af83a6a5fab 100644 (file)
@@ -67,6 +67,12 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
        }
 }
 
+static void
+nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
+{
+       frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
+}
+
 static int
 nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
                        bool mask_version, enum nfp_flower_tun_type tun_type)
@@ -216,16 +222,15 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
 }
 
 static void
-nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
-                        struct tc_cls_flower_offload *flow,
-                        bool mask_version, __be32 *tun_dst)
+nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
+                               struct tc_cls_flower_offload *flow,
+                               bool mask_version)
 {
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
-       struct flow_dissector_key_ipv4_addrs *vxlan_ips;
+       struct flow_dissector_key_ipv4_addrs *tun_ips;
        struct flow_dissector_key_keyid *vni;
 
-       /* Wildcard TOS/TTL/GPE_FLAGS/NXT_PROTO for now. */
-       memset(frame, 0, sizeof(struct nfp_flower_vxlan));
+       memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
 
        if (dissector_uses_key(flow->dissector,
                               FLOW_DISSECTOR_KEY_ENC_KEYID)) {
@@ -240,31 +245,26 @@ nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
 
        if (dissector_uses_key(flow->dissector,
                               FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
-               vxlan_ips =
+               tun_ips =
                   skb_flow_dissector_target(flow->dissector,
                                             FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
                                             target);
-               frame->ip_src = vxlan_ips->src;
-               frame->ip_dst = vxlan_ips->dst;
-               *tun_dst = vxlan_ips->dst;
+               frame->ip_src = tun_ips->src;
+               frame->ip_dst = tun_ips->dst;
        }
 }
 
 int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
                                  struct nfp_fl_key_ls *key_ls,
                                  struct net_device *netdev,
-                                 struct nfp_fl_payload *nfp_flow)
+                                 struct nfp_fl_payload *nfp_flow,
+                                 enum nfp_flower_tun_type tun_type)
 {
-       enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
-       __be32 tun_dst, tun_dst_mask = 0;
        struct nfp_repr *netdev_repr;
        int err;
        u8 *ext;
        u8 *msk;
 
-       if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN)
-               tun_type = NFP_FL_TUNNEL_VXLAN;
-
        memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
        memset(nfp_flow->mask_data, 0, key_ls->key_size);
 
@@ -280,6 +280,16 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
        ext += sizeof(struct nfp_flower_meta_tci);
        msk += sizeof(struct nfp_flower_meta_tci);
 
+       /* Populate Extended Metadata if Required. */
+       if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
+               nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
+                                           key_ls->key_layer_two);
+               nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
+                                           key_ls->key_layer_two);
+               ext += sizeof(struct nfp_flower_ext_meta);
+               msk += sizeof(struct nfp_flower_ext_meta);
+       }
+
        /* Populate Exact Port data. */
        err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
                                      nfp_repr_get_port_id(netdev),
@@ -341,15 +351,17 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
                msk += sizeof(struct nfp_flower_ipv6);
        }
 
-       if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN) {
+       if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
+           key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
+               __be32 tun_dst;
+
                /* Populate Exact VXLAN Data. */
-               nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)ext,
-                                        flow, false, &tun_dst);
+               nfp_flower_compile_ipv4_udp_tun((void *)ext, flow, false);
                /* Populate Mask VXLAN Data. */
-               nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)msk,
-                                        flow, true, &tun_dst_mask);
-               ext += sizeof(struct nfp_flower_vxlan);
-               msk += sizeof(struct nfp_flower_vxlan);
+               nfp_flower_compile_ipv4_udp_tun((void *)msk, flow, true);
+               tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
+               ext += sizeof(struct nfp_flower_ipv4_udp_tun);
+               msk += sizeof(struct nfp_flower_ipv4_udp_tun);
 
                /* Configure tunnel end point MAC. */
                if (nfp_netdev_is_nfp_repr(netdev)) {
index 98fb1cba3ed905c2e506a1117466ad02a2ee1404..837134a9137c805860246c5918f80ef5d51024c6 100644 (file)
@@ -130,12 +130,15 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
 }
 
 static int
-nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
+nfp_flower_calculate_key_layers(struct nfp_app *app,
+                               struct nfp_fl_key_ls *ret_key_ls,
                                struct tc_cls_flower_offload *flow,
-                               bool egress)
+                               bool egress,
+                               enum nfp_flower_tun_type *tun_type)
 {
        struct flow_dissector_key_basic *mask_basic = NULL;
        struct flow_dissector_key_basic *key_basic = NULL;
+       struct nfp_flower_priv *priv = app->priv;
        u32 key_layer_two;
        u8 key_layer;
        int key_size;
@@ -197,12 +200,27 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
                                                  FLOW_DISSECTOR_KEY_ENC_PORTS,
                                                  flow->key);
 
-               if (mask_enc_ports->dst != cpu_to_be16(~0) ||
-                   enc_ports->dst != htons(NFP_FL_VXLAN_PORT))
+               if (mask_enc_ports->dst != cpu_to_be16(~0))
                        return -EOPNOTSUPP;
 
-               key_layer |= NFP_FLOWER_LAYER_VXLAN;
-               key_size += sizeof(struct nfp_flower_vxlan);
+               switch (enc_ports->dst) {
+               case htons(NFP_FL_VXLAN_PORT):
+                       *tun_type = NFP_FL_TUNNEL_VXLAN;
+                       key_layer |= NFP_FLOWER_LAYER_VXLAN;
+                       key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+                       break;
+               case htons(NFP_FL_GENEVE_PORT):
+                       if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
+                               return -EOPNOTSUPP;
+                       *tun_type = NFP_FL_TUNNEL_GENEVE;
+                       key_layer |= NFP_FLOWER_LAYER_EXT_META;
+                       key_size += sizeof(struct nfp_flower_ext_meta);
+                       key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
+                       key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+                       break;
+               default:
+                       return -EOPNOTSUPP;
+               }
        } else if (egress) {
                /* Reject non tunnel matches offloaded to egress repr. */
                return -EOPNOTSUPP;
@@ -330,6 +348,7 @@ static int
 nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
                       struct tc_cls_flower_offload *flow, bool egress)
 {
+       enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload *flow_pay;
        struct nfp_fl_key_ls *key_layer;
@@ -339,7 +358,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
        if (!key_layer)
                return -ENOMEM;
 
-       err = nfp_flower_calculate_key_layers(key_layer, flow, egress);
+       err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
+                                             &tun_type);
        if (err)
                goto err_free_key_ls;
 
@@ -349,7 +369,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
                goto err_free_key_ls;
        }
 
-       err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay);
+       err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
+                                           tun_type);
        if (err)
                goto err_destroy_flow;
 
index cadea673a40f7304d6345c95abc806e3eade07f7..49d6d789459ed3460c5731002d94ad7272dbdeb4 100644 (file)
@@ -824,7 +824,7 @@ struct fe_priv {
         */
        union ring_type get_tx, put_tx, last_tx;
        struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
-       struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
+       struct nv_skb_map *last_tx_ctx;
        struct nv_skb_map *tx_skb;
 
        union ring_type tx_ring;
@@ -1939,7 +1939,8 @@ static void nv_init_tx(struct net_device *dev)
                np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
        else
                np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
-       np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
+       np->get_tx_ctx = np->tx_skb;
+       np->put_tx_ctx = np->tx_skb;
        np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
        netdev_reset_queue(np->dev);
        np->tx_pkts_in_progress = 0;
@@ -2251,7 +2252,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
                if (unlikely(put_tx++ == np->last_tx.orig))
                        put_tx = np->tx_ring.orig;
                if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-                       np->put_tx_ctx = np->first_tx_ctx;
+                       np->put_tx_ctx = np->tx_skb;
        } while (size);
 
        /* setup the fragments */
@@ -2277,7 +2278,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                do {
                                        nv_unmap_txskb(np, start_tx_ctx);
                                        if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
-                                               tmp_tx_ctx = np->first_tx_ctx;
+                                               tmp_tx_ctx = np->tx_skb;
                                } while (tmp_tx_ctx != np->put_tx_ctx);
                                dev_kfree_skb_any(skb);
                                np->put_tx_ctx = start_tx_ctx;
@@ -2297,7 +2298,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        if (unlikely(put_tx++ == np->last_tx.orig))
                                put_tx = np->tx_ring.orig;
                        if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-                               np->put_tx_ctx = np->first_tx_ctx;
+                               np->put_tx_ctx = np->tx_skb;
                } while (frag_size);
        }
 
@@ -2306,7 +2307,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
        else
                prev_tx = put_tx - 1;
 
-       if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+       if (unlikely(np->put_tx_ctx == np->tx_skb))
                prev_tx_ctx = np->last_tx_ctx;
        else
                prev_tx_ctx = np->put_tx_ctx - 1;
@@ -2409,7 +2410,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
                if (unlikely(put_tx++ == np->last_tx.ex))
                        put_tx = np->tx_ring.ex;
                if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-                       np->put_tx_ctx = np->first_tx_ctx;
+                       np->put_tx_ctx = np->tx_skb;
        } while (size);
 
        /* setup the fragments */
@@ -2435,7 +2436,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
                                do {
                                        nv_unmap_txskb(np, start_tx_ctx);
                                        if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
-                                               tmp_tx_ctx = np->first_tx_ctx;
+                                               tmp_tx_ctx = np->tx_skb;
                                } while (tmp_tx_ctx != np->put_tx_ctx);
                                dev_kfree_skb_any(skb);
                                np->put_tx_ctx = start_tx_ctx;
@@ -2455,7 +2456,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
                        if (unlikely(put_tx++ == np->last_tx.ex))
                                put_tx = np->tx_ring.ex;
                        if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-                               np->put_tx_ctx = np->first_tx_ctx;
+                               np->put_tx_ctx = np->tx_skb;
                } while (frag_size);
        }
 
@@ -2464,7 +2465,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
        else
                prev_tx = put_tx - 1;
 
-       if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+       if (unlikely(np->put_tx_ctx == np->tx_skb))
                prev_tx_ctx = np->last_tx_ctx;
        else
                prev_tx_ctx = np->put_tx_ctx - 1;
@@ -2600,7 +2601,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
                if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
                        np->get_tx.orig = np->tx_ring.orig;
                if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
-                       np->get_tx_ctx = np->first_tx_ctx;
+                       np->get_tx_ctx = np->tx_skb;
        }
 
        netdev_completed_queue(np->dev, tx_work, bytes_compl);
@@ -2654,7 +2655,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
                if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
                        np->get_tx.ex = np->tx_ring.ex;
                if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
-                       np->get_tx_ctx = np->first_tx_ctx;
+                       np->get_tx_ctx = np->tx_skb;
        }
 
        netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
index c9a55b774935cd8900435e308c441d2283e5cb39..07a2eb3781b12a307b71849e38d8cbcb6edf1a1e 100644 (file)
@@ -212,9 +212,7 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
                return -ENOENT;
        }
 
-       if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
-                  &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
-           != ETH_ALEN) {
+       if (!mac_pton(maddr, addr)) {
                dev_warn(&pdev->dev,
                         "can't parse mac address, not configuring\n");
                return -EINVAL;
index a3a70ade411fbfc1c423ec4ac42d6534a457f4ae..8a336517baac42151922c800282eaafbab271ca5 100644 (file)
@@ -494,6 +494,8 @@ int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid);
 void qede_vlan_mark_nonconfigured(struct qede_dev *edev);
 int qede_configure_vlan_filters(struct qede_dev *edev);
 
+netdev_features_t qede_fix_features(struct net_device *dev,
+                                   netdev_features_t features);
 int qede_set_features(struct net_device *dev, netdev_features_t features);
 void qede_set_rx_mode(struct net_device *ndev);
 void qede_config_rx_mode(struct net_device *ndev);
index dae74127002288e939329097b0646cc9538922c1..4ca3847fffd44cb9aa6500fb3b515459353ec67e 100644 (file)
@@ -940,6 +940,9 @@ int qede_change_mtu(struct net_device *ndev, int new_mtu)
        DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
                   "Configuring MTU size of %d\n", new_mtu);
 
+       if (new_mtu > PAGE_SIZE)
+               ndev->features &= ~NETIF_F_GRO_HW;
+
        /* Set the mtu field and re-start the interface if needed */
        args.u.mtu = new_mtu;
        args.func = &qede_update_mtu;
index c1a0708a7d7ca6a3e1ad7066ff989cc75451a0cb..77aa826227e51b8312785ebe3ef71a6ff9d23ca1 100644 (file)
@@ -895,19 +895,26 @@ static void qede_set_features_reload(struct qede_dev *edev,
        edev->ndev->features = args->u.features;
 }
 
+netdev_features_t qede_fix_features(struct net_device *dev,
+                                   netdev_features_t features)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       if (edev->xdp_prog || edev->ndev->mtu > PAGE_SIZE ||
+           !(features & NETIF_F_GRO))
+               features &= ~NETIF_F_GRO_HW;
+
+       return features;
+}
+
 int qede_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct qede_dev *edev = netdev_priv(dev);
        netdev_features_t changes = features ^ dev->features;
        bool need_reload = false;
 
-       /* No action needed if hardware GRO is disabled during driver load */
-       if (changes & NETIF_F_GRO) {
-               if (dev->features & NETIF_F_GRO)
-                       need_reload = !edev->gro_disable;
-               else
-                       need_reload = edev->gro_disable;
-       }
+       if (changes & NETIF_F_GRO_HW)
+               need_reload = true;
 
        if (need_reload) {
                struct qede_reload_args args;
index 57332b3e5e6481459d13acba5a08bd1c5a8a2197..90d79ae2a48f0a808a708e34497fa39eeba545c7 100644 (file)
@@ -545,6 +545,7 @@ static const struct net_device_ops qede_netdev_ops = {
 #endif
        .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+       .ndo_fix_features = qede_fix_features,
        .ndo_set_features = qede_set_features,
        .ndo_get_stats64 = qede_get_stats64,
 #ifdef CONFIG_QED_SRIOV
@@ -572,6 +573,7 @@ static const struct net_device_ops qede_netdev_vf_ops = {
        .ndo_change_mtu = qede_change_mtu,
        .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+       .ndo_fix_features = qede_fix_features,
        .ndo_set_features = qede_set_features,
        .ndo_get_stats64 = qede_get_stats64,
        .ndo_udp_tunnel_add = qede_udp_tunnel_add,
@@ -589,6 +591,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
        .ndo_change_mtu = qede_change_mtu,
        .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+       .ndo_fix_features = qede_fix_features,
        .ndo_set_features = qede_set_features,
        .ndo_get_stats64 = qede_get_stats64,
        .ndo_udp_tunnel_add = qede_udp_tunnel_add,
@@ -676,7 +679,7 @@ static void qede_init_ndev(struct qede_dev *edev)
        ndev->priv_flags |= IFF_UNICAST_FLT;
 
        /* user-changeble features */
-       hw_features = NETIF_F_GRO | NETIF_F_SG |
+       hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
                      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                      NETIF_F_TSO | NETIF_F_TSO6;
 
@@ -1228,18 +1231,9 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
        dma_addr_t mapping;
        int i;
 
-       /* Don't perform FW aggregations in case of XDP */
-       if (edev->xdp_prog)
-               edev->gro_disable = 1;
-
        if (edev->gro_disable)
                return 0;
 
-       if (edev->ndev->mtu > PAGE_SIZE) {
-               edev->gro_disable = 1;
-               return 0;
-       }
-
        for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
                struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
                struct sw_rx_data *replace_buf = &tpa_info->buffer;
@@ -1269,6 +1263,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
 err:
        qede_free_sge_mem(edev, rxq);
        edev->gro_disable = 1;
+       edev->ndev->features &= ~NETIF_F_GRO_HW;
        return -ENOMEM;
 }
 
@@ -1511,7 +1506,7 @@ static void qede_init_fp(struct qede_dev *edev)
                         edev->ndev->name, queue_id);
        }
 
-       edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
+       edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
 }
 
 static int qede_set_real_num_queues(struct qede_dev *edev)
index 70c92b649b299a1a16195e144e3da77cff25855c..38c924bdd32e46f3586eac77d5a63c361993fd09 100644 (file)
@@ -253,18 +253,18 @@ static int emac_open(struct net_device *netdev)
                return ret;
        }
 
-       ret = emac_mac_up(adpt);
+       ret = adpt->phy.open(adpt);
        if (ret) {
                emac_mac_rx_tx_rings_free_all(adpt);
                free_irq(irq->irq, irq);
                return ret;
        }
 
-       ret = adpt->phy.open(adpt);
+       ret = emac_mac_up(adpt);
        if (ret) {
-               emac_mac_down(adpt);
                emac_mac_rx_tx_rings_free_all(adpt);
                free_irq(irq->irq, irq);
+               adpt->phy.close(adpt);
                return ret;
        }
 
index 1f64c7f60943ae71828ae69df740e01b3c7c8ef3..8ae467db91620bee8d8444e19790ff6db9221eb0 100644 (file)
@@ -233,7 +233,7 @@ static int efx_ef10_get_vf_index(struct efx_nic *efx)
 
 static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
 {
-       MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V3_OUT_LEN);
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN);
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        size_t outlen;
        int rc;
@@ -306,6 +306,19 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
                          efx->vi_stride);
        }
 
+       if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
+               efx->num_mac_stats = MCDI_WORD(outbuf,
+                               GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
+               netif_dbg(efx, probe, efx->net_dev,
+                         "firmware reports num_mac_stats = %u\n",
+                         efx->num_mac_stats);
+       } else {
+               /* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */
+               netif_dbg(efx, probe, efx->net_dev,
+                         "firmware did not report num_mac_stats, assuming %u\n",
+                         efx->num_mac_stats);
+       }
+
        return 0;
 }
 
@@ -1630,6 +1643,29 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
        EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
        EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
        EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
+       EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS),
+       EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS),
+       EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0),
+       EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1),
+       EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2),
+       EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3),
+       EF10_DMA_STAT(ctpio_dmabuf_start, CTPIO_DMABUF_START),
+       EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK),
+       EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS),
+       EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL),
+       EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL),
+       EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL),
+       EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL),
+       EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL),
+       EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL),
+       EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL),
+       EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK),
+       EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK),
+       EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK),
+       EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS),
+       EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK),
+       EF10_DMA_STAT(ctpio_poison, CTPIO_POISON),
+       EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE),
 };
 
 #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) |     \
@@ -1705,6 +1741,43 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
        (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) |                      \
        (1ULL << EF10_STAT_port_rx_dp_hlb_wait))
 
+/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2,
+ * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in
+ * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
+ * These bits are in the second u64 of the raw mask.
+ */
+#define EF10_FEC_STAT_MASK (                                           \
+       (1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) |             \
+       (1ULL << (EF10_STAT_fec_corrected_errors - 64)) |               \
+       (1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) |        \
+       (1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) |        \
+       (1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) |        \
+       (1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64)))
+
+/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3,
+ * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in
+ * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
+ * These bits are in the second u64 of the raw mask.
+ */
+#define EF10_CTPIO_STAT_MASK (                                         \
+       (1ULL << (EF10_STAT_ctpio_dmabuf_start - 64)) |                 \
+       (1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) |             \
+       (1ULL << (EF10_STAT_ctpio_long_write_success - 64)) |           \
+       (1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) |           \
+       (1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) |                \
+       (1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) |               \
+       (1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) |                 \
+       (1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) |            \
+       (1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) |             \
+       (1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) |              \
+       (1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) |          \
+       (1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) |         \
+       (1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) |                \
+       (1ULL << (EF10_STAT_ctpio_success - 64)) |                      \
+       (1ULL << (EF10_STAT_ctpio_fallback - 64)) |                     \
+       (1ULL << (EF10_STAT_ctpio_poison - 64)) |                       \
+       (1ULL << (EF10_STAT_ctpio_erase - 64)))
+
 static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
 {
        u64 raw_mask = HUNT_COMMON_STAT_MASK;
@@ -1743,10 +1816,22 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
        if (nic_data->datapath_caps &
            (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
                raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
-               raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1;
+               raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1;
        } else {
                raw_mask[1] = 0;
        }
+       /* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */
+       if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2)
+               raw_mask[1] |= EF10_FEC_STAT_MASK;
+
+       /* CTPIO stats appear in V3. Only show them on devices that actually
+        * support CTPIO. Although this driver doesn't use CTPIO others might,
+        * and we may be reporting the stats for the underlying port.
+        */
+       if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 &&
+           (nic_data->datapath_caps2 &
+            (1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN)))
+               raw_mask[1] |= EF10_CTPIO_STAT_MASK;
 
 #if BITS_PER_LONG == 64
        BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
@@ -1850,7 +1935,7 @@ static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
 
        dma_stats = efx->stats_buffer.addr;
 
-       generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+       generation_end = dma_stats[efx->num_mac_stats - 1];
        if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
                return 0;
        rmb();
@@ -1898,7 +1983,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
        DECLARE_BITMAP(mask, EF10_STAT_COUNT);
        __le64 generation_start, generation_end;
        u64 *stats = nic_data->stats;
-       u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
+       u32 dma_len = efx->num_mac_stats * sizeof(u64);
        struct efx_buffer stats_buf;
        __le64 *dma_stats;
        int rc;
@@ -1923,7 +2008,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
        }
 
        dma_stats = stats_buf.addr;
-       dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+       dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
 
        MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
        MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
@@ -1942,7 +2027,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
                goto out;
        }
 
-       generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+       generation_end = dma_stats[efx->num_mac_stats - 1];
        if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
                WARN_ON_ONCE(1);
                goto out;
index 7bcbedce07a540edd81a8d14f33e9f60adcaa247..3780161de5a1343b716c3f7f0a55934b48c7e551 100644 (file)
@@ -2983,6 +2983,8 @@ static int efx_init_struct(struct efx_nic *efx,
                efx->type->rx_ts_offset - efx->type->rx_prefix_size;
        spin_lock_init(&efx->stats_lock);
        efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
+       efx->num_mac_stats = MC_CMD_MAC_NSTATS;
+       BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
        mutex_init(&efx->mac_lock);
        efx->phy_op = &efx_dummy_phy_operations;
        efx->mdio.dev = net_dev;
index 91fb54fd03d9b44e98202e188eb29bf4db5b8972..869d76f8f589b7f6a933c32d61c808d060cf6366 100644 (file)
 #define MCDI_HEADER_XFLAGS_WIDTH 8
 /* Request response using event */
 #define MCDI_HEADER_XFLAGS_EVREQ 0x01
+/* Request (and signal) early doorbell return */
+#define MCDI_HEADER_XFLAGS_DBRET 0x02
 
 /* Maximum number of payload bytes */
 #define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
 
 
 /* The MC can generate events for two reasons:
- *   - To complete a shared memory request if XFLAGS_EVREQ was set
+ *   - To advance a shared memory request if XFLAGS_EVREQ was set
  *   - As a notification (link state, i2c event), controlled
  *     via MC_CMD_LOG_CTRL
  *
 /* Returned by MC_CMD_TESTASSERT if the action that should
  * have caused an assertion failed to do so.  */
 #define MC_CMD_ERR_UNREACHABLE 0x1016
+/* This command needs to be processed in the background but there were no
+ * resources to do so. Send it again after a command has completed. */
+#define MC_CMD_ERR_QUEUE_FULL 0x1017
+/* The operation could not be completed because the PCIe link has gone
+ * away.  This error code is never expected to be returned over the TLP
+ * transport. */
+#define MC_CMD_ERR_NO_PCIE 0x1018
+/* The operation could not be completed because the datapath has gone
+ * away.  This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
+ * datapath absence may be temporary. */
+#define MC_CMD_ERR_NO_DATAPATH 0x1019
 
 #define MC_CMD_ERR_CODE_OFST 0
 
 /* enum: Fatal. */
 #define          MCDI_EVENT_LEVEL_FATAL 0x3
 #define       MCDI_EVENT_DATA_OFST 0
+#define       MCDI_EVENT_DATA_LEN 4
 #define        MCDI_EVENT_CMDDONE_SEQ_LBN 0
 #define        MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
 #define        MCDI_EVENT_CMDDONE_DATALEN_LBN 8
 #define        MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
 #define        MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
 #define        MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+/* enum: Link is down or link speed could not be determined */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_UNKNOWN  0x0
 /* enum: 100Mbs */
 #define          MCDI_EVENT_LINKCHANGE_SPEED_100M  0x1
 /* enum: 1Gbs */
 #define          MCDI_EVENT_LINKCHANGE_SPEED_10G  0x3
 /* enum: 40Gbs */
 #define          MCDI_EVENT_LINKCHANGE_SPEED_40G  0x4
+/* enum: 25Gbs */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_25G  0x5
+/* enum: 50Gbs */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_50G  0x6
+/* enum: 100Gbs */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_100G  0x7
 #define        MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
 #define        MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
 #define        MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
 #define          MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
 /* enum: PTP status update */
 #define          MCDI_EVENT_AOE_PTP_STATUS 0xb
+/* enum: FPGA header incorrect */
+#define          MCDI_EVENT_AOE_FPGA_LOAD_HEADER_ERR 0xc
+/* enum: FPGA Powered Off due to error in powering up FPGA */
+#define          MCDI_EVENT_AOE_FPGA_POWER_OFF 0xd
+/* enum: AOE FPGA load failed due to MC to MUM communication failure */
+#define          MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe
+/* enum: Notify that invalid flash type detected */
+#define          MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
+/* enum: Notify that the attempt to run FPGA Controller firmware timed out */
+#define          MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
+/* enum: Failure to probe one or more FPGA boot flash chips */
+#define          MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11
+/* enum: FPGA boot-flash contains an invalid image header */
+#define          MCDI_EVENT_AOE_FPGA_BOOT_FLASH_HDR_INVALID 0x12
+/* enum: Failed to program clocks required by the FPGA */
+#define          MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13
+/* enum: Notify that FPGA Controller is alive to serve MCDI requests */
+#define          MCDI_EVENT_AOE_FC_RUNNING 0x14
 #define        MCDI_EVENT_AOE_ERR_DATA_LBN 8
 #define        MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define        MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8
+#define        MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8
+/* enum: FC Assert happened, but the register information is not available */
+#define          MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0
+/* enum: The register information for FC Assert is ready for reading by driver
+ */
+#define          MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8
+/* enum: Reading from NV failed */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_NV_READ_FAIL 0x0
+/* enum: Invalid Magic Number in FPGA header */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_MAGIC_FAIL 0x1
+/* enum: Invalid Silicon type detected in header */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_SILICON_TYPE 0x2
+/* enum: Unsupported VRatio */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_VRATIO 0x3
+/* enum: Unsupported DDR Type */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_TYPE 0x4
+/* enum: DDR Voltage out of supported range */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_VOLTAGE 0x5
+/* enum: Unsupported DDR speed */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SPEED 0x6
+/* enum: Unsupported DDR size */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SIZE 0x7
+/* enum: Unsupported DDR rank */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_RANK 0x8
+#define        MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_LBN 8
+#define        MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_WIDTH 8
+/* enum: Primary boot flash */
+#define          MCDI_EVENT_AOE_FLASH_TYPE_BOOT_PRIMARY 0x0
+/* enum: Secondary boot flash */
+#define          MCDI_EVENT_AOE_FLASH_TYPE_BOOT_SECONDARY 0x1
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_LBN 8
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_WIDTH 8
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_LBN 8
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_WIDTH 8
 #define        MCDI_EVENT_RX_ERR_RXQ_LBN 0
 #define        MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
 #define        MCDI_EVENT_RX_ERR_TYPE_LBN 12
 #define          MCDI_EVENT_MUM_WATCHDOG 0x3
 #define        MCDI_EVENT_MUM_ERR_DATA_LBN 8
 #define        MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
+#define        MCDI_EVENT_DBRET_SEQ_LBN 0
+#define        MCDI_EVENT_DBRET_SEQ_WIDTH 8
+#define        MCDI_EVENT_SUC_ERR_TYPE_LBN 0
+#define        MCDI_EVENT_SUC_ERR_TYPE_WIDTH 8
+/* enum: Corrupted or bad SUC application. */
+#define          MCDI_EVENT_SUC_BAD_APP 0x1
+/* enum: SUC application reported an assert. */
+#define          MCDI_EVENT_SUC_ASSERT 0x2
+/* enum: SUC application reported an exception. */
+#define          MCDI_EVENT_SUC_EXCEPTION 0x3
+/* enum: SUC watchdog timer expired. */
+#define          MCDI_EVENT_SUC_WATCHDOG 0x4
+#define        MCDI_EVENT_SUC_ERR_ADDRESS_LBN 8
+#define        MCDI_EVENT_SUC_ERR_ADDRESS_WIDTH 24
+#define        MCDI_EVENT_SUC_ERR_DATA_LBN 8
+#define        MCDI_EVENT_SUC_ERR_DATA_WIDTH 24
 #define       MCDI_EVENT_DATA_LBN 0
 #define       MCDI_EVENT_DATA_WIDTH 32
 #define       MCDI_EVENT_SRC_LBN 36
  * been processed and it may now resend the command
  */
 #define          MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
+/* enum: MCDI command accepted. New commands can be issued but this command is
+ * not done yet.
+ */
+#define          MCDI_EVENT_CODE_DBRET 0x1e
+/* enum: The MC has detected a fault on the SUC */
+#define          MCDI_EVENT_CODE_SUC 0x1f
 /* enum: Artificial event generated by host and posted via MC for test
  * purposes.
  */
 #define          MCDI_EVENT_CODE_TESTGEN  0xfa
 #define       MCDI_EVENT_CMDDONE_DATA_OFST 0
+#define       MCDI_EVENT_CMDDONE_DATA_LEN 4
 #define       MCDI_EVENT_CMDDONE_DATA_LBN 0
 #define       MCDI_EVENT_CMDDONE_DATA_WIDTH 32
 #define       MCDI_EVENT_LINKCHANGE_DATA_OFST 0
+#define       MCDI_EVENT_LINKCHANGE_DATA_LEN 4
 #define       MCDI_EVENT_LINKCHANGE_DATA_LBN 0
 #define       MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
 #define       MCDI_EVENT_SENSOREVT_DATA_OFST 0
+#define       MCDI_EVENT_SENSOREVT_DATA_LEN 4
 #define       MCDI_EVENT_SENSOREVT_DATA_LBN 0
 #define       MCDI_EVENT_SENSOREVT_DATA_WIDTH 32
 #define       MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0
+#define       MCDI_EVENT_MAC_STATS_DMA_GENERATION_LEN 4
 #define       MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
 #define       MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
 #define       MCDI_EVENT_TX_ERR_DATA_OFST 0
+#define       MCDI_EVENT_TX_ERR_DATA_LEN 4
 #define       MCDI_EVENT_TX_ERR_DATA_LBN 0
 #define       MCDI_EVENT_TX_ERR_DATA_WIDTH 32
 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
  * timestamp
  */
 #define       MCDI_EVENT_PTP_SECONDS_OFST 0
+#define       MCDI_EVENT_PTP_SECONDS_LEN 4
 #define       MCDI_EVENT_PTP_SECONDS_LBN 0
 #define       MCDI_EVENT_PTP_SECONDS_WIDTH 32
 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
  * timestamp
  */
 #define       MCDI_EVENT_PTP_MAJOR_OFST 0
+#define       MCDI_EVENT_PTP_MAJOR_LEN 4
 #define       MCDI_EVENT_PTP_MAJOR_LBN 0
 #define       MCDI_EVENT_PTP_MAJOR_WIDTH 32
 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
  * of timestamp
  */
 #define       MCDI_EVENT_PTP_NANOSECONDS_OFST 0
+#define       MCDI_EVENT_PTP_NANOSECONDS_LEN 4
 #define       MCDI_EVENT_PTP_NANOSECONDS_LBN 0
 #define       MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
  * timestamp
  */
 #define       MCDI_EVENT_PTP_MINOR_OFST 0
+#define       MCDI_EVENT_PTP_MINOR_LEN 4
 #define       MCDI_EVENT_PTP_MINOR_LBN 0
 #define       MCDI_EVENT_PTP_MINOR_WIDTH 32
 /* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
  */
 #define       MCDI_EVENT_PTP_UUID_OFST 0
+#define       MCDI_EVENT_PTP_UUID_LEN 4
 #define       MCDI_EVENT_PTP_UUID_LBN 0
 #define       MCDI_EVENT_PTP_UUID_WIDTH 32
 #define       MCDI_EVENT_RX_ERR_DATA_OFST 0
+#define       MCDI_EVENT_RX_ERR_DATA_LEN 4
 #define       MCDI_EVENT_RX_ERR_DATA_LBN 0
 #define       MCDI_EVENT_RX_ERR_DATA_WIDTH 32
 #define       MCDI_EVENT_PAR_ERR_DATA_OFST 0
+#define       MCDI_EVENT_PAR_ERR_DATA_LEN 4
 #define       MCDI_EVENT_PAR_ERR_DATA_LBN 0
 #define       MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
 #define       MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
+#define       MCDI_EVENT_ECC_CORR_ERR_DATA_LEN 4
 #define       MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
 #define       MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
 #define       MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
+#define       MCDI_EVENT_ECC_FATAL_ERR_DATA_LEN 4
 #define       MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
 #define       MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
 /* For CODE_PTP_TIME events, the major value of the PTP clock */
 #define       MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define       MCDI_EVENT_PTP_TIME_MAJOR_LEN 4
 #define       MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
 #define       MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
 /* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_19.
+ */
+#define       MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_LBN 36
+#define       MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_WIDTH 8
 /* For CODE_PTP_TIME events where report sync status is enabled, indicates
  * whether the NIC clock has ever been set
  */
  */
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_21.
+ */
+#define       MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_LBN 38
+#define       MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_WIDTH 6
 #define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LEN 4
 #define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
 #define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
 #define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_LEN 4
 #define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
 #define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
 /* Zero means that the request has been completed or authorized, and the driver
  */
 #define       MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
 #define       MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
+#define       MCDI_EVENT_DBRET_DATA_OFST 0
+#define       MCDI_EVENT_DBRET_DATA_LEN 4
+#define       MCDI_EVENT_DBRET_DATA_LBN 0
+#define       MCDI_EVENT_DBRET_DATA_WIDTH 32
 
 /* FCDI_EVENT structuredef */
 #define    FCDI_EVENT_LEN 8
 /* enum: Fatal. */
 #define          FCDI_EVENT_LEVEL_FATAL 0x3
 #define       FCDI_EVENT_DATA_OFST 0
+#define       FCDI_EVENT_DATA_LEN 4
 #define        FCDI_EVENT_LINK_STATE_STATUS_LBN 0
 #define        FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
 #define          FCDI_EVENT_LINK_DOWN 0x0 /* enum */
 #define          FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
 #define          FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
 #define       FCDI_EVENT_ASSERT_TYPE_LBN 36
 #define       FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
 #define       FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
 #define       FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define       FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4
 #define       FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
 #define       FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
 #define       FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define       FCDI_EVENT_LINK_STATE_DATA_LEN 4
 #define       FCDI_EVENT_LINK_STATE_DATA_LBN 0
 #define       FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
 #define       FCDI_EVENT_PTP_STATE_OFST 0
+#define       FCDI_EVENT_PTP_STATE_LEN 4
 #define          FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
 #define          FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
 #define          FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
 #define       FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
 #define       FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
 /* Index of MC port being referred to */
 #define       FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
 /* FC Port index that matches the MC port index in SRC */
 #define       FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define       FCDI_EVENT_PORT_CONFIG_DATA_LEN 4
 #define       FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
 #define       FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
 #define       FCDI_EVENT_BOOT_RESULT_OFST 0
+#define       FCDI_EVENT_BOOT_RESULT_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
 #define       FCDI_EVENT_BOOT_RESULT_LBN 0
 #define    FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
 /* Number of timestamps following */
 #define       FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define       FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4
 #define       FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
 #define       FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
 /* Seconds field of a timestamp record */
 #define       FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define       FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4
 #define       FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
 #define       FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
 /* Nanoseconds field of a timestamp record */
 #define       FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define       FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4
 #define       FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
 #define       FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
 /* Timestamp records comprising the event */
 /* enum: Fatal. */
 #define          MUM_EVENT_LEVEL_FATAL 0x3
 #define       MUM_EVENT_DATA_OFST 0
+#define       MUM_EVENT_DATA_LEN 4
 #define        MUM_EVENT_SENSOR_ID_LBN 0
 #define        MUM_EVENT_SENSOR_ID_WIDTH 8
 /*             Enum values, see field(s): */
 /* enum: Link fault has been asserted, or has cleared. */
 #define          MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
 #define       MUM_EVENT_SENSOR_DATA_OFST 0
+#define       MUM_EVENT_SENSOR_DATA_LEN 4
 #define       MUM_EVENT_SENSOR_DATA_LBN 0
 #define       MUM_EVENT_SENSOR_DATA_WIDTH 32
 #define       MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define       MUM_EVENT_PORT_PHY_FLAGS_LEN 4
 #define       MUM_EVENT_PORT_PHY_FLAGS_LBN 0
 #define       MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
 #define       MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define       MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4
 #define       MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
 #define       MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
 #define       MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define       MUM_EVENT_PORT_PHY_CAPS_LEN 4
 #define       MUM_EVENT_PORT_PHY_CAPS_LBN 0
 #define       MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
 #define       MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define       MUM_EVENT_PORT_PHY_TECH_LEN 4
 #define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
 #define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
 #define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
 /* MC_CMD_READ32_IN msgrequest */
 #define    MC_CMD_READ32_IN_LEN 8
 #define       MC_CMD_READ32_IN_ADDR_OFST 0
+#define       MC_CMD_READ32_IN_ADDR_LEN 4
 #define       MC_CMD_READ32_IN_NUMWORDS_OFST 4
+#define       MC_CMD_READ32_IN_NUMWORDS_LEN 4
 
 /* MC_CMD_READ32_OUT msgresponse */
 #define    MC_CMD_READ32_OUT_LENMIN 4
  */
 #define MC_CMD_WRITE32 0x2
 
-#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_WRITE32_IN msgrequest */
 #define    MC_CMD_WRITE32_IN_LENMIN 8
 #define    MC_CMD_WRITE32_IN_LENMAX 252
 #define    MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
 #define       MC_CMD_WRITE32_IN_ADDR_OFST 0
+#define       MC_CMD_WRITE32_IN_ADDR_LEN 4
 #define       MC_CMD_WRITE32_IN_BUFFER_OFST 4
 #define       MC_CMD_WRITE32_IN_BUFFER_LEN 4
 #define       MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
  * is a bitfield, with each bit as documented below.
  */
 #define       MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+#define       MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4
 /* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
 #define          MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
 /* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
 #define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
 /* Destination address */
 #define       MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
+#define       MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4
 #define       MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+#define       MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4
 /* Address of where to jump after copy. */
 #define       MC_CMD_COPYCODE_IN_JUMP_OFST 12
+#define       MC_CMD_COPYCODE_IN_JUMP_LEN 4
 /* enum: Control should return to the caller rather than jumping */
 #define          MC_CMD_COPYCODE_JUMP_NONE 0x1
 
  */
 #define MC_CMD_SET_FUNC 0x4
 
-#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_SET_FUNC_IN msgrequest */
 #define    MC_CMD_SET_FUNC_IN_LEN 4
 /* Set function */
 #define       MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+#define       MC_CMD_SET_FUNC_IN_FUNC_LEN 4
 
 /* MC_CMD_SET_FUNC_OUT msgresponse */
 #define    MC_CMD_SET_FUNC_OUT_LEN 0
  */
 #define MC_CMD_GET_BOOT_STATUS 0x5
 
-#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
 #define    MC_CMD_GET_BOOT_STATUS_IN_LEN 0
 #define    MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
 /* ?? */
 #define       MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+#define       MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_LEN 4
 /* enum: indicates that the MC wasn't flash booted */
 #define          MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL  0xdeadbeef
 #define       MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+#define       MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
 #define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
 #define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
 #define    MC_CMD_GET_ASSERTS_IN_LEN 4
 /* Set to clear assertion */
 #define       MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+#define       MC_CMD_GET_ASSERTS_IN_CLEAR_LEN 4
 
 /* MC_CMD_GET_ASSERTS_OUT msgresponse */
 #define    MC_CMD_GET_ASSERTS_OUT_LEN 140
 /* Assertion status flag. */
 #define       MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+#define       MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_LEN 4
 /* enum: No assertions have failed. */
 #define          MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
 /* enum: A system-level assertion has failed. */
 #define          MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
 /* Failing PC value */
 #define       MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+#define       MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_LEN 4
 /* Saved GP regs */
 #define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
 #define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
 #define          MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
 /* Failing thread address */
 #define       MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
+#define       MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_LEN 4
 #define       MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
+#define       MC_CMD_GET_ASSERTS_OUT_RESERVED_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_LOG_CTRL_IN_LEN 8
 /* Log destination */
 #define       MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
+#define       MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4
 /* enum: UART. */
 #define          MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
 /* enum: Event queue. */
 #define          MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
 /* Legacy argument. Must be zero. */
 #define       MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
+#define       MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_LEN 4
 
 /* MC_CMD_LOG_CTRL_OUT msgresponse */
 #define    MC_CMD_LOG_CTRL_OUT_LEN 0
 #define    MC_CMD_GET_VERSION_EXT_IN_LEN 4
 /* placeholder, set to 0 */
 #define       MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
+#define       MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_LEN 4
 
 /* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
 #define    MC_CMD_GET_VERSION_V0_OUT_LEN 4
 #define       MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
+#define       MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4
 /* enum: Reserved version number to indicate "any" version. */
 #define          MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
 /* enum: Bootrom version value for Siena. */
 #define          MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
 /* enum: Bootrom version value for Huntington. */
 #define          MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
+/* enum: Bootrom version value for Medford2. */
+#define          MC_CMD_GET_VERSION_OUT_FIRMWARE_MEDFORD2_BOOTROM 0xb0070002
 
 /* MC_CMD_GET_VERSION_OUT msgresponse */
 #define    MC_CMD_GET_VERSION_OUT_LEN 32
 /*            MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/*            MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
 #define       MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+#define       MC_CMD_GET_VERSION_OUT_PCOL_LEN 4
 /* 128bit mask of functions supported by the current firmware */
 #define       MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
 #define       MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
 /* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
 #define    MC_CMD_GET_VERSION_EXT_OUT_LEN 48
 /*            MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/*            MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
 #define       MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
+#define       MC_CMD_GET_VERSION_EXT_OUT_PCOL_LEN 4
 /* 128bit mask of functions supported by the current firmware */
 #define       MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
 #define       MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
 #define          MC_CMD_PTP_OP_ENABLE 0x1
 /* enum: Disable PTP packet timestamping operation. */
 #define          MC_CMD_PTP_OP_DISABLE 0x2
-/* enum: Send a PTP packet. */
+/* enum: Send a PTP packet. This operation is used on Siena and Huntington.
+ * From Medford onwards it is not supported: on those platforms PTP transmit
+ * timestamping is done using the fast path.
+ */
 #define          MC_CMD_PTP_OP_TRANSMIT 0x3
 /* enum: Read the current NIC time. */
 #define          MC_CMD_PTP_OP_READ_NIC_TIME 0x4
-/* enum: Get the current PTP status. */
+/* enum: Get the current PTP status. Note that the clock frequency returned (in
+ * Hz) is rounded to the nearest MHz (e.g. 666000000 for 666666666).
+ */
 #define          MC_CMD_PTP_OP_STATUS 0x5
 /* enum: Adjust the PTP NIC's time. */
 #define          MC_CMD_PTP_OP_ADJUST 0x6
 /* enum: Synchronize host and NIC time. */
 #define          MC_CMD_PTP_OP_SYNCHRONIZE 0x7
-/* enum: Basic manufacturing tests. */
+/* enum: Basic manufacturing tests. Siena PTP adapters only. */
 #define          MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
-/* enum: Packet based manufacturing tests. */
+/* enum: Packet based manufacturing tests. Siena PTP adapters only. */
 #define          MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
 /* enum: Reset some of the PTP related statistics */
 #define          MC_CMD_PTP_OP_RESET_STATS 0xa
 /* enum: Debug operations to MC. */
 #define          MC_CMD_PTP_OP_DEBUG 0xb
-/* enum: Read an FPGA register */
+/* enum: Read an FPGA register. Siena PTP adapters only. */
 #define          MC_CMD_PTP_OP_FPGAREAD 0xc
-/* enum: Write an FPGA register */
+/* enum: Write an FPGA register. Siena PTP adapters only. */
 #define          MC_CMD_PTP_OP_FPGAWRITE 0xd
 /* enum: Apply an offset to the NIC clock */
 #define          MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
-/* enum: Change Apply an offset to the NIC clock */
+/* enum: Change the frequency correction applied to the NIC clock */
 #define          MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
-/* enum: Set the MC packet filter VLAN tags for received PTP packets */
+/* enum: Set the MC packet filter VLAN tags for received PTP packets.
+ * Deprecated for Huntington onwards.
+ */
 #define          MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
-/* enum: Set the MC packet filter UUID for received PTP packets */
+/* enum: Set the MC packet filter UUID for received PTP packets. Deprecated for
+ * Huntington onwards.
+ */
 #define          MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
-/* enum: Set the MC packet filter Domain for received PTP packets */
+/* enum: Set the MC packet filter Domain for received PTP packets. Deprecated
+ * for Huntington onwards.
+ */
 #define          MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
-/* enum: Set the clock source */
+/* enum: Set the clock source. Required for snapper tests on Huntington and
+ * Medford. Not implemented for Siena or Medford2.
+ */
 #define          MC_CMD_PTP_OP_SET_CLK_SRC 0x13
-/* enum: Reset value of Timer Reg. */
+/* enum: Reset value of Timer Reg. Not implemented. */
 #define          MC_CMD_PTP_OP_RST_CLK 0x14
 /* enum: Enable the forwarding of PPS events to the host */
 #define          MC_CMD_PTP_OP_PPS_ENABLE 0x15
 /* enum: Unsubscribe to stop receiving time events */
 #define          MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
 /* enum: PPS based manfacturing tests. Requires PPS output to be looped to PPS
- * input on the same NIC.
+ * input on the same NIC. Siena PTP adapters only.
  */
 #define          MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
 /* enum: Set the PTP sync status. Status is used by firmware to report to event
 /* MC_CMD_PTP_IN_ENABLE msgrequest */
 #define    MC_CMD_PTP_IN_ENABLE_LEN 16
 #define       MC_CMD_PTP_IN_CMD_OFST 0
+#define       MC_CMD_PTP_IN_CMD_LEN 4
 #define       MC_CMD_PTP_IN_PERIPH_ID_OFST 4
-/* Event queue for PTP events */
+#define       MC_CMD_PTP_IN_PERIPH_ID_LEN 4
+/* Not used. Events are always sent to function relative queue 0. */
 #define       MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
-/* PTP timestamping mode */
+#define       MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4
+/* PTP timestamping mode. Not used from Huntington onwards. */
 #define       MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
+#define       MC_CMD_PTP_IN_ENABLE_MODE_LEN 4
 /* enum: PTP, version 1 */
 #define          MC_CMD_PTP_MODE_V1 0x0
 /* enum: PTP, version 1, with VLAN headers - deprecated */
 /* MC_CMD_PTP_IN_DISABLE msgrequest */
 #define    MC_CMD_PTP_IN_DISABLE_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_TRANSMIT msgrequest */
 #define    MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
 #define    MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
 #define    MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Transmit packet length */
 #define       MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+#define       MC_CMD_PTP_IN_TRANSMIT_LENGTH_LEN 4
 /* Transmit packet data */
 #define       MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
 #define       MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
 /* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
 #define    MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME_V2 msgrequest */
+#define    MC_CMD_PTP_IN_READ_NIC_TIME_V2_LEN 8
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_STATUS msgrequest */
 #define    MC_CMD_PTP_IN_STATUS_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_ADJUST msgrequest */
 #define    MC_CMD_PTP_IN_ADJUST_LEN 24
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Frequency adjustment 40 bit fixed point ns */
 #define       MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
 #define       MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
 #define       MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
 /* enum: Number of fractional bits in frequency adjustment */
 #define          MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+#define          MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c
 /* Time adjustment in seconds */
 #define       MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+#define       MC_CMD_PTP_IN_ADJUST_SECONDS_LEN 4
 /* Time adjustment major value */
 #define       MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
+#define       MC_CMD_PTP_IN_ADJUST_MAJOR_LEN 4
 /* Time adjustment in nanoseconds */
 #define       MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+#define       MC_CMD_PTP_IN_ADJUST_NANOSECONDS_LEN 4
 /* Time adjustment minor value */
 #define       MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
+#define       MC_CMD_PTP_IN_ADJUST_MINOR_LEN 4
+
+/* MC_CMD_PTP_IN_ADJUST_V2 msgrequest */
+#define    MC_CMD_PTP_IN_ADJUST_V2_LEN 28
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define       MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8
+#define       MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8
+#define       MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8
+#define       MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/*               MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+/*               MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c */
+/* Time adjustment in seconds */
+#define       MC_CMD_PTP_IN_ADJUST_V2_SECONDS_OFST 16
+#define       MC_CMD_PTP_IN_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define       MC_CMD_PTP_IN_ADJUST_V2_MAJOR_OFST 16
+#define       MC_CMD_PTP_IN_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define       MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_OFST 20
+#define       MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define       MC_CMD_PTP_IN_ADJUST_V2_MINOR_OFST 20
+#define       MC_CMD_PTP_IN_ADJUST_V2_MINOR_LEN 4
+/* Upper 32 bits of major time offset adjustment */
+#define       MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_OFST 24
+#define       MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_LEN 4
 
 /* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
 #define    MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Number of time readings to capture */
 #define       MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+#define       MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_LEN 4
 /* Host address in which to write "synchronization started" indication (64
  * bits)
  */
 /* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
 #define    MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */
 #define    MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Enable or disable packet testing */
 #define       MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
+#define       MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_LEN 4
 
 /* MC_CMD_PTP_IN_RESET_STATS msgrequest */
 #define    MC_CMD_PTP_IN_RESET_STATS_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /* Reset PTP statistics */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_DEBUG msgrequest */
 #define    MC_CMD_PTP_IN_DEBUG_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Debug operations */
 #define       MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+#define       MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_LEN 4
 
 /* MC_CMD_PTP_IN_FPGAREAD msgrequest */
 #define    MC_CMD_PTP_IN_FPGAREAD_LEN 16
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 #define       MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define       MC_CMD_PTP_IN_FPGAREAD_ADDR_LEN 4
 #define       MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+#define       MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_LEN 4
 
 /* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
 #define    MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
 #define    MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
 #define    MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 #define       MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define       MC_CMD_PTP_IN_FPGAWRITE_ADDR_LEN 4
 #define       MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
 #define       MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
 #define       MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
 /* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
 #define    MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Time adjustment in seconds */
 #define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_LEN 4
 /* Time adjustment major value */
 #define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_LEN 4
 /* Time adjustment in nanoseconds */
 #define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_LEN 4
 /* Time adjustment minor value */
 #define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_LEN 4
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2 msgrequest */
+#define    MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_LEN 20
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Time adjustment in seconds */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_OFST 8
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_OFST 8
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_OFST 12
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_OFST 12
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_LEN 4
+/* Upper 32 bits of major time offset adjustment */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_OFST 16
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_LEN 4
 
 /* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
 #define    MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Frequency adjustment 40 bit fixed point ns */
 #define       MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
 #define       MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
 #define       MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
 #define       MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
-/* enum: Number of fractional bits in frequency adjustment */
-/*               MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/*            Enum values, see field(s): */
+/*               MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */
 
 /* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
 #define    MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Number of VLAN tags, 0 if not VLAN */
 #define       MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+#define       MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_LEN 4
 /* Set of VLAN tags to filter against */
 #define       MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
 #define       MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
 /* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
 #define    MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* 1 to enable UUID filtering, 0 to disable */
 #define       MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+#define       MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_LEN 4
 /* UUID to filter against */
 #define       MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
 #define       MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
 /* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
 #define    MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* 1 to enable Domain filtering, 0 to disable */
 #define       MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+#define       MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_LEN 4
 /* Domain number to filter against */
 #define       MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+#define       MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_LEN 4
 
 /* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
 #define    MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Set the clock source. */
 #define       MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+#define       MC_CMD_PTP_IN_SET_CLK_SRC_CLK_LEN 4
 /* enum: Internal. */
 #define          MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
 /* enum: External. */
 /* MC_CMD_PTP_IN_RST_CLK msgrequest */
 #define    MC_CMD_PTP_IN_RST_CLK_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /* Reset value of Timer Reg. */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
 #define    MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /* Enable or disable */
 #define       MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
+#define       MC_CMD_PTP_IN_PPS_ENABLE_OP_LEN 4
 /* enum: Enable */
 #define          MC_CMD_PTP_ENABLE_PPS 0x0
 /* enum: Disable */
 #define          MC_CMD_PTP_DISABLE_PPS 0x1
-/* Queue id to send events back */
+/* Not used. Events are always sent to function relative queue 0. */
 #define       MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+#define       MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4
 
 /* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
 #define    MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
 #define    MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
 #define    MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
 #define    MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Original field containing queue ID. Now extended to include flags. */
 #define       MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define       MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_LEN 4
 #define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
 #define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
 #define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
 /* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
 #define    MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Unsubscribe options */
 #define       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+#define       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_LEN 4
 /* enum: Unsubscribe a single queue */
 #define          MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
 /* enum: Unsubscribe all queues */
 #define          MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
 /* Event queue ID */
 #define       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+#define       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4
 
 /* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
 #define    MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* 1 to enable PPS test mode, 0 to disable and return result. */
 #define       MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+#define       MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_LEN 4
 
 /* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
 #define    MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* NIC - Host System Clock Synchronization status */
 #define       MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_LEN 4
 /* enum: Host System clock and NIC clock are not in sync */
 #define          MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
 /* enum: Host System clock and NIC clock are synchronized */
  * no longer in sync.
  */
 #define       MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_LEN 4
 #define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_LEN 4
 #define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4
 
 /* MC_CMD_PTP_OUT msgresponse */
 #define    MC_CMD_PTP_OUT_LEN 0
 #define    MC_CMD_PTP_OUT_TRANSMIT_LEN 8
 /* Value of seconds timestamp */
 #define       MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+#define       MC_CMD_PTP_OUT_TRANSMIT_SECONDS_LEN 4
 /* Timestamp major value */
 #define       MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
+#define       MC_CMD_PTP_OUT_TRANSMIT_MAJOR_LEN 4
 /* Value of nanoseconds timestamp */
 #define       MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+#define       MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_LEN 4
 /* Timestamp minor value */
 #define       MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+#define       MC_CMD_PTP_OUT_TRANSMIT_MINOR_LEN 4
 
 /* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
 #define    MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
 #define    MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
 /* Value of seconds timestamp */
 #define       MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_LEN 4
 /* Timestamp major value */
 #define       MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_LEN 4
 /* Value of nanoseconds timestamp */
 #define       MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_LEN 4
 /* Timestamp minor value */
 #define       MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_LEN 4
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME_V2 msgresponse */
+#define    MC_CMD_PTP_OUT_READ_NIC_TIME_V2_LEN 12
+/* Value of seconds timestamp */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_OFST 0
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_LEN 4
+/* Timestamp major value */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_OFST 0
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_OFST 4
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_OFST 4
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_LEN 4
+/* Upper 32 bits of major timestamp value */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_OFST 8
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_LEN 4
 
 /* MC_CMD_PTP_OUT_STATUS msgresponse */
 #define    MC_CMD_PTP_OUT_STATUS_LEN 64
 /* Frequency of NIC's hardware clock */
 #define       MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+#define       MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_LEN 4
 /* Number of packets transmitted and timestamped */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+#define       MC_CMD_PTP_OUT_STATUS_STATS_TX_LEN 4
 /* Number of packets received and timestamped */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+#define       MC_CMD_PTP_OUT_STATUS_STATS_RX_LEN 4
 /* Number of packets timestamped by the FPGA */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+#define       MC_CMD_PTP_OUT_STATUS_STATS_TS_LEN 4
 /* Number of packets filter matched */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+#define       MC_CMD_PTP_OUT_STATUS_STATS_FM_LEN 4
 /* Number of packets not filter matched */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+#define       MC_CMD_PTP_OUT_STATUS_STATS_NFM_LEN 4
 /* Number of PPS overflows (noise on input?) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_LEN 4
 /* Number of PPS bad periods */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_LEN 4
 /* Minimum period of PPS pulse in nanoseconds */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_LEN 4
 /* Maximum period of PPS pulse in nanoseconds */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_LEN 4
 /* Last period of PPS pulse in nanoseconds */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_LEN 4
 /* Mean period of PPS pulse in nanoseconds */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_LEN 4
 /* Minimum offset of PPS pulse in nanoseconds (signed) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_LEN 4
 /* Maximum offset of PPS pulse in nanoseconds (signed) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_LEN 4
 /* Last offset of PPS pulse in nanoseconds (signed) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_LEN 4
 /* Mean offset of PPS pulse in nanoseconds (signed) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_LEN 4
 
 /* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
 #define    MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
 /* Host time immediately before NIC's hardware clock read */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_LEN 4
 /* Value of seconds timestamp */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_LEN 4
 /* Timestamp major value */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_LEN 4
 /* Value of nanoseconds timestamp */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_LEN 4
 /* Timestamp minor value */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_LEN 4
 /* Host time immediately after NIC's hardware clock read */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_LEN 4
 /* Number of nanoseconds waited after reading NIC's hardware clock */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_LEN 4
 
 /* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
 #define    MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
 /* Results of testing */
 #define       MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
+#define       MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_LEN 4
 /* enum: Successful test */
 #define          MC_CMD_PTP_MANF_SUCCESS 0x0
 /* enum: FPGA load failed */
 #define          MC_CMD_PTP_MANF_CLOCK_READ 0xe
 /* Presence of external oscillator */
 #define       MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
+#define       MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_LEN 4
 
 /* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
 #define    MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
 /* Results of testing */
 #define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+#define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_LEN 4
 /* Number of packets received by FPGA */
 #define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+#define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_LEN 4
 /* Number of packets received by Siena filters */
 #define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+#define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_LEN 4
 
 /* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
 #define    MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
 /* Time format required/used by for this NIC. Applies to all PTP MCDI
  * operations that pass times between the host and firmware. If this operation
  * is not supported (older firmware) a format of seconds and nanoseconds should
- * be assumed.
+ * be assumed. Note this enum is deprecated. Do not add to it - use the
+ * TIME_FORMAT field in MC_CMD_PTP_OUT_GET_ATTRIBUTES instead.
  */
 #define       MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+#define       MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_LEN 4
 /* enum: Times are in seconds and nanoseconds */
 #define          MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
 /* enum: Major register has units of 16 second per tick, minor 8 ns per tick */
  * be assumed.
  */
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_LEN 4
 /* enum: Times are in seconds and nanoseconds */
 #define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
 /* enum: Major register has units of 16 second per tick, minor 8 ns per tick */
 #define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
 /* enum: Major register has units of seconds, minor 2^-27s per tick */
 #define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* enum: Major register units are seconds, minor units are quarter nanoseconds
+ */
+#define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS 0x3
 /* Minimum acceptable value for a corrected synchronization timeset. When
  * comparing host and NIC clock times, the MC returns a set of samples that
  * contain the host start and end time, the MC time when the host start was
  * end and start times minus the time that the MC waited for host end.
  */
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_LEN 4
 /* Various PTP capabilities */
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_LEN 4
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_LBN 2
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_WIDTH 1
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN 3
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_WIDTH 1
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_LEN 4
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_LEN 4
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4
 
 /* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
 #define    MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
 /* Uncorrected error on PTP transmit timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_LEN 4
 /* Uncorrected error on PTP receive timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_LEN 4
 /* Uncorrected error on PPS output in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_LEN 4
 /* Uncorrected error on PPS input in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_LEN 4
 
 /* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */
 #define    MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24
 /* Uncorrected error on PTP transmit timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_LEN 4
 /* Uncorrected error on PTP receive timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_LEN 4
 /* Uncorrected error on PPS output in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_LEN 4
 /* Uncorrected error on PPS input in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_LEN 4
 /* Uncorrected error on non-PTP transmit timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_LEN 4
 /* Uncorrected error on non-PTP receive timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_LEN 4
 
 /* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
 #define    MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
 /* Results of testing */
 #define       MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+#define       MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
 
  */
 #define MC_CMD_CSR_READ32 0xc
 
-#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_CSR_READ32_IN msgrequest */
 #define    MC_CMD_CSR_READ32_IN_LEN 12
 /* Address */
 #define       MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define       MC_CMD_CSR_READ32_IN_ADDR_LEN 4
 #define       MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define       MC_CMD_CSR_READ32_IN_STEP_LEN 4
 #define       MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+#define       MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4
 
 /* MC_CMD_CSR_READ32_OUT msgresponse */
 #define    MC_CMD_CSR_READ32_OUT_LENMIN 4
  */
 #define MC_CMD_CSR_WRITE32 0xd
 
-#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_CSR_WRITE32_IN msgrequest */
 #define    MC_CMD_CSR_WRITE32_IN_LENMIN 12
 #define    MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
 /* Address */
 #define       MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define       MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4
 #define       MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+#define       MC_CMD_CSR_WRITE32_IN_STEP_LEN 4
 #define       MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
 #define       MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
 #define       MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
 /* MC_CMD_CSR_WRITE32_OUT msgresponse */
 #define    MC_CMD_CSR_WRITE32_OUT_LEN 4
 #define       MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+#define       MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4
 
 
 /***********************************/
  * sensors.
  */
 #define       MC_CMD_HP_IN_SUBCMD_OFST 0
+#define       MC_CMD_HP_IN_SUBCMD_LEN 4
 /* enum: OCSD (Option Card Sensor Data) sub-command. */
 #define          MC_CMD_HP_IN_OCSD_SUBCMD 0x0
 /* enum: Last known valid HP sub-command. */
  * NULL.)
  */
 #define       MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
+#define       MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4
 
 /* MC_CMD_HP_OUT msgresponse */
 #define    MC_CMD_HP_OUT_LEN 4
 #define       MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+#define       MC_CMD_HP_OUT_OCSD_STATUS_LEN 4
 /* enum: OCSD stopped for this card. */
 #define          MC_CMD_HP_OUT_OCSD_STOPPED 0x1
 /* enum: OCSD was successfully started with the address provided. */
  * external devices.
  */
 #define       MC_CMD_MDIO_READ_IN_BUS_OFST 0
+#define       MC_CMD_MDIO_READ_IN_BUS_LEN 4
 /* enum: Internal. */
 #define          MC_CMD_MDIO_BUS_INTERNAL 0x0
 /* enum: External. */
 #define          MC_CMD_MDIO_BUS_EXTERNAL 0x1
 /* Port address */
 #define       MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+#define       MC_CMD_MDIO_READ_IN_PRTAD_LEN 4
 /* Device Address or clause 22. */
 #define       MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+#define       MC_CMD_MDIO_READ_IN_DEVAD_LEN 4
 /* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
  * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
  */
 #define          MC_CMD_MDIO_CLAUSE22 0x20
 /* Address */
 #define       MC_CMD_MDIO_READ_IN_ADDR_OFST 12
+#define       MC_CMD_MDIO_READ_IN_ADDR_LEN 4
 
 /* MC_CMD_MDIO_READ_OUT msgresponse */
 #define    MC_CMD_MDIO_READ_OUT_LEN 8
 /* Value */
 #define       MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+#define       MC_CMD_MDIO_READ_OUT_VALUE_LEN 4
 /* Status the MDIO commands return the raw status bits from the MDIO block. A
  * "good" transaction should have the DONE bit set and all other bits clear.
  */
 #define       MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
+#define       MC_CMD_MDIO_READ_OUT_STATUS_LEN 4
 /* enum: Good. */
 #define          MC_CMD_MDIO_STATUS_GOOD 0x8
 
  * external devices.
  */
 #define       MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+#define       MC_CMD_MDIO_WRITE_IN_BUS_LEN 4
 /* enum: Internal. */
 /*               MC_CMD_MDIO_BUS_INTERNAL 0x0 */
 /* enum: External. */
 /*               MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
 /* Port address */
 #define       MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+#define       MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4
 /* Device Address or clause 22. */
 #define       MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+#define       MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4
 /* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
  * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
  */
 /*               MC_CMD_MDIO_CLAUSE22 0x20 */
 /* Address */
 #define       MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+#define       MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4
 /* Value */
 #define       MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
+#define       MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4
 
 /* MC_CMD_MDIO_WRITE_OUT msgresponse */
 #define    MC_CMD_MDIO_WRITE_OUT_LEN 4
  * "good" transaction should have the DONE bit set and all other bits clear.
  */
 #define       MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+#define       MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4
 /* enum: Good. */
 /*               MC_CMD_MDIO_STATUS_GOOD 0x8 */
 
  */
 #define MC_CMD_DBI_WRITE 0x12
 
-#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DBI_WRITE_IN msgrequest */
 #define    MC_CMD_DBI_WRITE_IN_LENMIN 12
 /* MC_CMD_DBIWROP_TYPEDEF structuredef */
 #define    MC_CMD_DBIWROP_TYPEDEF_LEN 12
 #define       MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
+#define       MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4
 #define       MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
 #define       MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
 #define       MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define       MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4
 #define        MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
 #define        MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
 #define        MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
 #define       MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
 #define       MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
 #define       MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
+#define       MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4
 #define       MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
 #define       MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
 
 #define    MC_CMD_PORT_READ32_IN_LEN 4
 /* Address */
 #define       MC_CMD_PORT_READ32_IN_ADDR_OFST 0
+#define       MC_CMD_PORT_READ32_IN_ADDR_LEN 4
 
 /* MC_CMD_PORT_READ32_OUT msgresponse */
 #define    MC_CMD_PORT_READ32_OUT_LEN 8
 /* Value */
 #define       MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+#define       MC_CMD_PORT_READ32_OUT_VALUE_LEN 4
 /* Status */
 #define       MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
+#define       MC_CMD_PORT_READ32_OUT_STATUS_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_PORT_WRITE32_IN_LEN 8
 /* Address */
 #define       MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+#define       MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4
 /* Value */
 #define       MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
+#define       MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4
 
 /* MC_CMD_PORT_WRITE32_OUT msgresponse */
 #define    MC_CMD_PORT_WRITE32_OUT_LEN 4
 /* Status */
 #define       MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
+#define       MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_PORT_READ128_IN_LEN 4
 /* Address */
 #define       MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+#define       MC_CMD_PORT_READ128_IN_ADDR_LEN 4
 
 /* MC_CMD_PORT_READ128_OUT msgresponse */
 #define    MC_CMD_PORT_READ128_OUT_LEN 20
 #define       MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
 /* Status */
 #define       MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+#define       MC_CMD_PORT_READ128_OUT_STATUS_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_PORT_WRITE128_IN_LEN 20
 /* Address */
 #define       MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+#define       MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4
 /* Value */
 #define       MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
 #define       MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
 #define    MC_CMD_PORT_WRITE128_OUT_LEN 4
 /* Status */
 #define       MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+#define       MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4
 
 /* MC_CMD_CAPABILITIES structuredef */
 #define    MC_CMD_CAPABILITIES_LEN 4
 #define    MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
 #define    MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
 #define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_LEN 4
 #define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
 #define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
-/* See MC_CMD_CAPABILITIES */
+/* Capabilities for Siena Port0 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
-/* See MC_CMD_CAPABILITIES */
+#define       MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_LEN 4
+/* Capabilities for Siena Port1 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
+#define       MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_LEN 4
+/* Base MAC address for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
+/* Base MAC address for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
+/* Size of MAC address pool for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_LEN 4
+/* Size of MAC address pool for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port0. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port1. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
-/* This field contains a 16-bit value for each of the types of NVRAM area. The
- * values are defined in the firmware/mc/platform/.c file for a specific board
- * type, but otherwise have no meaning to the MC; they are used by the driver
- * to manage selection of appropriate firmware updates.
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_LEN 4
+/* Siena only. This field contains a 16-bit value for each of the types of
+ * NVRAM area. The values are defined in the firmware/mc/platform/.c file for a
+ * specific board type, but otherwise have no meaning to the MC; they are used
+ * by the driver to manage selection of appropriate firmware updates. Unused on
+ * EF10 and later (use MC_CMD_NVRAM_METADATA).
  */
 #define       MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
 #define       MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
  */
 #define MC_CMD_DBI_READX 0x19
 
-#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DBI_READX_IN msgrequest */
 #define    MC_CMD_DBI_READX_IN_LENMIN 8
 /* MC_CMD_DBIRDOP_TYPEDEF structuredef */
 #define    MC_CMD_DBIRDOP_TYPEDEF_LEN 8
 #define       MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define       MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4
 #define       MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
 #define       MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
 #define       MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define       MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4
 #define        MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
 #define        MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
 #define        MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
  */
 #define MC_CMD_SET_RAND_SEED 0x1a
 
-#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_SET_RAND_SEED_IN msgrequest */
 #define    MC_CMD_SET_RAND_SEED_IN_LEN 16
 #define    MC_CMD_DRV_ATTACH_IN_LEN 12
 /* new state to set if UPDATE=1 */
 #define       MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define       MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4
 #define        MC_CMD_DRV_ATTACH_LBN 0
 #define        MC_CMD_DRV_ATTACH_WIDTH 1
 #define        MC_CMD_DRV_PREBOOT_LBN 1
 #define        MC_CMD_DRV_PREBOOT_WIDTH 1
 /* 1 to set new state, or 0 to just report the existing state */
 #define       MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+#define       MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4
 /* preferred datapath firmware (for Huntington; ignored for Siena) */
 #define       MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+#define       MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_LEN 4
 /* enum: Prefer to use full featured firmware */
 #define          MC_CMD_FW_FULL_FEATURED 0x0
 /* enum: Prefer to use firmware with fewer features but lower latency */
 #define    MC_CMD_DRV_ATTACH_OUT_LEN 4
 /* previous or existing state, see the bitmask at NEW_STATE */
 #define       MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
+#define       MC_CMD_DRV_ATTACH_OUT_OLD_STATE_LEN 4
 
 /* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
 #define    MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
 /* previous or existing state, see the bitmask at NEW_STATE */
 #define       MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+#define       MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_LEN 4
 /* Flags associated with this function */
 #define       MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+#define       MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4
 /* enum: Labels the lowest-numbered function visible to the OS */
 #define          MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
 /* enum: The function can control the link state of the physical port it is
 #define    MC_CMD_SHMUART_IN_LEN 4
 /* ??? */
 #define       MC_CMD_SHMUART_IN_FLAG_OFST 0
+#define       MC_CMD_SHMUART_IN_FLAG_LEN 4
 
 /* MC_CMD_SHMUART_OUT msgresponse */
 #define    MC_CMD_SHMUART_OUT_LEN 0
  * (TBD).
  */
 #define       MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
+#define       MC_CMD_ENTITY_RESET_IN_FLAG_LEN 4
 #define        MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
 #define        MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
 
 #define    MC_CMD_PCIE_CREDITS_IN_LEN 8
 /* poll period. 0 is disabled */
 #define       MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+#define       MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4
 /* wipe statistics */
 #define       MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
+#define       MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4
 
 /* MC_CMD_PCIE_CREDITS_OUT msgresponse */
 #define    MC_CMD_PCIE_CREDITS_OUT_LEN 16
 /* MC_CMD_RXD_MONITOR_IN msgrequest */
 #define    MC_CMD_RXD_MONITOR_IN_LEN 12
 #define       MC_CMD_RXD_MONITOR_IN_QID_OFST 0
+#define       MC_CMD_RXD_MONITOR_IN_QID_LEN 4
 #define       MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
+#define       MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4
 #define       MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
+#define       MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4
 
 /* MC_CMD_RXD_MONITOR_OUT msgresponse */
 #define    MC_CMD_RXD_MONITOR_OUT_LEN 80
 #define       MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
+#define       MC_CMD_RXD_MONITOR_OUT_QID_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
+#define       MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
+#define       MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4
 
 
 /***********************************/
  */
 #define MC_CMD_PUTS 0x23
 
-#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_PUTS_IN msgrequest */
 #define    MC_CMD_PUTS_IN_LENMIN 13
 #define    MC_CMD_PUTS_IN_LENMAX 252
 #define    MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
 #define       MC_CMD_PUTS_IN_DEST_OFST 0
+#define       MC_CMD_PUTS_IN_DEST_LEN 4
 #define        MC_CMD_PUTS_IN_UART_LBN 0
 #define        MC_CMD_PUTS_IN_UART_WIDTH 1
 #define        MC_CMD_PUTS_IN_PORT_LBN 1
 #define    MC_CMD_GET_PHY_CFG_OUT_LEN 72
 /* flags */
 #define       MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define       MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
 #define        MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
 #define        MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
 #define        MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+#define       MC_CMD_GET_PHY_CFG_OUT_TYPE_LEN 4
 /* Bitmask of supported capabilities */
 #define       MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define       MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_LEN 4
 #define        MC_CMD_PHY_CAP_10HDX_LBN 1
 #define        MC_CMD_PHY_CAP_10HDX_WIDTH 1
 #define        MC_CMD_PHY_CAP_10FDX_LBN 2
 #define        MC_CMD_PHY_CAP_40000FDX_WIDTH 1
 #define        MC_CMD_PHY_CAP_DDM_LBN 12
 #define        MC_CMD_PHY_CAP_DDM_WIDTH 1
+#define        MC_CMD_PHY_CAP_100000FDX_LBN 13
+#define        MC_CMD_PHY_CAP_100000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_25000FDX_LBN 14
+#define        MC_CMD_PHY_CAP_25000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_50000FDX_LBN 15
+#define        MC_CMD_PHY_CAP_50000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_BASER_FEC_LBN 16
+#define        MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1
+#define        MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN 17
+#define        MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_WIDTH 1
+#define        MC_CMD_PHY_CAP_RS_FEC_LBN 18
+#define        MC_CMD_PHY_CAP_RS_FEC_WIDTH 1
+#define        MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN 19
+#define        MC_CMD_PHY_CAP_RS_FEC_REQUESTED_WIDTH 1
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_LBN 20
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_WIDTH 1
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+#define       MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+#define       MC_CMD_GET_PHY_CFG_OUT_PRT_LEN 4
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+#define       MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_LEN 4
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
 #define       MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+#define       MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_LEN 4
 /* enum: Xaui. */
 #define          MC_CMD_MEDIA_XAUI 0x1
 /* enum: CX4. */
 /* enum: QSFP+. */
 #define          MC_CMD_MEDIA_QSFP_PLUS 0x7
 #define       MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+#define       MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
 /* enum: Native clause 22 */
 #define          MC_CMD_MMD_CLAUSE22 0x0
 #define          MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
 #define    MC_CMD_START_BIST_IN_LEN 4
 /* Type of test. */
 #define       MC_CMD_START_BIST_IN_TYPE_OFST 0
+#define       MC_CMD_START_BIST_IN_TYPE_LEN 4
 /* enum: Run the PHY's short cable BIST. */
 #define          MC_CMD_PHY_BIST_CABLE_SHORT 0x1
 /* enum: Run the PHY's long cable BIST. */
 #define    MC_CMD_POLL_BIST_OUT_LEN 8
 /* result */
 #define       MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+#define       MC_CMD_POLL_BIST_OUT_RESULT_LEN 4
 /* enum: Running. */
 #define          MC_CMD_POLL_BIST_RUNNING 0x1
 /* enum: Passed. */
 /* enum: Timed-out. */
 #define          MC_CMD_POLL_BIST_TIMEOUT 0x4
 #define       MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+#define       MC_CMD_POLL_BIST_OUT_PRIVATE_LEN 4
 
 /* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
 #define    MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
 /* result */
 /*            MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/*            MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
 /*            Enum values, see field(s): */
 /*               MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_LEN 4
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_LEN 4
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_LEN 4
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_LEN 4
 /* Status of each channel A */
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_LEN 4
 /* enum: Ok. */
 #define          MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
 /* enum: Open. */
 #define          MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
 /* Status of each channel B */
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_LEN 4
 /*            Enum values, see field(s): */
 /*               CABLE_STATUS_A */
 /* Status of each channel C */
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_LEN 4
 /*            Enum values, see field(s): */
 /*               CABLE_STATUS_A */
 /* Status of each channel D */
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_LEN 4
 /*            Enum values, see field(s): */
 /*               CABLE_STATUS_A */
 
 #define    MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
 /* result */
 /*            MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/*            MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
 /*            Enum values, see field(s): */
 /*               MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
 #define       MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+#define       MC_CMD_POLL_BIST_OUT_MRSFP_TEST_LEN 4
 /* enum: Complete. */
 #define          MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
 /* enum: Bus switch off I2C write. */
 #define    MC_CMD_POLL_BIST_OUT_MEM_LEN 36
 /* result */
 /*            MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/*            MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
 /*            Enum values, see field(s): */
 /*               MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
 #define       MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+#define       MC_CMD_POLL_BIST_OUT_MEM_TEST_LEN 4
 /* enum: Test has completed. */
 #define          MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
 /* enum: RAM test - walk ones. */
 #define          MC_CMD_POLL_BIST_MEM_ECC 0x6
 /* Failure address, only valid if result is POLL_BIST_FAILED */
 #define       MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+#define       MC_CMD_POLL_BIST_OUT_MEM_ADDR_LEN 4
 /* Bus or address space to which the failure address corresponds */
 #define       MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+#define       MC_CMD_POLL_BIST_OUT_MEM_BUS_LEN 4
 /* enum: MC MIPS bus. */
 #define          MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
 /* enum: CSR IREG bus. */
 #define          MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8
 /* Pattern written to RAM / register */
 #define       MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+#define       MC_CMD_POLL_BIST_OUT_MEM_EXPECT_LEN 4
 /* Actual value read from RAM / register */
 #define       MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+#define       MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_LEN 4
 /* ECC error mask */
 #define       MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+#define       MC_CMD_POLL_BIST_OUT_MEM_ECC_LEN 4
 /* ECC parity error mask */
 #define       MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+#define       MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_LEN 4
 /* ECC fatal error mask */
 #define       MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
+#define       MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_LEN 4
 
 
 /***********************************/
 /*            Enum values, see field(s): */
 /*               100M */
 
+/* MC_CMD_GET_LOOPBACK_MODES_OUT_V2 msgresponse: Supported loopback modes for
+ * newer NICs with 25G/50G/100G support
+ */
+#define    MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN 64
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4
+/* enum: None. */
+/*               MC_CMD_LOOPBACK_NONE  0x0 */
+/* enum: Data. */
+/*               MC_CMD_LOOPBACK_DATA  0x1 */
+/* enum: GMAC. */
+/*               MC_CMD_LOOPBACK_GMAC  0x2 */
+/* enum: XGMII. */
+/*               MC_CMD_LOOPBACK_XGMII 0x3 */
+/* enum: XGXS. */
+/*               MC_CMD_LOOPBACK_XGXS  0x4 */
+/* enum: XAUI. */
+/*               MC_CMD_LOOPBACK_XAUI  0x5 */
+/* enum: GMII. */
+/*               MC_CMD_LOOPBACK_GMII  0x6 */
+/* enum: SGMII. */
+/*               MC_CMD_LOOPBACK_SGMII  0x7 */
+/* enum: XGBR. */
+/*               MC_CMD_LOOPBACK_XGBR  0x8 */
+/* enum: XFI. */
+/*               MC_CMD_LOOPBACK_XFI  0x9 */
+/* enum: XAUI Far. */
+/*               MC_CMD_LOOPBACK_XAUI_FAR  0xa */
+/* enum: GMII Far. */
+/*               MC_CMD_LOOPBACK_GMII_FAR  0xb */
+/* enum: SGMII Far. */
+/*               MC_CMD_LOOPBACK_SGMII_FAR  0xc */
+/* enum: XFI Far. */
+/*               MC_CMD_LOOPBACK_XFI_FAR  0xd */
+/* enum: GPhy. */
+/*               MC_CMD_LOOPBACK_GPHY  0xe */
+/* enum: PhyXS. */
+/*               MC_CMD_LOOPBACK_PHYXS  0xf */
+/* enum: PCS. */
+/*               MC_CMD_LOOPBACK_PCS  0x10 */
+/* enum: PMA-PMD. */
+/*               MC_CMD_LOOPBACK_PMAPMD  0x11 */
+/* enum: Cross-Port. */
+/*               MC_CMD_LOOPBACK_XPORT  0x12 */
+/* enum: XGMII-Wireside. */
+/*               MC_CMD_LOOPBACK_XGMII_WS  0x13 */
+/* enum: XAUI Wireside. */
+/*               MC_CMD_LOOPBACK_XAUI_WS  0x14 */
+/* enum: XAUI Wireside Far. */
+/*               MC_CMD_LOOPBACK_XAUI_WS_FAR  0x15 */
+/* enum: XAUI Wireside near. */
+/*               MC_CMD_LOOPBACK_XAUI_WS_NEAR  0x16 */
+/* enum: GMII Wireside. */
+/*               MC_CMD_LOOPBACK_GMII_WS  0x17 */
+/* enum: XFI Wireside. */
+/*               MC_CMD_LOOPBACK_XFI_WS  0x18 */
+/* enum: XFI Wireside Far. */
+/*               MC_CMD_LOOPBACK_XFI_WS_FAR  0x19 */
+/* enum: PhyXS Wireside. */
+/*               MC_CMD_LOOPBACK_PHYXS_WS  0x1a */
+/* enum: PMA lanes MAC-Serdes. */
+/*               MC_CMD_LOOPBACK_PMA_INT  0x1b */
+/* enum: KR Serdes Parallel (Encoder). */
+/*               MC_CMD_LOOPBACK_SD_NEAR  0x1c */
+/* enum: KR Serdes Serial. */
+/*               MC_CMD_LOOPBACK_SD_FAR  0x1d */
+/* enum: PMA lanes MAC-Serdes Wireside. */
+/*               MC_CMD_LOOPBACK_PMA_INT_WS  0x1e */
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+/*               MC_CMD_LOOPBACK_SD_FEP2_WS  0x1f */
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+/*               MC_CMD_LOOPBACK_SD_FEP1_5_WS  0x20 */
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+/*               MC_CMD_LOOPBACK_SD_FEP_WS  0x21 */
+/* enum: KR Serdes Serial Wireside. */
+/*               MC_CMD_LOOPBACK_SD_FES_WS  0x22 */
+/* enum: Near side of AOE Siena side port */
+/*               MC_CMD_LOOPBACK_AOE_INT_NEAR  0x23 */
+/* enum: Medford Wireside datapath loopback */
+/*               MC_CMD_LOOPBACK_DATA_WS  0x24 */
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+/*               MC_CMD_LOOPBACK_FORCE_EXT_LINK  0x25 */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported 25G loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported 50G loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported 100G loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60
+/*            Enum values, see field(s): */
+/*               100M */
+
 
 /***********************************/
 /* MC_CMD_GET_LINK
 #define    MC_CMD_GET_LINK_OUT_LEN 28
 /* near-side advertised capabilities */
 #define       MC_CMD_GET_LINK_OUT_CAP_OFST 0
+#define       MC_CMD_GET_LINK_OUT_CAP_LEN 4
 /* link-partner advertised capabilities */
 #define       MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+#define       MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4
 /* Autonegotiated speed in mbit/s. The link may still be down even if this
  * reads non-zero.
  */
 #define       MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+#define       MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN 4
 /* Current loopback setting. */
 #define       MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+#define       MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
 #define       MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+#define       MC_CMD_GET_LINK_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
 #define        MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
 #define        MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
 #define        MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
 /* This returns the negotiated flow control value. */
 #define       MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+#define       MC_CMD_GET_LINK_OUT_FCNTL_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
 #define       MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
+#define       MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4
 #define        MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
 #define        MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
 #define        MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
 #define    MC_CMD_SET_LINK_IN_LEN 16
 /* ??? */
 #define       MC_CMD_SET_LINK_IN_CAP_OFST 0
+#define       MC_CMD_SET_LINK_IN_CAP_LEN 4
 /* Flags */
 #define       MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define       MC_CMD_SET_LINK_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
 #define        MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
 #define        MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
 #define        MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
 /* Loopback mode. */
 #define       MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+#define       MC_CMD_SET_LINK_IN_LOOPBACK_MODE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
 /* A loopback speed of "0" is supported, and means (choose any available
  * speed).
  */
 #define       MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+#define       MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_LEN 4
 
 /* MC_CMD_SET_LINK_OUT msgresponse */
 #define    MC_CMD_SET_LINK_OUT_LEN 0
 #define    MC_CMD_SET_ID_LED_IN_LEN 4
 /* Set LED state. */
 #define       MC_CMD_SET_ID_LED_IN_STATE_OFST 0
+#define       MC_CMD_SET_ID_LED_IN_STATE_LEN 4
 #define          MC_CMD_LED_OFF  0x0 /* enum */
 #define          MC_CMD_LED_ON  0x1 /* enum */
 #define          MC_CMD_LED_DEFAULT  0x2 /* enum */
  * EtherII, VLAN, bug16011 padding).
  */
 #define       MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define       MC_CMD_SET_MAC_IN_MTU_LEN 4
 #define       MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define       MC_CMD_SET_MAC_IN_DRAIN_LEN 4
 #define       MC_CMD_SET_MAC_IN_ADDR_OFST 8
 #define       MC_CMD_SET_MAC_IN_ADDR_LEN 8
 #define       MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
 #define       MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
 #define       MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define       MC_CMD_SET_MAC_IN_REJECT_LEN 4
 #define        MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
 #define        MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
 #define        MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
 #define        MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
 #define       MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+#define       MC_CMD_SET_MAC_IN_FCNTL_LEN 4
 /* enum: Flow control is off. */
 #define          MC_CMD_FCNTL_OFF 0x0
 /* enum: Respond to flow control. */
 /* enum: Issue flow control. */
 #define          MC_CMD_FCNTL_GENERATE 0x5
 #define       MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define       MC_CMD_SET_MAC_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
 #define        MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
 
  * EtherII, VLAN, bug16011 padding).
  */
 #define       MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0
+#define       MC_CMD_SET_MAC_EXT_IN_MTU_LEN 4
 #define       MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4
+#define       MC_CMD_SET_MAC_EXT_IN_DRAIN_LEN 4
 #define       MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
 #define       MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
 #define       MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
 #define       MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
 #define       MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
+#define       MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4
 #define        MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
 #define        MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
 #define        MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
 #define        MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
 #define       MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
+#define       MC_CMD_SET_MAC_EXT_IN_FCNTL_LEN 4
 /* enum: Flow control is off. */
 /*               MC_CMD_FCNTL_OFF 0x0 */
 /* enum: Respond to flow control. */
 /* enum: Issue flow control. */
 /*               MC_CMD_FCNTL_GENERATE 0x5 */
 #define       MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
+#define       MC_CMD_SET_MAC_EXT_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
 #define        MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
 /* Select which parameters to configure. A parameter will only be modified if
  * set).
  */
 #define       MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
+#define       MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN 4
 #define        MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
 #define        MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
 #define        MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
  * to 0.
  */
 #define       MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0
+#define       MC_CMD_SET_MAC_V2_OUT_MTU_LEN 4
 
 
 /***********************************/
 #define       MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
 #define       MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
 #define       MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define       MC_CMD_MAC_STATS_IN_CMD_LEN 4
 #define        MC_CMD_MAC_STATS_IN_DMA_LBN 0
 #define        MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
 #define        MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
 #define        MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
 #define        MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
 #define        MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
+/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as
+ * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not
+ * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to
+ * MC_CMD_MAC_NSTATS * sizeof(uint64_t)
+ */
 #define       MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+#define       MC_CMD_MAC_STATS_IN_DMA_LEN_LEN 4
 /* port id so vadapter stats can be provided */
 #define       MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
+#define       MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4
 
 /* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
 #define    MC_CMD_MAC_STATS_OUT_DMA_LEN 0
 #define          MC_CMD_GMAC_DMABUF_START  0x40
 /* enum: End of GMAC stats buffer space, for Siena only. */
 #define          MC_CMD_GMAC_DMABUF_END    0x5f
-#define          MC_CMD_MAC_GENERATION_END 0x60 /* enum */
+/* enum: GENERATION_END value, used together with GENERATION_START to verify
+ * consistency of DMA'd data. For legacy firmware / drivers without extended
+ * stats (more precisely, when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t)), this entry holds the GENERATION_END value. Otherwise,
+ * this value is invalid/reserved and GENERATION_END is written as the last
+ * 64-bit word of the DMA buffer (at DMA_LEN - sizeof(uint64_t)). Note that
+ * this is consistent with the legacy behaviour, in the sense that entry 96 is
+ * the last 64-bit word in the buffer when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t). See SF-109306-TC, Section 9.2 for details.
+ */
+#define          MC_CMD_MAC_GENERATION_END 0x60
 #define          MC_CMD_MAC_NSTATS  0x61 /* enum */
 
+/* MC_CMD_MAC_STATS_V2_OUT_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V2_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V2_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V2*64))>>3)
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
+/* enum: Start of FEC stats buffer space, Medford2 and up */
+#define          MC_CMD_MAC_FEC_DMABUF_START  0x61
+/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define          MC_CMD_MAC_FEC_UNCORRECTED_ERRORS  0x61
+/* enum: Number of corrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define          MC_CMD_MAC_FEC_CORRECTED_ERRORS  0x62
+/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */
+#define          MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0  0x63
+/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */
+#define          MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1  0x64
+/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */
+#define          MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2  0x65
+/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */
+#define          MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3  0x66
+/* enum: This includes the space at offset 103 which is the final
+ * GENERATION_END in a MAC_STATS_V2 response and otherwise unused.
+ */
+#define          MC_CMD_MAC_NSTATS_V2  0x68
+/*            Other enum values, see field(s): */
+/*               MC_CMD_MAC_STATS_OUT_NO_DMA/STATISTICS */
+
+/* MC_CMD_MAC_STATS_V3_OUT_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V3_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V3_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V3*64))>>3)
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
+/* enum: Start of CTPIO stats buffer space, Medford2 and up */
+#define          MC_CMD_MAC_CTPIO_DMABUF_START  0x68
+/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the
+ * target VI
+ */
+#define          MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK  0x68
+/* enum: Number of times a CTPIO send wrote beyond frame end (informational
+ * only)
+ */
+#define          MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS  0x69
+/* enum: Number of CTPIO failures because the TX doorbell was written before
+ * the end of the frame data
+ */
+#define          MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL  0x6a
+/* enum: Number of CTPIO failures because the internal FIFO overflowed */
+#define          MC_CMD_MAC_CTPIO_OVERFLOW_FAIL  0x6b
+/* enum: Number of CTPIO failures because the host did not deliver data fast
+ * enough to avoid MAC underflow
+ */
+#define          MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL  0x6c
+/* enum: Number of CTPIO failures because the host did not deliver all the
+ * frame data within the timeout
+ */
+#define          MC_CMD_MAC_CTPIO_TIMEOUT_FAIL  0x6d
+/* enum: Number of CTPIO failures because the frame data arrived out of order
+ * or with gaps
+ */
+#define          MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL  0x6e
+/* enum: Number of CTPIO failures because the host started a new frame before
+ * completing the previous one
+ */
+#define          MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL  0x6f
+/* enum: Number of CTPIO failures because a write was not a multiple of 32 bits
+ * or not 32-bit aligned
+ */
+#define          MC_CMD_MAC_CTPIO_INVALID_WR_FAIL  0x70
+/* enum: Number of CTPIO fallbacks because another VI on the same port was
+ * sending a CTPIO frame
+ */
+#define          MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK  0x71
+/* enum: Number of CTPIO fallbacks because target VI did not have CTPIO enabled
+ */
+#define          MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK  0x72
+/* enum: Number of CTPIO fallbacks because length in header was less than 29
+ * bytes
+ */
+#define          MC_CMD_MAC_CTPIO_RUNT_FALLBACK  0x73
+/* enum: Total number of successful CTPIO sends on this port */
+#define          MC_CMD_MAC_CTPIO_SUCCESS  0x74
+/* enum: Total number of CTPIO fallbacks on this port */
+#define          MC_CMD_MAC_CTPIO_FALLBACK  0x75
+/* enum: Total number of CTPIO poisoned frames on this port, whether erased or
+ * not
+ */
+#define          MC_CMD_MAC_CTPIO_POISON  0x76
+/* enum: Total number of CTPIO erased frames on this port */
+#define          MC_CMD_MAC_CTPIO_ERASE  0x77
+/* enum: This includes the space at offset 120 which is the final
+ * GENERATION_END in a MAC_STATS_V3 response and otherwise unused.
+ */
+#define          MC_CMD_MAC_NSTATS_V3  0x79
+/*            Other enum values, see field(s): */
+/*               MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */
+
 
 /***********************************/
 /* MC_CMD_SRIOV
 /* MC_CMD_SRIOV_IN msgrequest */
 #define    MC_CMD_SRIOV_IN_LEN 12
 #define       MC_CMD_SRIOV_IN_ENABLE_OFST 0
+#define       MC_CMD_SRIOV_IN_ENABLE_LEN 4
 #define       MC_CMD_SRIOV_IN_VI_BASE_OFST 4
+#define       MC_CMD_SRIOV_IN_VI_BASE_LEN 4
 #define       MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
+#define       MC_CMD_SRIOV_IN_VF_COUNT_LEN 4
 
 /* MC_CMD_SRIOV_OUT msgresponse */
 #define    MC_CMD_SRIOV_OUT_LEN 8
 #define       MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
+#define       MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4
 #define       MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
+#define       MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4
 
 /* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
 #define    MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
 /* this is only used for the first record */
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4
 #define          MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
 
 /* MC_CMD_WOL_FILTER_SET_IN msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_LEN 192
 #define       MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
+#define       MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4
 #define          MC_CMD_FILTER_MODE_SIMPLE    0x0 /* enum */
 #define          MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
 /* A type value of 1 is unused. */
 #define       MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
+#define       MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4
 /* enum: Magic */
 #define          MC_CMD_WOL_TYPE_MAGIC      0x0
 /* enum: MS Windows Magic */
 /* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16
 /*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
 /*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
 #define       MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
 #define       MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
 #define       MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
 /* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
 /*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
 /*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_LEN 4
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_LEN 4
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18
 /* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44
 /*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
 /*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24
 /* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187
 /*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
 /*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
 #define       MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8
 #define       MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48
 #define       MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56
 /* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12
 /*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
 /*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
 #define       MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
+#define       MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_LEN 4
 #define        MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
 #define        MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
 #define        MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
 /* MC_CMD_WOL_FILTER_SET_OUT msgresponse */
 #define    MC_CMD_WOL_FILTER_SET_OUT_LEN 4
 #define       MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
+#define       MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_LEN 4
 
 
 /***********************************/
 /* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
 #define    MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
 #define       MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
+#define       MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_LEN 4
 
 /* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */
 #define    MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
 /* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
 #define    MC_CMD_WOL_FILTER_RESET_IN_LEN 4
 #define       MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
+#define       MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4
 #define          MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
 #define          MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
 
 #define    MC_CMD_NVRAM_TYPES_OUT_LEN 4
 /* Bit mask of supported types. */
 #define       MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
+#define       MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4
 /* enum: Disabled callisto. */
 #define          MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
 /* enum: MC firmware. */
 /* MC_CMD_NVRAM_INFO_IN msgrequest */
 #define    MC_CMD_NVRAM_INFO_IN_LEN 4
 #define       MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_INFO_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 
 /* MC_CMD_NVRAM_INFO_OUT msgresponse */
 #define    MC_CMD_NVRAM_INFO_OUT_LEN 24
 #define       MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
+#define       MC_CMD_NVRAM_INFO_OUT_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
+#define       MC_CMD_NVRAM_INFO_OUT_SIZE_LEN 4
 #define       MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
+#define       MC_CMD_NVRAM_INFO_OUT_ERASESIZE_LEN 4
 #define       MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
+#define       MC_CMD_NVRAM_INFO_OUT_FLAGS_LEN 4
 #define        MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
 #define        MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
 #define        MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
 #define        MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5
+#define        MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1
 #define        MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6
 #define        MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1
 #define        MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
 #define        MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
 #define       MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
+#define       MC_CMD_NVRAM_INFO_OUT_PHYSDEV_LEN 4
 #define       MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+#define       MC_CMD_NVRAM_INFO_OUT_PHYSADDR_LEN 4
 
 /* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */
 #define    MC_CMD_NVRAM_INFO_V2_OUT_LEN 28
 #define       MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0
+#define       MC_CMD_NVRAM_INFO_V2_OUT_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4
+#define       MC_CMD_NVRAM_INFO_V2_OUT_SIZE_LEN 4
 #define       MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8
+#define       MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_LEN 4
 #define       MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12
+#define       MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_LEN 4
 #define        MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0
 #define        MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1
 #define        MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1
 #define        MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5
+#define        MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1
 #define        MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
 #define        MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
 #define       MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
+#define       MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4
 #define       MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
+#define       MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_LEN 4
 /* Writes must be multiples of this size. Added to support the MUM on Sorrento.
  */
 #define       MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24
+#define       MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_LEN 4
 
 
 /***********************************/
  */
 #define    MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
 #define       MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_START_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 
  */
 #define    MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8
 #define       MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4
+#define       MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_LEN 4
 #define        MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
 #define        MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
 
 /* MC_CMD_NVRAM_READ_IN msgrequest */
 #define    MC_CMD_NVRAM_READ_IN_LEN 12
 #define       MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_READ_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+#define       MC_CMD_NVRAM_READ_IN_OFFSET_LEN 4
 /* amount to read in bytes */
 #define       MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+#define       MC_CMD_NVRAM_READ_IN_LENGTH_LEN 4
 
 /* MC_CMD_NVRAM_READ_IN_V2 msgrequest */
 #define    MC_CMD_NVRAM_READ_IN_V2_LEN 16
 #define       MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0
+#define       MC_CMD_NVRAM_READ_IN_V2_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4
+#define       MC_CMD_NVRAM_READ_IN_V2_OFFSET_LEN 4
 /* amount to read in bytes */
 #define       MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8
+#define       MC_CMD_NVRAM_READ_IN_V2_LENGTH_LEN 4
 /* Optional control info. If a partition is stored with an A/B versioning
  * scheme (i.e. in more than one physical partition in NVRAM) the host can set
  * this to control which underlying physical partition is used to read data
  * verifying by reading with MODE=TARGET_BACKUP.
  */
 #define       MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12
+#define       MC_CMD_NVRAM_READ_IN_V2_MODE_LEN 4
 /* enum: Same as omitting MODE: caller sees data in current partition unless it
  * holds the write lock in which case it sees data in the partition it is
  * updating.
 #define    MC_CMD_NVRAM_WRITE_IN_LENMAX 252
 #define    MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
 #define       MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_WRITE_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
+#define       MC_CMD_NVRAM_WRITE_IN_OFFSET_LEN 4
 #define       MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
+#define       MC_CMD_NVRAM_WRITE_IN_LENGTH_LEN 4
 #define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
 #define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
 #define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
 /* MC_CMD_NVRAM_ERASE_IN msgrequest */
 #define    MC_CMD_NVRAM_ERASE_IN_LEN 12
 #define       MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_ERASE_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
+#define       MC_CMD_NVRAM_ERASE_IN_OFFSET_LEN 4
 #define       MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
+#define       MC_CMD_NVRAM_ERASE_IN_LENGTH_LEN 4
 
 /* MC_CMD_NVRAM_ERASE_OUT msgresponse */
 #define    MC_CMD_NVRAM_ERASE_OUT_LEN 0
  */
 #define    MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
 #define       MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
+#define       MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_LEN 4
 
 /* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH
  * request with additional flags indicating version of NVRAM_UPDATE commands in
  */
 #define    MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12
 #define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_LEN 4
 #define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_LEN 4
 #define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
 #define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
 
  * This process takes a few seconds to complete. So is likely to take more than
  * the MCDI timeout. Hence signature verification is initiated when
  * MC_CMD_NVRAM_UPDATE_FINISH_V2_IN is received by the firmware, however, the
- * MCDI command returns immediately with error code EAGAIN. Subsequent
- * NVRAM_UPDATE_FINISH_V2_IN requests also return EAGAIN if the verification is
- * in progress. Once the verification has completed, this response payload
- * includes the results of the signature verification. Note that the nvram lock
- * in firmware is only released after the verification has completed and the
- * host has read back the result code from firmware.
+ * MCDI command is run in a background MCDI processing thread. This response
+ * payload includes the results of the signature verification. Note that the
+ * per-partition nvram lock in firmware is only released after the verification
+ * has completed.
  */
 #define    MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4
 /* Result of nvram update completion processing */
 #define       MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_LEN 4
+/* enum: Invalid return code; only non-zero values are defined. Defined as
+ * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0
 /* enum: Verify succeeded without any errors. */
 #define          MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1
 /* enum: CMS format verification failed due to an internal error. */
  * Trusted approver's list.
  */
 #define          MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb
+/* enum: The image contains a test-signed certificate, but the adapter accepts
+ * only production signed images.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc
+/* enum: The image has a lower security level than the current firmware. */
+#define          MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd
 
 
 /***********************************/
 /* MC_CMD_REBOOT_IN msgrequest */
 #define    MC_CMD_REBOOT_IN_LEN 4
 #define       MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define       MC_CMD_REBOOT_IN_FLAGS_LEN 4
 #define          MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
 
 /* MC_CMD_REBOOT_OUT msgresponse */
  */
 #define MC_CMD_REBOOT_MODE 0x3f
 
-#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_REBOOT_MODE_IN msgrequest */
 #define    MC_CMD_REBOOT_MODE_IN_LEN 4
 #define       MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+#define       MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4
 /* enum: Normal. */
 #define          MC_CMD_REBOOT_MODE_NORMAL 0x0
 /* enum: Power-on Reset. */
 /* MC_CMD_REBOOT_MODE_OUT msgresponse */
 #define    MC_CMD_REBOOT_MODE_OUT_LEN 4
 #define       MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+#define       MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4
 
 
 /***********************************/
  */
 #define MC_CMD_SENSOR_INFO 0x41
 
-#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_SENSOR_INFO_IN msgrequest */
 #define    MC_CMD_SENSOR_INFO_IN_LEN 0
  * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc.
  */
 #define       MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+#define       MC_CMD_SENSOR_INFO_EXT_IN_PAGE_LEN 4
 
 /* MC_CMD_SENSOR_INFO_OUT msgresponse */
 #define    MC_CMD_SENSOR_INFO_OUT_LENMIN 4
 #define    MC_CMD_SENSOR_INFO_OUT_LENMAX 252
 #define    MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
 #define       MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+#define       MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4
 /* enum: Controller temperature: degC */
 #define          MC_CMD_SENSOR_CONTROLLER_TEMP  0x0
 /* enum: Phy common temperature: degC */
 #define          MC_CMD_SENSOR_BOARD_FRONT_TEMP  0x4f
 /* enum: Board temperature (back): degC */
 #define          MC_CMD_SENSOR_BOARD_BACK_TEMP  0x50
+/* enum: 1.8v power current: mA */
+#define          MC_CMD_SENSOR_IN_I1V8  0x51
+/* enum: 2.5v power current: mA */
+#define          MC_CMD_SENSOR_IN_I2V5  0x52
+/* enum: 3.3v power current: mA */
+#define          MC_CMD_SENSOR_IN_I3V3  0x53
+/* enum: 12v power current: mA */
+#define          MC_CMD_SENSOR_IN_I12V0  0x54
+/* enum: 1.3v power: mV */
+#define          MC_CMD_SENSOR_IN_1V3  0x55
+/* enum: 1.3v power current: mA */
+#define          MC_CMD_SENSOR_IN_I1V3  0x56
+/* enum: Not a sensor: reserved for the next page flag */
+#define          MC_CMD_SENSOR_PAGE2_NEXT  0x5f
 /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
 #define       MC_CMD_SENSOR_ENTRY_OFST 4
 #define       MC_CMD_SENSOR_ENTRY_LEN 8
 #define    MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
 #define    MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
 #define       MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+#define       MC_CMD_SENSOR_INFO_EXT_OUT_MASK_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_SENSOR_INFO_OUT */
 #define        MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
  */
 #define MC_CMD_READ_SENSORS 0x42
 
-#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_READ_SENSORS_IN msgrequest */
 #define    MC_CMD_READ_SENSORS_IN_LEN 8
 #define       MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
 /* Size in bytes of host buffer. */
 #define       MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
+#define       MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4
 
 /* MC_CMD_READ_SENSORS_OUT msgresponse */
 #define    MC_CMD_READ_SENSORS_OUT_LEN 0
 /* MC_CMD_GET_PHY_STATE_OUT msgresponse */
 #define    MC_CMD_GET_PHY_STATE_OUT_LEN 4
 #define       MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
+#define       MC_CMD_GET_PHY_STATE_OUT_STATE_LEN 4
 /* enum: Ok. */
 #define          MC_CMD_PHY_STATE_OK 0x1
 /* enum: Faulty. */
 /* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
 #define    MC_CMD_WOL_FILTER_GET_OUT_LEN 4
 #define       MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+#define       MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
 #define          MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
 #define          MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS  0x2 /* enum */
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
 /*            MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/*            MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4
 
 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
 /*            MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/*            MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4
 
 
 /***********************************/
 /* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
 #define    MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
 #define       MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define       MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
 #define       MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+#define       MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4
 
 /* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
 #define    MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
 #define    MC_CMD_TESTASSERT_V2_IN_LEN 4
 /* How to provoke the assertion */
 #define       MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
+#define       MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4
 /* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
  * you're testing firmware, this is what you want.
  */
 #define    MC_CMD_WORKAROUND_IN_LEN 8
 /* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
 #define       MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+#define       MC_CMD_WORKAROUND_IN_TYPE_LEN 4
 /* enum: Bug 17230 work around. */
 #define          MC_CMD_WORKAROUND_BUG17230 0x1
 /* enum: Bug 35388 work around (unsafe EVQ writes). */
  * the workaround
  */
 #define       MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+#define       MC_CMD_WORKAROUND_IN_ENABLED_LEN 4
 
 /* MC_CMD_WORKAROUND_OUT msgresponse */
 #define    MC_CMD_WORKAROUND_OUT_LEN 0
  */
 #define    MC_CMD_WORKAROUND_EXT_OUT_LEN 4
 #define       MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define       MC_CMD_WORKAROUND_EXT_OUT_FLAGS_LEN 4
 #define        MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
 #define        MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
 
 /* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
 #define    MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
 #define       MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+#define       MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4
 
 /* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
 #define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
 #define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
 /* in bytes */
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_LEN 4
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
 /* MC_CMD_NVRAM_TEST_IN msgrequest */
 #define    MC_CMD_NVRAM_TEST_IN_LEN 4
 #define       MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_TEST_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 
 /* MC_CMD_NVRAM_TEST_OUT msgresponse */
 #define    MC_CMD_NVRAM_TEST_OUT_LEN 4
 #define       MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+#define       MC_CMD_NVRAM_TEST_OUT_RESULT_LEN 4
 /* enum: Passed. */
 #define          MC_CMD_NVRAM_TEST_PASS 0x0
 /* enum: Failed. */
 #define    MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
 /* 0-6 low->high de-emph. */
 #define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4
 /* 0-8 low->high ref.V */
 #define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4
 /* 0-8 0-8 low->high boost */
 #define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4
 /* 0-8 low->high ref.V */
 #define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4
 
 /* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
 #define    MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
 #define    MC_CMD_MRSFP_TWEAK_OUT_LEN 12
 /* input bits */
 #define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+#define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4
 /* output bits */
 #define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+#define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4
 /* direction */
 #define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
+#define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4
 /* enum: Out. */
 #define          MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
 /* enum: In. */
  */
 #define MC_CMD_SENSOR_SET_LIMS 0x4e
 
-#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
 #define    MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
 #define       MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+#define       MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
 /* interpretation is sensor-specific. */
 #define       MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define       MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
 /* interpretation is sensor-specific. */
 #define       MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define       MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
 /* interpretation is sensor-specific. */
 #define       MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define       MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
 /* interpretation is sensor-specific. */
 #define       MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+#define       MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
 
 /* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
 #define    MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
 /* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
 #define    MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
 #define       MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4
 #define       MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4
 #define       MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4
 #define       MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
 /* total number of partitions */
 #define       MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+#define       MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_LEN 4
 /* type ID code for each of NUM_PARTITIONS partitions */
 #define       MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
 #define       MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
 #define    MC_CMD_NVRAM_METADATA_IN_LEN 4
 /* Partition type ID code */
 #define       MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_METADATA_IN_TYPE_LEN 4
 
 /* MC_CMD_NVRAM_METADATA_OUT msgresponse */
 #define    MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
 #define    MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
 /* Partition type ID code */
 #define       MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define       MC_CMD_NVRAM_METADATA_OUT_TYPE_LEN 4
 #define       MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define       MC_CMD_NVRAM_METADATA_OUT_FLAGS_LEN 4
 #define        MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
 #define        MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
 #define        MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
 #define        MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
 /* Subtype ID code for content of this partition */
 #define       MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+#define       MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_LEN 4
 /* 1st component of W.X.Y.Z version number for content of this partition */
 #define       MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
 #define       MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
 #define       MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
 /* Number of allocated MAC addresses */
 #define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+#define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_LEN 4
 /* Spacing of allocated MAC addresses */
 #define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+#define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_CLP_IN_LEN 4
 /* Sub operation */
 #define       MC_CMD_CLP_IN_OP_OFST 0
+#define       MC_CMD_CLP_IN_OP_LEN 4
 /* enum: Return to factory default settings */
 #define          MC_CMD_CLP_OP_DEFAULT 0x1
 /* enum: Set MAC address */
 /* MC_CMD_CLP_IN_DEFAULT msgrequest */
 #define    MC_CMD_CLP_IN_DEFAULT_LEN 4
 /*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
 
 /* MC_CMD_CLP_OUT_DEFAULT msgresponse */
 #define    MC_CMD_CLP_OUT_DEFAULT_LEN 0
 /* MC_CMD_CLP_IN_SET_MAC msgrequest */
 #define    MC_CMD_CLP_IN_SET_MAC_LEN 12
 /*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
 /* MAC address assigned to port */
 #define       MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
 #define       MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
 /* MC_CMD_CLP_IN_GET_MAC msgrequest */
 #define    MC_CMD_CLP_IN_GET_MAC_LEN 4
 /*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
 
 /* MC_CMD_CLP_OUT_GET_MAC msgresponse */
 #define    MC_CMD_CLP_OUT_GET_MAC_LEN 8
 /* MC_CMD_CLP_IN_SET_BOOT msgrequest */
 #define    MC_CMD_CLP_IN_SET_BOOT_LEN 5
 /*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
 /* Boot flag */
 #define       MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
 #define       MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
 /* MC_CMD_CLP_IN_GET_BOOT msgrequest */
 #define    MC_CMD_CLP_IN_GET_BOOT_LEN 4
 /*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
 
 /* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
 #define    MC_CMD_CLP_OUT_GET_BOOT_LEN 4
  */
 #define MC_CMD_MUM 0x57
 
-#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_MUM_IN msgrequest */
 #define    MC_CMD_MUM_IN_LEN 4
 #define       MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define       MC_CMD_MUM_IN_OP_HDR_LEN 4
 #define        MC_CMD_MUM_IN_OP_LBN 0
 #define        MC_CMD_MUM_IN_OP_WIDTH 8
 /* enum: NULL MCDI command to MUM */
 #define    MC_CMD_MUM_IN_NULL_LEN 4
 /* MUM cmd header */
 #define       MC_CMD_MUM_IN_CMD_OFST 0
+#define       MC_CMD_MUM_IN_CMD_LEN 4
 
 /* MC_CMD_MUM_IN_GET_VERSION msgrequest */
 #define    MC_CMD_MUM_IN_GET_VERSION_LEN 4
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 
 /* MC_CMD_MUM_IN_READ msgrequest */
 #define    MC_CMD_MUM_IN_READ_LEN 16
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /* ID of (device connected to MUM) to read from registers of */
 #define       MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+#define       MC_CMD_MUM_IN_READ_DEVICE_LEN 4
 /* enum: Hittite HMC1035 clock generator on Sorrento board */
 #define          MC_CMD_MUM_DEV_HITTITE 0x1
 /* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
 #define          MC_CMD_MUM_DEV_HITTITE_NIC 0x2
 /* 32-bit address to read from */
 #define       MC_CMD_MUM_IN_READ_ADDR_OFST 8
+#define       MC_CMD_MUM_IN_READ_ADDR_LEN 4
 /* Number of words to read. */
 #define       MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+#define       MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4
 
 /* MC_CMD_MUM_IN_WRITE msgrequest */
 #define    MC_CMD_MUM_IN_WRITE_LENMIN 16
 #define    MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /* ID of (device connected to MUM) to write to registers of */
 #define       MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+#define       MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4
 /* enum: Hittite HMC1035 clock generator on Sorrento board */
 /*               MC_CMD_MUM_DEV_HITTITE 0x1 */
 /* 32-bit address to write to */
 #define       MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+#define       MC_CMD_MUM_IN_WRITE_ADDR_LEN 4
 /* Words to write */
 #define       MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
 #define       MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
 #define    MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /* MUM I2C cmd code */
 #define       MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+#define       MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4
 /* Number of bytes to write */
 #define       MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+#define       MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4
 /* Number of bytes to read */
 #define       MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+#define       MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4
 /* Bytes to write */
 #define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
 #define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
 #define    MC_CMD_MUM_IN_LOG_LEN 8
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_LOG_OP_OFST 4
+#define       MC_CMD_MUM_IN_LOG_OP_LEN 4
 #define          MC_CMD_MUM_IN_LOG_OP_UART  0x1 /* enum */
 
 /* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
 #define    MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /*            MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/*            MC_CMD_MUM_IN_LOG_OP_LEN 4 */
 /* Enable/disable debug output to UART */
 #define       MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+#define       MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_LEN 8
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_HDR_LEN 4
 #define        MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
 #define        MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
 #define          MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
 /* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4
 /* The first 32-bit word to be written to the GPIO OUT register. */
 #define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4
 /* The second 32-bit word to be written to the GPIO OUT register. */
 #define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4
 /* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
 #define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4
 /* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
 #define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OP msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OP_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4
 #define        MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
 #define        MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
 #define          MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
 /* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
 
 /* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
 
 /* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
 
 #define    MC_CMD_MUM_IN_READ_SENSORS_LEN 8
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define       MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4
 #define        MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
 #define        MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
 #define        MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
 #define    MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /* Bit-mask of clocks to be programmed */
 #define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4
 #define          MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
 #define          MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
 #define          MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
 /* Control flags for clock programming */
 #define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4
 #define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
 #define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
 #define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
 #define    MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /* Enable/Disable FPGA config from flash */
 #define       MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+#define       MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4
 
 /* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
 #define    MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 
 /* MC_CMD_MUM_IN_QSFP msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_LEN 12
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_HDR_LEN 4
 #define        MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
 #define        MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
 #define          MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
 #define          MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
 #define          MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
 #define       MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_IDX_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_INIT_LEN 16
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+#define       MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4
 
 /* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
 #define    MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 
 /* MC_CMD_MUM_OUT msgresponse */
 #define    MC_CMD_MUM_OUT_LEN 0
 /* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
 #define    MC_CMD_MUM_OUT_GET_VERSION_LEN 12
 #define       MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define       MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4
 #define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
 #define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
 #define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
 #define    MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
 /* The first 32-bit word read from the GPIO IN register. */
 #define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4
 /* The second 32-bit word read from the GPIO IN register. */
 #define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+#define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4
 
 /* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
 #define    MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
 #define    MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
 /* The first 32-bit word read from the GPIO OUT register. */
 #define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4
 /* The second 32-bit word read from the GPIO OUT register. */
 #define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+#define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4
 
 /* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
 #define    MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
 /* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
 #define    MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
 #define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4
 #define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+#define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4
 
 /* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
 #define    MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
 #define       MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4
 
 /* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
 #define    MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
 /* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
 #define    MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
 #define       MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+#define       MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4
 
 /* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
 #define    MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
 /* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
 #define    MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
 #define       MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+#define       MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4
 
 /* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
 /* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
 #define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4
 #define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4
 #define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
 #define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
 #define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
 /* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
 #define       MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4
 
 /* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
 #define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
 /* in bytes */
 #define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4
 #define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
 #define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
 #define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
 /* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
 #define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4
 #define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+#define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4
 
 /* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
 #define       MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4
 
 /* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
 #define    MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
 #define    MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
 /* Discrete (soldered) DDR resistor strap info */
 #define       MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
+#define       MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
 /* Number of SODIMM info records */
 #define       MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
+#define       MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4
 /* Array of SODIMM info records */
 #define       MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
 #define       MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
 /* EVB_PORT_ID structuredef */
 #define    EVB_PORT_ID_LEN 4
 #define       EVB_PORT_ID_PORT_ID_OFST 0
+#define       EVB_PORT_ID_PORT_ID_LEN 4
 /* enum: An invalid port handle. */
 #define          EVB_PORT_ID_NULL  0x0
 /* enum: The port assigned to this function. */
 #define          NVRAM_PARTITION_TYPE_FC_LOG               0xb04
 /* enum: MUM firmware partition */
 #define          NVRAM_PARTITION_TYPE_MUM_FIRMWARE         0xc00
+/* enum: SUC firmware partition (this is intentionally an alias of
+ * MUM_FIRMWARE)
+ */
+#define          NVRAM_PARTITION_TYPE_SUC_FIRMWARE         0xc00
 /* enum: MUM Non-volatile log output partition. */
 #define          NVRAM_PARTITION_TYPE_MUM_LOG              0xc01
 /* enum: MUM Application table partition. */
 #define          NVRAM_PARTITION_TYPE_MUM_FUSELOCK         0xc06
 /* enum: UEFI expansion ROM if separate from PXE */
 #define          NVRAM_PARTITION_TYPE_EXPANSION_UEFI       0xd00
-/* enum: Spare partition 0 */
-#define          NVRAM_PARTITION_TYPE_SPARE_0              0x1000
+/* enum: Used by the expansion ROM for logging */
+#define          NVRAM_PARTITION_TYPE_PXE_LOG              0x1000
 /* enum: Used for XIP code of shmbooted images */
 #define          NVRAM_PARTITION_TYPE_XIP_SCRATCH          0x1100
 /* enum: Spare partition 2 */
 #define          NVRAM_PARTITION_TYPE_SPARE_4              0x1400
 /* enum: Spare partition 5 */
 #define          NVRAM_PARTITION_TYPE_SPARE_5              0x1500
+/* enum: Partition for reporting MC status. See mc_flash_layout.h
+ * medford_mc_status_hdr_t for layout on Medford.
+ */
+#define          NVRAM_PARTITION_TYPE_STATUS               0x1600
+/* enum: Spare partition 13 */
+#define          NVRAM_PARTITION_TYPE_SPARE_13              0x1700
+/* enum: Spare partition 14 */
+#define          NVRAM_PARTITION_TYPE_SPARE_14              0x1800
+/* enum: Spare partition 15 */
+#define          NVRAM_PARTITION_TYPE_SPARE_15              0x1900
+/* enum: Spare partition 16 */
+#define          NVRAM_PARTITION_TYPE_SPARE_16              0x1a00
+/* enum: Factory defaults for dynamic configuration */
+#define          NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS    0x1b00
+/* enum: Factory defaults for expansion ROM configuration */
+#define          NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS    0x1c00
+/* enum: Field Replaceable Unit inventory information for use on IPMI
+ * platforms. See SF-119124-PS. The STATIC_CONFIG partition may contain a
+ * subset of the information stored in this partition.
+ */
+#define          NVRAM_PARTITION_TYPE_FRU_INFORMATION       0x1d00
 /* enum: Start of reserved value range (firmware may use for any purpose) */
 #define          NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN  0xff00
 /* enum: End of reserved value range (firmware may use for any purpose) */
 /* LICENSED_APP_ID structuredef */
 #define    LICENSED_APP_ID_LEN 4
 #define       LICENSED_APP_ID_ID_OFST 0
+#define       LICENSED_APP_ID_ID_LEN 4
 /* enum: OpenOnload */
 #define          LICENSED_APP_ID_ONLOAD                  0x1
 /* enum: PTP timestamping */
 #define          LICENSED_APP_ID_SOLARCAPTURE_TAP        0x400
 /* enum: Capture SolarSystem 40G */
 #define          LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800
+/* enum: Capture SolarSystem 1G */
+#define          LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G  0x1000
+/* enum: ScaleOut Onload */
+#define          LICENSED_APP_ID_SCALEOUT_ONLOAD         0x2000
+/* enum: SCS Network Analytics Dashboard */
+#define          LICENSED_APP_ID_DSHBRD                  0x4000
+/* enum: SolarCapture Trading Analytics */
+#define          LICENSED_APP_ID_SCATRD                  0x8000
 #define       LICENSED_APP_ID_ID_LBN 0
 #define       LICENSED_APP_ID_ID_WIDTH 32
 
 #define        LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1
 #define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11
 #define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
+#define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
+#define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
+#define        LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13
+#define        LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1
+#define        LICENSED_V3_APPS_DSHBRD_LBN 14
+#define        LICENSED_V3_APPS_DSHBRD_WIDTH 1
+#define        LICENSED_V3_APPS_SCATRD_LBN 15
+#define        LICENSED_V3_APPS_SCATRD_WIDTH 1
 #define       LICENSED_V3_APPS_MASK_LBN 0
 #define       LICENSED_V3_APPS_MASK_WIDTH 64
 
 #define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
 /* enum: This is a TX completion event, not a timestamp */
 #define          TX_TIMESTAMP_EVENT_TX_EV_COMPLETION  0x0
+/* enum: This is a TX completion event for a CTPIO transmit. The event format
+ * is the same as for TX_EV_COMPLETION.
+ */
+#define          TX_TIMESTAMP_EVENT_TX_EV_CTPIO_COMPLETION  0x11
+/* enum: This is the low part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_LO
+ */
+#define          TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO  0x12
+/* enum: This is the high part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_HI
+ */
+#define          TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI  0x13
 /* enum: This is the low part of a TX timestamp event */
 #define          TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO  0x51
 /* enum: This is the high part of a TX timestamp event */
 #define       RSS_MODE_HASH_SELECTOR_LBN 0
 #define       RSS_MODE_HASH_SELECTOR_WIDTH 8
 
+/* CTPIO_STATS_MAP structuredef */
+#define    CTPIO_STATS_MAP_LEN 4
+/* The (function relative) VI number */
+#define       CTPIO_STATS_MAP_VI_OFST 0
+#define       CTPIO_STATS_MAP_VI_LEN 2
+#define       CTPIO_STATS_MAP_VI_LBN 0
+#define       CTPIO_STATS_MAP_VI_WIDTH 16
+/* The target bucket for the VI */
+#define       CTPIO_STATS_MAP_BUCKET_OFST 2
+#define       CTPIO_STATS_MAP_BUCKET_LEN 2
+#define       CTPIO_STATS_MAP_BUCKET_LBN 16
+#define       CTPIO_STATS_MAP_BUCKET_WIDTH 16
+
 
 /***********************************/
 /* MC_CMD_READ_REGS
  */
 #define MC_CMD_READ_REGS 0x50
 
-#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_READ_REGS_IN msgrequest */
 #define    MC_CMD_READ_REGS_IN_LEN 0
 #define    MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
 /* Size, in entries */
 #define       MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_EVQ_IN_SIZE_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+#define       MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4
 /* The initial timer value. The load value is ignored if the timer mode is DIS.
  */
 #define       MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+#define       MC_CMD_INIT_EVQ_IN_TMR_LOAD_LEN 4
 /* The reload value is ignored in one-shot modes */
 #define       MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+#define       MC_CMD_INIT_EVQ_IN_TMR_RELOAD_LEN 4
 /* tbd */
 #define       MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_EVQ_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
 #define        MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
 #define        MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
 #define        MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
 #define        MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
 #define       MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+#define       MC_CMD_INIT_EVQ_IN_TMR_MODE_LEN 4
 /* enum: Disabled */
 #define          MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
 /* enum: Immediate */
 #define          MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
 /* Target EVQ for wakeups if in wakeup mode. */
 #define       MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+#define       MC_CMD_INIT_EVQ_IN_TARGET_EVQ_LEN 4
 /* Target interrupt if in interrupting mode (note union with target EVQ). Use
  * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test
  * purposes.
  */
 #define       MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+#define       MC_CMD_INIT_EVQ_IN_IRQ_NUM_LEN 4
 /* Event Counter Mode. */
 #define       MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+#define       MC_CMD_INIT_EVQ_IN_COUNT_MODE_LEN 4
 /* enum: Disabled */
 #define          MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
 /* enum: Disabled */
 #define          MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
 /* Event queue packet count threshold. */
 #define       MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+#define       MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
 #define       MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
 #define    MC_CMD_INIT_EVQ_OUT_LEN 4
 /* Only valid if INTRFLAG was true */
 #define       MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+#define       MC_CMD_INIT_EVQ_OUT_IRQ_LEN 4
 
 /* MC_CMD_INIT_EVQ_V2_IN msgrequest */
 #define    MC_CMD_INIT_EVQ_V2_IN_LENMIN 44
 #define    MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num))
 /* Size, in entries */
 #define       MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4
+#define       MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4
 /* The initial timer value. The load value is ignored if the timer mode is DIS.
  */
 #define       MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8
+#define       MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_LEN 4
 /* The reload value is ignored in one-shot modes */
 #define       MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12
+#define       MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_LEN 4
 /* tbd */
 #define       MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_EVQ_V2_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1
  */
 #define          MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3
 #define       MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20
+#define       MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_LEN 4
 /* enum: Disabled */
 #define          MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0
 /* enum: Immediate */
 #define          MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3
 /* Target EVQ for wakeups if in wakeup mode. */
 #define       MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24
+#define       MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_LEN 4
 /* Target interrupt if in interrupting mode (note union with target EVQ). Use
  * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test
  * purposes.
  */
 #define       MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24
+#define       MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_LEN 4
 /* Event Counter Mode. */
 #define       MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28
+#define       MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_LEN 4
 /* enum: Disabled */
 #define          MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0
 /* enum: Disabled */
 #define          MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3
 /* Event queue packet count threshold. */
 #define       MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32
+#define       MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36
 #define       MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8
 #define    MC_CMD_INIT_EVQ_V2_OUT_LEN 8
 /* Only valid if INTRFLAG was true */
 #define       MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0
+#define       MC_CMD_INIT_EVQ_V2_OUT_IRQ_LEN 4
 /* Actual configuration applied on the card */
 #define       MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4
+#define       MC_CMD_INIT_EVQ_V2_OUT_FLAGS_LEN 4
 #define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0
 #define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1
 #define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1
 #define    MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
 /* Size, in entries */
 #define       MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_RXQ_IN_SIZE_LEN 4
 /* The EVQ to send events to. This is an index originally specified to INIT_EVQ
  */
 #define       MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_RXQ_IN_TARGET_EVQ_LEN 4
 /* The value to put in the event data. Check hardware spec. for valid range. */
 #define       MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_RXQ_IN_LABEL_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4
 /* There will be more flags here. */
 #define       MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_RXQ_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
 #define        MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
 #define        MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
 #define        MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_RXQ_IN_OWNER_ID_LEN 4
 /* The port ID associated with the v-adaptor which should contain this DMAQ. */
 #define       MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_RXQ_IN_PORT_ID_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
 #define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
 #define    MC_CMD_INIT_RXQ_EXT_IN_LEN 544
 /* Size, in entries */
 #define       MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_RXQ_EXT_IN_SIZE_LEN 4
 /* The EVQ to send events to. This is an index originally specified to INIT_EVQ
  */
 #define       MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4
 /* The value to put in the event data. Check hardware spec. for valid range. */
 #define       MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4
 /* There will be more flags here. */
 #define       MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_RXQ_EXT_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4
 /* The port ID associated with the v-adaptor which should contain this DMAQ. */
 #define       MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
 #define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
 #define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
 /* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
 #define       MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
+#define       MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4
 
 /* MC_CMD_INIT_RXQ_OUT msgresponse */
 #define    MC_CMD_INIT_RXQ_OUT_LEN 0
 #define    MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
 /* Size, in entries */
 #define       MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_TXQ_IN_SIZE_LEN 4
 /* The EVQ to send events to. This is an index originally specified to
  * INIT_EVQ.
  */
 #define       MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_TXQ_IN_TARGET_EVQ_LEN 4
 /* The value to put in the event data. Check hardware spec. for valid range. */
 #define       MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_TXQ_IN_LABEL_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_TXQ_IN_INSTANCE_LEN 4
 /* There will be more flags here. */
 #define       MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_TXQ_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
 #define        MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
 #define        MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
 #define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_TXQ_IN_OWNER_ID_LEN 4
 /* The port ID associated with the v-adaptor which should contain this DMAQ. */
 #define       MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_TXQ_IN_PORT_ID_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
 #define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
 #define    MC_CMD_INIT_TXQ_EXT_IN_LEN 544
 /* Size, in entries */
 #define       MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_TXQ_EXT_IN_SIZE_LEN 4
 /* The EVQ to send events to. This is an index originally specified to
  * INIT_EVQ.
  */
 #define       MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_LEN 4
 /* The value to put in the event data. Check hardware spec. for valid range. */
 #define       MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4
 /* There will be more flags here. */
 #define       MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_TXQ_EXT_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN 14
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4
 /* The port ID associated with the v-adaptor which should contain this DMAQ. */
 #define       MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
 #define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
 #define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
 /* Flags related to Qbb flow control mode. */
 #define       MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define       MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_LEN 4
 #define        MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
 #define        MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
 #define        MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
  * passed to INIT_EVQ
  */
 #define       MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+#define       MC_CMD_FINI_EVQ_IN_INSTANCE_LEN 4
 
 /* MC_CMD_FINI_EVQ_OUT msgresponse */
 #define    MC_CMD_FINI_EVQ_OUT_LEN 0
 #define    MC_CMD_FINI_RXQ_IN_LEN 4
 /* Instance of RXQ to destroy */
 #define       MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+#define       MC_CMD_FINI_RXQ_IN_INSTANCE_LEN 4
 
 /* MC_CMD_FINI_RXQ_OUT msgresponse */
 #define    MC_CMD_FINI_RXQ_OUT_LEN 0
 #define    MC_CMD_FINI_TXQ_IN_LEN 4
 /* Instance of TXQ to destroy */
 #define       MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+#define       MC_CMD_FINI_TXQ_IN_INSTANCE_LEN 4
 
 /* MC_CMD_FINI_TXQ_OUT msgresponse */
 #define    MC_CMD_FINI_TXQ_OUT_LEN 0
 #define    MC_CMD_DRIVER_EVENT_IN_LEN 12
 /* Handle of target EVQ */
 #define       MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+#define       MC_CMD_DRIVER_EVENT_IN_EVQ_LEN 4
 /* Bits 0 - 63 of event */
 #define       MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
 #define       MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
 #define    MC_CMD_PROXY_CMD_IN_LEN 4
 /* The handle of the target function. */
 #define       MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define       MC_CMD_PROXY_CMD_IN_TARGET_LEN 4
 #define        MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
 #define        MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
 #define        MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
 #define    MC_PROXY_STATUS_BUFFER_LEN 16
 /* Handle allocated by the firmware for this proxy transaction */
 #define       MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+#define       MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4
 /* enum: An invalid handle. */
 #define          MC_PROXY_STATUS_BUFFER_HANDLE_INVALID  0x0
 #define       MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
  * elevated privilege mask granted to the requesting function.
  */
 #define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4
 #define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
 #define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
 
 /* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
 #define    MC_CMD_PROXY_CONFIGURE_IN_LEN 108
 #define       MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define       MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4
 #define        MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
 #define        MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
 #define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
 /* Must be a power of 2 */
 #define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
  * of blocks, each of the size REPLY_BLOCK_SIZE.
  */
 #define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
 /* Must be a power of 2 */
 #define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
  * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
  * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
 #define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
 /* Must be a power of 2, or zero if this buffer is not provided */
 #define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4
 /* Applies to all three buffers */
 #define       MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+#define       MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4
 /* A bit mask defining which MCDI operations may be proxied */
 #define       MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
 #define       MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
 /* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
 #define    MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4
 #define        MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
 #define        MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
 /* Must be a power of 2 */
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
  * of blocks, each of the size REPLY_BLOCK_SIZE.
  */
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
 /* Must be a power of 2 */
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
  * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
  * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
 /* Must be a power of 2, or zero if this buffer is not provided */
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4
 /* Applies to all three buffers */
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4
 /* A bit mask defining which MCDI operations may be proxied */
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4
 
 /* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
 #define    MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
 /* MC_CMD_PROXY_COMPLETE_IN msgrequest */
 #define    MC_CMD_PROXY_COMPLETE_IN_LEN 12
 #define       MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
+#define       MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4
 #define       MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
+#define       MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4
 /* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
  * is stored in the REPLY_BUFF.
  */
  */
 #define          MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
 #define       MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
+#define       MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4
 
 /* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
 #define    MC_CMD_PROXY_COMPLETE_OUT_LEN 0
 #define    MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
 /* Owner ID to use */
 #define       MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4
 /* Size of buffer table pages to use, in bytes (note that only a few values are
  * legal on any specific hardware).
  */
 #define       MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4
 
 /* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
 #define    MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
 #define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4
 #define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4
 /* Buffer table IDs for use in DMA descriptors. */
 #define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
 #define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4
 /* ID */
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
 /* Num entries */
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
 /* Buffer table entry address */
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
 /* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
 #define    MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
 #define       MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+#define       MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4
 
 /* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
 #define    MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
 
-/* PORT_CONFIG_ENTRY structuredef */
-#define    PORT_CONFIG_ENTRY_LEN 16
-/* External port number (label) */
-#define       PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0
-#define       PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1
-#define       PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0
-#define       PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8
-/* Port core location */
-#define       PORT_CONFIG_ENTRY_CORE_OFST 1
-#define       PORT_CONFIG_ENTRY_CORE_LEN 1
-#define          PORT_CONFIG_ENTRY_STANDALONE  0x0 /* enum */
-#define          PORT_CONFIG_ENTRY_MASTER  0x1 /* enum */
-#define          PORT_CONFIG_ENTRY_SLAVE  0x2 /* enum */
-#define       PORT_CONFIG_ENTRY_CORE_LBN 8
-#define       PORT_CONFIG_ENTRY_CORE_WIDTH 8
-/* Internal number (HW resource) relative to the core */
-#define       PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2
-#define       PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1
-#define       PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16
-#define       PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8
-/* Reserved */
-#define       PORT_CONFIG_ENTRY_RSVD_OFST 3
-#define       PORT_CONFIG_ENTRY_RSVD_LEN 1
-#define       PORT_CONFIG_ENTRY_RSVD_LBN 24
-#define       PORT_CONFIG_ENTRY_RSVD_WIDTH 8
-/* Bitmask of KR lanes used by the port */
-#define       PORT_CONFIG_ENTRY_LANES_OFST 4
-#define       PORT_CONFIG_ENTRY_LANES_LBN 32
-#define       PORT_CONFIG_ENTRY_LANES_WIDTH 32
-/* Port capabilities (MC_CMD_PHY_CAP_*) */
-#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8
-#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64
-#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32
-/* Reserved (align to 16 bytes) */
-#define       PORT_CONFIG_ENTRY_RSVD2_OFST 12
-#define       PORT_CONFIG_ENTRY_RSVD2_LBN 96
-#define       PORT_CONFIG_ENTRY_RSVD2_WIDTH 32
-
 
 /***********************************/
 /* MC_CMD_FILTER_OP
 #define    MC_CMD_FILTER_OP_IN_LEN 108
 /* identifies the type of operation requested */
 #define       MC_CMD_FILTER_OP_IN_OP_OFST 0
+#define       MC_CMD_FILTER_OP_IN_OP_LEN 4
 /* enum: single-recipient filter insert */
 #define          MC_CMD_FILTER_OP_IN_OP_INSERT  0x0
 /* enum: single-recipient filter remove */
 /* The port ID associated with the v-adaptor which should contain this filter.
  */
 #define       MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+#define       MC_CMD_FILTER_OP_IN_PORT_ID_LEN 4
 /* fields to include in match criteria */
 #define       MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define       MC_CMD_FILTER_OP_IN_MATCH_FIELDS_LEN 4
 #define        MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
 #define        MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
 #define        MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
 #define        MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
 /* receive destination */
 #define       MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+#define       MC_CMD_FILTER_OP_IN_RX_DEST_LEN 4
 /* enum: drop packets */
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_DROP  0x0
 /* enum: receive to host */
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_TX1  0x4
 /* receive queue handle (for multiple queue modes, this is the base queue) */
 #define       MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+#define       MC_CMD_FILTER_OP_IN_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+#define       MC_CMD_FILTER_OP_IN_RX_MODE_LEN 4
 /* enum: receive to just the specified queue */
 #define          MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE  0x0
 /* enum: receive to multiple queues using RSS context */
  * MC_CMD_DOT1P_MAPPING_ALLOC.
  */
 #define       MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+#define       MC_CMD_FILTER_OP_IN_RX_CONTEXT_LEN 4
 /* transmit domain (reserved; set to 0) */
 #define       MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+#define       MC_CMD_FILTER_OP_IN_TX_DOMAIN_LEN 4
 /* transmit destination (either set the MAC and/or PM bits for explicit
  * control, or set this field to TX_DEST_DEFAULT for sensible default
  * behaviour)
  */
 #define       MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+#define       MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4
 /* enum: request default behaviour (based on filter type) */
 #define          MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT  0xffffffff
 #define        MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
 #define       MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
 /* Firmware defined register 0 to match (reserved; set to 0) */
 #define       MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+#define       MC_CMD_FILTER_OP_IN_FWDEF0_LEN 4
 /* Firmware defined register 1 to match (reserved; set to 0) */
 #define       MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+#define       MC_CMD_FILTER_OP_IN_FWDEF1_LEN 4
 /* source IP address to match (as bytes in network order; set last 12 bytes to
  * 0 for IPv4 address)
  */
 #define    MC_CMD_FILTER_OP_EXT_IN_LEN 172
 /* identifies the type of operation requested */
 #define       MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+#define       MC_CMD_FILTER_OP_EXT_IN_OP_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_FILTER_OP_IN/OP */
 /* filter handle (for remove / unsubscribe operations) */
 /* The port ID associated with the v-adaptor which should contain this filter.
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+#define       MC_CMD_FILTER_OP_EXT_IN_PORT_ID_LEN 4
 /* fields to include in match criteria */
 #define       MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define       MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_LEN 4
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
 /* receive destination */
 #define       MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_DEST_LEN 4
 /* enum: drop packets */
 #define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP  0x0
 /* enum: receive to host */
 #define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1  0x4
 /* receive queue handle (for multiple queue modes, this is the base queue) */
 #define       MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_MODE_LEN 4
 /* enum: receive to just the specified queue */
 #define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE  0x0
 /* enum: receive to multiple queues using RSS context */
  * MC_CMD_DOT1P_MAPPING_ALLOC.
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_LEN 4
 /* transmit domain (reserved; set to 0) */
 #define       MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+#define       MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_LEN 4
 /* transmit destination (either set the MAC and/or PM bits for explicit
  * control, or set this field to TX_DEST_DEFAULT for sensible default
  * behaviour)
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+#define       MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4
 /* enum: request default behaviour (based on filter type) */
 #define          MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT  0xffffffff
 #define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
 #define       MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
 /* Firmware defined register 0 to match (reserved; set to 0) */
 #define       MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+#define       MC_CMD_FILTER_OP_EXT_IN_FWDEF0_LEN 4
 /* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
  * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
  * VXLAN/NVGRE, or 1 for Geneve)
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define       MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_LEN 4
 #define        MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
 #define        MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
 #define        MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
  * to 0)
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_LEN 4
 /* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
  * to 0)
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_LEN 4
 /* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
  * order; set last 12 bytes to 0 for IPv4 address)
  */
 #define    MC_CMD_FILTER_OP_OUT_LEN 12
 /* identifies the type of operation requested */
 #define       MC_CMD_FILTER_OP_OUT_OP_OFST 0
+#define       MC_CMD_FILTER_OP_OUT_OP_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_FILTER_OP_IN/OP */
 /* Returned filter handle (for insert / subscribe operations). Note that these
 #define    MC_CMD_FILTER_OP_EXT_OUT_LEN 12
 /* identifies the type of operation requested */
 #define       MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+#define       MC_CMD_FILTER_OP_EXT_OUT_OP_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_FILTER_OP_EXT_IN/OP */
 /* Returned filter handle (for insert / subscribe operations). Note that these
 #define    MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
 /* identifies the type of operation requested */
 #define       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_LEN 4
 /* enum: read the list of supported RX filter matches */
 #define          MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES  0x1
 /* enum: read flags indicating restrictions on filter insertion for the calling
 #define    MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
 /* identifies the type of operation requested */
 #define       MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
 /* number of supported match types */
 #define       MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+#define       MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_LEN 4
 /* array of supported match types (valid MATCH_FIELDS values for
  * MC_CMD_FILTER_OP) sorted in decreasing priority order
  */
 #define    MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
 /* identifies the type of operation requested */
 #define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
 /* bitfield of filter insertion restrictions */
 #define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_LEN 4
 #define        MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
 #define        MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
 
 #define    MC_CMD_PARSER_DISP_RW_IN_LEN 32
 /* identifies the target of the operation */
 #define       MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
+#define       MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4
 /* enum: RX dispatcher CPU */
 #define          MC_CMD_PARSER_DISP_RW_IN_RX_DICPU  0x0
 /* enum: TX dispatcher CPU */
 #define          MC_CMD_PARSER_DISP_RW_IN_TX_DICPU  0x1
-/* enum: Lookup engine (with original metadata format) */
+/* enum: Lookup engine (with original metadata format). Deprecated; used only
+ * by cmdclient as a fallback for very old Huntington firmware, and not
+ * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA
+ * instead.
+ */
 #define          MC_CMD_PARSER_DISP_RW_IN_LUE  0x2
 /* enum: Lookup engine (with requested metadata format) */
 #define          MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA  0x3
 #define          MC_CMD_PARSER_DISP_RW_IN_MISC_STATE  0x5
 /* identifies the type of operation requested */
 #define       MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
-/* enum: read a word of DICPU DMEM or a LUE entry */
+#define       MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4
+/* enum: Read a word of DICPU DMEM or a LUE entry */
 #define          MC_CMD_PARSER_DISP_RW_IN_READ  0x0
-/* enum: write a word of DICPU DMEM or a LUE entry */
+/* enum: Write a word of DICPU DMEM or a LUE entry. */
 #define          MC_CMD_PARSER_DISP_RW_IN_WRITE  0x1
-/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
+/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). */
 #define          MC_CMD_PARSER_DISP_RW_IN_RMW  0x2
 /* data memory address (DICPU targets) or LUE index (LUE targets) */
 #define       MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+#define       MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4
 /* selector (for MISC_STATE target) */
 #define       MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
+#define       MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4
 /* enum: Port to datapath mapping */
 #define          MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING  0x1
 /* value to write (for DMEM writes) */
 #define       MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
+#define       MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4
 /* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
 #define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
+#define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4
 /* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
 #define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+#define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4
 /* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
 #define       MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
+#define       MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4
 /* value to write (for LUE writes) */
 #define       MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
 #define       MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
 #define    MC_CMD_PARSER_DISP_RW_OUT_LEN 52
 /* value read (for DMEM reads) */
 #define       MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
+#define       MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4
 /* value read (for LUE reads) */
 #define       MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
 #define       MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
 #define    MC_CMD_SET_PF_COUNT_IN_LEN 4
 /* New number of PFs on the device. */
 #define       MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+#define       MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4
 
 /* MC_CMD_SET_PF_COUNT_OUT msgresponse */
 #define    MC_CMD_SET_PF_COUNT_OUT_LEN 0
 #define    MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
 /* Identifies the port assignment for this function. */
 #define       MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+#define       MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
 /* Identifies the port assignment for this function. */
 #define       MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+#define       MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4
 
 /* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
 #define    MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
 #define    MC_CMD_ALLOC_VIS_IN_LEN 8
 /* The minimum number of VIs that is acceptable */
 #define       MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+#define       MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_LEN 4
 /* The maximum number of VIs that would be useful */
 #define       MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+#define       MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_LEN 4
 
 /* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request.
  * Use extended version in new code.
 #define    MC_CMD_ALLOC_VIS_OUT_LEN 8
 /* The number of VIs allocated on this function */
 #define       MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+#define       MC_CMD_ALLOC_VIS_OUT_VI_COUNT_LEN 4
 /* The base absolute VI number allocated to this function. Required to
  * correctly interpret wakeup events.
  */
 #define       MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+#define       MC_CMD_ALLOC_VIS_OUT_VI_BASE_LEN 4
 
 /* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
 #define    MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
 /* The number of VIs allocated on this function */
 #define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_LEN 4
 /* The base absolute VI number allocated to this function. Required to
  * correctly interpret wakeup events.
  */
 #define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_LEN 4
 /* Function's port vi_shift value (always 0 on Huntington) */
 #define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
 /* Number of VFs currently enabled. */
 #define       MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_LEN 4
 /* Max number of VFs before sriov stride and offset may need to be changed. */
 #define       MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_LEN 4
 #define       MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define       MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
 #define        MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
 /* RID offset of first VF from PF. */
 #define       MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_LEN 4
 /* RID offset of each subsequent VF from the previous. */
 #define       MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_SET_SRIOV_CFG_IN_LEN 20
 /* Number of VFs currently enabled. */
 #define       MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4
 /* Max number of VFs before sriov stride and offset may need to be changed. */
 #define       MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4
 #define       MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define       MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
 #define        MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
 /* RID offset of first VF from PF, or 0 for no change, or
  * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
  */
 #define       MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4
 /* RID offset of each subsequent VF from the previous, 0 for no change, or
  * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
  */
 #define       MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4
 
 /* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
 #define    MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
 #define    MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
 /* The number of VIs allocated on this function */
 #define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+#define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4
 /* The base absolute VI number allocated to this function. Required to
  * correctly interpret wakeup events.
  */
 #define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+#define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4
 /* Function's port vi_shift value (always 0 on Huntington) */
 #define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
+#define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_DUMP_VI_STATE_IN_LEN 4
 /* The VI number to query. */
 #define       MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+#define       MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
 
 /* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
 #define    MC_CMD_DUMP_VI_STATE_OUT_LEN 96
 #define       MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
 /* Combined metadata field. */
 #define       MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
 #define    MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
 /* Handle for allocated push I/O buffer. */
 #define       MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+#define       MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_FREE_PIOBUF_IN_LEN 4
 /* Handle for allocated push I/O buffer. */
 #define       MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define       MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
 
 /* MC_CMD_FREE_PIOBUF_OUT msgresponse */
 #define    MC_CMD_FREE_PIOBUF_OUT_LEN 0
 #define    MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
 /* VI number to get information for. */
 #define       MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define       MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
 
 /* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
 #define    MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
 #define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
 #define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
 #define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
+#define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
 /* VI number to set information for. */
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
 /* Transaction processing steering hint 1 for use with the Rx Queue. */
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4
 
 /* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
 #define    MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
 /* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
 #define    MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
 #define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
 /* enum: MISC. */
 #define          MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC  0x0
 /* enum: IDO. */
 /* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
 #define    MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
 #define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
+#define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
 /* Amalgamated TLP info word. */
 #define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
+#define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
 /* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
 #define    MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
 #define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
 /* Amalgamated TLP info word. */
 #define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
+#define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
  * in a command from the host.)
  */
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE     0x0 /* enum */
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET    0x1 /* enum */
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS    0x2 /* enum */
  * mc_flash_layout.h.)
  */
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4
 /* enum: Valid in phase 2 (PHASE_IMEMS) only */
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT  0x0
 /* enum: Valid in phase 2 (PHASE_IMEMS) only */
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL  0xffffffff
 /* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4
 /* enum: Last chunk, containing checksum rather than data */
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST  0xffffffff
 /* enum: Abort download of this item */
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT  0xfffffffe
 /* Length of this chunk in bytes */
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4
 /* Data for this chunk */
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
 #define    MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
 /* Same as MC_CMD_ERR field, but included as 0 in success cases */
 #define       MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
+#define       MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4
 /* Extra status information */
 #define       MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
+#define       MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4
 /* enum: Code download OK, completed. */
 #define          MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE  0x0
 /* enum: Code download aborted as requested. */
 #define    MC_CMD_GET_CAPABILITIES_OUT_LEN 20
 /* First word of flags. */
 #define       MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_LEN 4
 #define        MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
 #define        MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY  0x1
 /* enum: Packed stream RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM  0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE  0x5
 /* enum: BIST RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST  0x10a
 /* enum: RXDP Test firmware image 1 */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL  0x108
 /* enum: RXDP Test firmware image 9 */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY  0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_SLOW  0x10c
 /* TxDPCPU firmware id. */
 #define       MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
 #define       MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY  0x1
 /* enum: High packet rate TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE  0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_RULES_ENGINE  0x5
 /* enum: BIST TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST  0x12d
 /* enum: TXDP Test firmware image 1 */
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) RX PD production firmware */
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant RX PD firmware using PM rather than MAC
  * (Huntington development only)
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine RX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* enum: RX PD firmware parsing but not filtering network overlay tunnel
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) TX PD production firmware */
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant TX PD firmware using PM rather than MAC
  * (Huntington development only)
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine TX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* Hardware capabilities of NIC */
 #define       MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_LEN 4
 /* Licensed capabilities */
 #define       MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_LEN 4
 
 /* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */
 #define    MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
 #define    MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
 /* First word of flags. */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_LEN 4
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY  0x1
 /* enum: Packed stream RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM  0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE  0x5
 /* enum: BIST RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST  0x10a
 /* enum: RXDP Test firmware image 1 */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL  0x108
 /* enum: RXDP Test firmware image 9 */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY  0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW  0x10c
 /* TxDPCPU firmware id. */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY  0x1
 /* enum: High packet rate TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE  0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE  0x5
 /* enum: BIST TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST  0x12d
 /* enum: TXDP Test firmware image 1 */
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) RX PD production firmware */
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant RX PD firmware using PM rather than MAC
  * (Huntington development only)
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine RX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* enum: RX PD firmware parsing but not filtering network overlay tunnel
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) TX PD production firmware */
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant TX PD firmware using PM rather than MAC
  * (Huntington development only)
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine TX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* Hardware capabilities of NIC */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4
 /* Licensed capabilities */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_LEN 4
 /* Second word of flags. Not present on older firmware (check the length). */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
 /* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
  * on older firmware (check the length).
  */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
 
 /* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */
-#define    MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 73
+#define    MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76
 /* First word of flags. */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY  0x1
 /* enum: Packed stream RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM  0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE  0x5
 /* enum: BIST RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST  0x10a
 /* enum: RXDP Test firmware image 1 */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL  0x108
 /* enum: RXDP Test firmware image 9 */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY  0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW  0x10c
 /* TxDPCPU firmware id. */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY  0x1
 /* enum: High packet rate TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE  0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE  0x5
 /* enum: BIST TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST  0x12d
 /* enum: TXDP Test firmware image 1 */
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) RX PD production firmware */
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant RX PD firmware using PM rather than MAC
  * (Huntington development only)
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine RX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* enum: RX PD firmware parsing but not filtering network overlay tunnel
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) TX PD production firmware */
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant TX PD firmware using PM rather than MAC
  * (Huntington development only)
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine TX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* Hardware capabilities of NIC */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4
 /* Licensed capabilities */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_LEN 4
 /* Second word of flags. Not present on older firmware (check the length). */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
 /* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
  * on older firmware (check the length).
  */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K  0x1
 /* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K  0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+
+/* MC_CMD_GET_CAPABILITIES_V4_OUT msgresponse */
+#define    MC_CMD_GET_CAPABILITIES_V4_OUT_LEN 78
+/* First word of flags. */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP  0x0
+/* enum: Low latency RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY  0x1
+/* enum: Packed stream RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM  0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE  0x5
+/* enum: BIST RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST  0x10a
+/* enum: RXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH  0x101
+/* enum: RXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD  0x102
+/* enum: RXDP Test firmware image 3 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST  0x103
+/* enum: RXDP Test firmware image 4 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE  0x104
+/* enum: RXDP Test firmware image 5 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE  0x105
+/* enum: RXDP Test firmware image 6 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS  0x106
+/* enum: RXDP Test firmware image 7 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT  0x107
+/* enum: RXDP Test firmware image 8 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL  0x108
+/* enum: RXDP Test firmware image 9 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY  0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW  0x10c
+/* TxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP  0x0
+/* enum: Low latency TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY  0x1
+/* enum: High packet rate TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE  0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE  0x5
+/* enum: BIST TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST  0x12d
+/* enum: TXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT  0x101
+/* enum: TXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS  0x102
+/* enum: TXDP CSR bus test firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR  0x103
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED  0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT  0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH  0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM  0x4
+/* enum: Low latency RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY  0x5
+/* enum: Packed stream RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM  0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF  0x7
+/* enum: Rules engine RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED_9  0x9
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY  0xf
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED  0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT  0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH  0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM  0x4
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY  0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF  0x7
+/* enum: Rules engine TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED_9  0x9
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
+/* Hardware capabilities of NIC */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED  0xff
+/* enum: PF does not exist. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT  0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED  0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT  0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_OFST 42
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/*               MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED  0xff */
+/* enum: PF does not exist. */
+/*               MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT  0xfe */
+/* Number of VIs available for each external port */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_OFST 68
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_OFST 70
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST 72
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K   0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K  0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K  0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2
 
 
 /***********************************/
 #define    MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
 /* the bucket id */
 #define       MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
+#define       MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
 /* the bucket id */
 #define       MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
+#define       MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4
 
 /* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
 #define    MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
 #define    MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
 /* the bucket id */
 #define       MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
+#define       MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4
 /* the rate in mbps */
 #define       MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+#define       MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4
 
 /* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
 #define    MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
 /* the bucket id */
 #define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4
 /* the rate in mbps */
 #define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4
 /* the desired maximum fill level */
 #define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4
 
 /* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
 #define    MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
 #define    MC_CMD_TCM_TXQ_INIT_IN_LEN 28
 /* the txq id */
 #define       MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
+#define       MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4
 /* the static priority associated with the txq */
 #define       MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
+#define       MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4
 /* bitmask of the priority queues this txq is inserted into when inserted. */
 #define       MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define       MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4
 #define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
 #define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
 #define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
 #define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
 /* the reaction point (RP) bucket */
 #define       MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
+#define       MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4
 /* an already reserved bucket (typically set to bucket associated with outer
  * vswitch)
  */
 #define       MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
+#define       MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4
 /* an already reserved bucket (typically set to bucket associated with inner
  * vswitch)
  */
 #define       MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
+#define       MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4
 /* the min bucket (typically for ETS/minimum bandwidth) */
 #define       MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+#define       MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4
 
 /* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
 #define    MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
 /* the txq id */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4
 /* the static priority associated with the txq */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4
 /* bitmask of the priority queues this txq is inserted into when inserted. */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4
 #define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
 #define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
 #define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
 #define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
 /* the reaction point (RP) bucket */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4
 /* an already reserved bucket (typically set to bucket associated with outer
  * vswitch)
  */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4
 /* an already reserved bucket (typically set to bucket associated with inner
  * vswitch)
  */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4
 /* the min bucket (typically for ETS/minimum bandwidth) */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4
 /* the static priority associated with the txq */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4
 
 /* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
 #define    MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
 #define    MC_CMD_LINK_PIOBUF_IN_LEN 8
 /* Handle for allocated push I/O buffer. */
 #define       MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define       MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
 /* Function Local Instance (VI) number. */
 #define       MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
+#define       MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
 
 /* MC_CMD_LINK_PIOBUF_OUT msgresponse */
 #define    MC_CMD_LINK_PIOBUF_OUT_LEN 0
 #define    MC_CMD_UNLINK_PIOBUF_IN_LEN 4
 /* Function Local Instance (VI) number. */
 #define       MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0
+#define       MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
 
 /* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
 #define    MC_CMD_UNLINK_PIOBUF_OUT_LEN 0
 #define    MC_CMD_VSWITCH_ALLOC_IN_LEN 16
 /* The port to connect to the v-switch's upstream port. */
 #define       MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 /* The type of v-switch to create. */
 #define       MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+#define       MC_CMD_VSWITCH_ALLOC_IN_TYPE_LEN 4
 /* enum: VLAN */
 #define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN  0x1
 /* enum: VEB */
 #define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST  0x5
 /* Flags controlling v-port creation */
 #define       MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define       MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4
 #define        MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
 #define        MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
 /* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
  * v-ports with this number of tags.
  */
 #define       MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define       MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
 
 /* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
 #define    MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
 #define    MC_CMD_VSWITCH_FREE_IN_LEN 4
 /* The port to which the v-switch is connected. */
 #define       MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_VSWITCH_FREE_OUT msgresponse */
 #define    MC_CMD_VSWITCH_FREE_OUT_LEN 0
 #define    MC_CMD_VSWITCH_QUERY_IN_LEN 4
 /* The port to which the v-switch is connected. */
 #define       MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
 #define    MC_CMD_VSWITCH_QUERY_OUT_LEN 0
 #define    MC_CMD_VPORT_ALLOC_IN_LEN 20
 /* The port to which the v-switch is connected. */
 #define       MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 /* The type of the new v-port. */
 #define       MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+#define       MC_CMD_VPORT_ALLOC_IN_TYPE_LEN 4
 /* enum: VLAN (obsolete) */
 #define          MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN  0x1
 /* enum: VEB (obsolete) */
 #define          MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST  0x6
 /* Flags controlling v-port creation */
 #define       MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define       MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
  * v-switch.
  */
 #define       MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define       MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
 /* The actual VLAN tags to insert/remove */
 #define       MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define       MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_LEN 4
 #define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
 #define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
 #define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
 #define    MC_CMD_VPORT_ALLOC_OUT_LEN 4
 /* The handle of the new v-port */
 #define       MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_VPORT_FREE_IN_LEN 4
 /* The handle of the v-port */
 #define       MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_FREE_IN_VPORT_ID_LEN 4
 
 /* MC_CMD_VPORT_FREE_OUT msgresponse */
 #define    MC_CMD_VPORT_FREE_OUT_LEN 0
 #define    MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
 /* The port to connect to the v-adaptor's port. */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 /* Flags controlling v-adaptor creation */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define       MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_LEN 4
 #define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
 #define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
 #define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
 #define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
 /* The number of VLAN tags to strip on receive */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+#define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_LEN 4
 /* The number of VLAN tags to transparently insert/remove. */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+#define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
 /* The actual VLAN tags to insert/remove */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define       MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_LEN 4
 #define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
 #define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
 #define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
 #define    MC_CMD_VADAPTOR_FREE_IN_LEN 4
 /* The port to which the v-adaptor is connected. */
 #define       MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
 #define    MC_CMD_VADAPTOR_FREE_OUT_LEN 0
 #define    MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
 /* The port to which the v-adaptor is connected. */
 #define       MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
 /* The new MAC address to assign to this v-adaptor */
 #define       MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
 #define       MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
 #define    MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
 /* The port to which the v-adaptor is connected. */
 #define       MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
 #define    MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
 #define    MC_CMD_VADAPTOR_QUERY_IN_LEN 4
 /* The port to which the v-adaptor is connected. */
 #define       MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
 #define    MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
 /* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
 #define       MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
+#define       MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_LEN 4
 /* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
 #define       MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
+#define       MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_LEN 4
 /* The number of VLAN tags that may still be added */
 #define       MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
+#define       MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
 /* The port to assign. */
 #define       MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+#define       MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_LEN 4
 /* The target function to modify. */
 #define       MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define       MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_LEN 4
 #define        MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
 #define        MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
 #define        MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
 /* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
 #define    MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
 #define       MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4
 /* Write enable bits 0-3, set to write, clear to read. */
 #define       MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
 #define       MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
  */
 #define    MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
 #define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
 /* The handle of the owning upstream port */
 #define       MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
 #define    MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
 /* The handle of the new Onload stack */
 #define       MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+#define       MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
 /* The handle of the Onload stack */
 #define       MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+#define       MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4
 
 /* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
 #define    MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
 #define    MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
 /* The handle of the owning upstream port */
 #define       MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 /* The type of context to allocate */
 #define       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+#define       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_LEN 4
 /* enum: Allocate a context for exclusive use. The key and indirection table
  * must be explicitly configured.
  */
  * in the indirection table will be in the range 0 to NUM_QUEUES-1.
  */
 #define       MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+#define       MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_LEN 4
 
 /* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
  * handle.
  */
 #define       MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_LEN 4
 /* enum: guaranteed invalid RSS context handle value */
 #define          MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID  0xffffffff
 
 #define    MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_LEN 4
 
 /* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
 #define    MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_LEN 4
 /* The 40-byte Toeplitz hash key (TBD endianness issues?) */
 #define       MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
 #define       MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
 #define    MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_LEN 4
 
 /* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
 #define    MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
 /* The 128-byte indirection table (1 byte per entry) */
 #define       MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
 #define       MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128
 #define    MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
 
 /* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
 #define    MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
 /* Hash control flags. The _EN bits are always supported, but new modes are
  * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES:
  * in this case, the MODE fields may be set to non-zero values, and will take
  * particular packet type.)
  */
 #define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_LEN 4
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
 #define    MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
 
 /* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
  * always be used for a SET regardless of old/new driver vs. old/new firmware.
  */
 #define       MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define       MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_LEN 4
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
 #define    MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
 /* The handle of the owning upstream port */
 #define       MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 /* Number of queues spanned by this mapping, in the range 1-64; valid fixed
  * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
  * referenced RSS contexts must span no more than this number.
  */
 #define       MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
+#define       MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4
 
 /* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
 #define    MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
  * handle.
  */
 #define       MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4
 /* enum: guaranteed invalid .1p mapping handle value */
 #define          MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID  0xffffffff
 
 #define    MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
 /* The handle of the .1p mapping */
 #define       MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4
 
 /* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
 #define    MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
 #define    MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
 /* The handle of the .1p mapping */
 #define       MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
 /* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
  * handle)
  */
 #define    MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
 /* The handle of the .1p mapping */
 #define       MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
 
 /* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
 #define    MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
 #define    MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
 /* Base absolute interrupt vector number. */
 #define       MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+#define       MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4
 /* Number of interrupt vectors allocated to this PF. */
 #define       MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+#define       MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4
 /* Number of interrupt vectors to allocate per VF. */
 #define       MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+#define       MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4
 
 
 /***********************************/
  * let the system find a suitable base.
  */
 #define       MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+#define       MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4
 /* Number of interrupt vectors allocated to this PF. */
 #define       MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+#define       MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4
 /* Number of interrupt vectors to allocate per VF. */
 #define       MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+#define       MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4
 
 /* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
 #define    MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
 #define    MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
 /* The handle of the v-port */
 #define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_LEN 4
 /* MAC address to add */
 #define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
 #define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
 #define    MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
 /* The handle of the v-port */
 #define       MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_LEN 4
 /* MAC address to delete */
 #define       MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
 #define       MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
 #define    MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
 /* The handle of the v-port */
 #define       MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_LEN 4
 
 /* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
 #define    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
 #define    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
 /* The number of MAC addresses returned */
 #define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+#define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_LEN 4
 /* Array of MAC addresses */
 #define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
 #define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
 #define    MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
 /* The handle of the v-port */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_LEN 4
 /* Flags requesting what should be changed. */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define       MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_LEN 4
 #define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
 #define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
 #define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
  * v-switch.
  */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+#define       MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_LEN 4
 /* The actual VLAN tags to insert/remove */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define       MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_LEN 4
 #define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
 #define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
 #define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
 #define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
 /* The number of MAC addresses to add */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+#define       MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_LEN 4
 /* MAC addresses to add */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
 #define       MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
 /* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
 #define    MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
 #define       MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define       MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_LEN 4
 #define        MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
 #define        MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
 
 #define    MC_CMD_EVB_PORT_QUERY_IN_LEN 4
 /* The handle of the v-port */
 #define       MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+#define       MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4
 
 /* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
 #define    MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
 /* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
 #define       MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+#define       MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4
 /* The number of VLAN tags that may be used on a v-adaptor connected to this
  * EVB port.
  */
 #define       MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+#define       MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
 
 
 /***********************************/
  */
 #define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
 
-#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
 /* Index of the first buffer table entry. */
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+#define       MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
 /* Number of buffer table entries to dump. */
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+#define       MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
 
 /* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
 /* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
 #define    MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
 #define       MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
+#define       MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4
 #define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
 #define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
 #define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
 /* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
 #define    MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
 #define       MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
+#define       MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4
 #define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
 #define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
 #define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
 #define    MC_CMD_GET_CLOCK_OUT_LEN 8
 /* System frequency, MHz */
 #define       MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define       MC_CMD_GET_CLOCK_OUT_SYS_FREQ_LEN 4
 /* DPCPU frequency, MHz */
 #define       MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+#define       MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_LEN 4
 
 
 /***********************************/
  */
 #define MC_CMD_SET_CLOCK 0xad
 
-#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_SET_CLOCK_IN msgrequest */
 #define    MC_CMD_SET_CLOCK_IN_LEN 28
 /* Requested frequency in MHz for system clock domain */
 #define       MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
+#define       MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4
 /* enum: Leave the system clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for inter-core clock domain */
 #define       MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
+#define       MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4
 /* enum: Leave the inter-core clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for DPCPU clock domain */
 #define       MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+#define       MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4
 /* enum: Leave the DPCPU clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for PCS clock domain */
 #define       MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
+#define       MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4
 /* enum: Leave the PCS clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for MC clock domain */
 #define       MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
+#define       MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4
 /* enum: Leave the MC clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for rmon clock domain */
 #define       MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
+#define       MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4
 /* enum: Leave the rmon clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for vswitch clock domain */
 #define       MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
+#define       MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4
 /* enum: Leave the vswitch clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE  0x0
 
 #define    MC_CMD_SET_CLOCK_OUT_LEN 28
 /* Resulting system frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define       MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4
 /* enum: The system clock domain doesn't exist */
 #define          MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED  0x0
 /* Resulting inter-core frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+#define       MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4
 /* enum: The inter-core clock domain doesn't exist / isn't used */
 #define          MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED  0x0
 /* Resulting DPCPU frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+#define       MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4
 /* enum: The dpcpu clock domain doesn't exist */
 #define          MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED  0x0
 /* Resulting PCS frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
+#define       MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4
 /* enum: The PCS clock domain doesn't exist / isn't controlled */
 #define          MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED  0x0
 /* Resulting MC frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
+#define       MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4
 /* enum: The MC clock domain doesn't exist / isn't controlled */
 #define          MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED  0x0
 /* Resulting rmon frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
+#define       MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4
 /* enum: The rmon clock domain doesn't exist / isn't controlled */
 #define          MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED  0x0
 /* Resulting vswitch frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
+#define       MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4
 /* enum: The vswitch clock domain doesn't exist / isn't controlled */
 #define          MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED  0x0
 
  */
 #define MC_CMD_DPCPU_RPC 0xae
 
-#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DPCPU_RPC_IN msgrequest */
 #define    MC_CMD_DPCPU_RPC_IN_LEN 36
 #define       MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
+#define       MC_CMD_DPCPU_RPC_IN_CPU_LEN 4
 /* enum: RxDPCPU0 */
 #define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX0  0x0
 /* enum: TxDPCPU0 */
 #define       MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
 /* Register data to write. Only valid in write/write-read. */
 #define       MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
+#define       MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4
 /* Register address. */
 #define       MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
+#define       MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4
 
 /* MC_CMD_DPCPU_RPC_OUT msgresponse */
 #define    MC_CMD_DPCPU_RPC_OUT_LEN 36
 #define       MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
+#define       MC_CMD_DPCPU_RPC_OUT_RC_LEN 4
 /* DATA */
 #define       MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
 #define       MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
 #define       MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
 #define       MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
 #define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4
 #define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4
 #define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4
 #define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
 /* Interrupt level relative to base for function. */
 #define       MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+#define       MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_LEN 4
 
 /* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
 #define    MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
 #define    MC_CMD_SHMBOOT_OP_IN_LEN 4
 /* Identifies the operation to perform */
 #define       MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+#define       MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4
 /* enum: Copy slave_data section to the slave core. (Greenport only) */
 #define          MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA  0x0
 
  */
 #define MC_CMD_CAP_BLK_READ 0xe7
 
-#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_CAP_BLK_READ_IN msgrequest */
 #define    MC_CMD_CAP_BLK_READ_IN_LEN 12
 #define       MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
+#define       MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4
 #define       MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
+#define       MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4
 #define       MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
+#define       MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4
 
 /* MC_CMD_CAP_BLK_READ_OUT msgresponse */
 #define    MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
  */
 #define MC_CMD_DUMP_DO 0xe8
 
-#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DUMP_DO_IN msgrequest */
 #define    MC_CMD_DUMP_DO_IN_LEN 52
 #define       MC_CMD_DUMP_DO_IN_PADDING_OFST 0
+#define       MC_CMD_DUMP_DO_IN_PADDING_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_LEN 4
 #define          MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM  0x0 /* enum */
 #define          MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT  0x1 /* enum */
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4
 #define          MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM  0x1 /* enum */
 #define          MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY  0x2 /* enum */
 #define          MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI  0x3 /* enum */
 #define          MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART  0x4 /* enum */
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
 #define          MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE  0x1000 /* enum */
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
 #define          MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH  0x2 /* enum */
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4
 /* enum: The uart port this command was received over (if using a uart
  * transport)
  */
 #define          MC_CMD_DUMP_DO_IN_UART_PORT_SRC  0xff
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_LEN 4
 #define          MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM  0x0 /* enum */
 #define          MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION  0x1 /* enum */
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
 
 /* MC_CMD_DUMP_DO_OUT msgresponse */
 #define    MC_CMD_DUMP_DO_OUT_LEN 4
 #define       MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0
+#define       MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_LEN 4
 
 
 /***********************************/
  */
 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
 
-#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
 #define    MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
 
 
 /***********************************/
  */
 #define MC_CMD_SET_PSU 0xea
 
-#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_SET_PSU_IN msgrequest */
 #define    MC_CMD_SET_PSU_IN_LEN 12
 #define       MC_CMD_SET_PSU_IN_PARAM_OFST 0
+#define       MC_CMD_SET_PSU_IN_PARAM_LEN 4
 #define          MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE  0x0 /* enum */
 #define       MC_CMD_SET_PSU_IN_RAIL_OFST 4
+#define       MC_CMD_SET_PSU_IN_RAIL_LEN 4
 #define          MC_CMD_SET_PSU_IN_RAIL_0V9  0x0 /* enum */
 #define          MC_CMD_SET_PSU_IN_RAIL_1V2  0x1 /* enum */
 /* desired value, eg voltage in mV */
 #define       MC_CMD_SET_PSU_IN_VALUE_OFST 8
+#define       MC_CMD_SET_PSU_IN_VALUE_LEN 4
 
 /* MC_CMD_SET_PSU_OUT msgresponse */
 #define    MC_CMD_SET_PSU_OUT_LEN 0
 /* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
 #define    MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
 #define       MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define       MC_CMD_GET_FUNCTION_INFO_OUT_PF_LEN 4
 #define       MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+#define       MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
 /* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
 #define       MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
+#define       MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4
 /* Offset at which to write the data */
 #define       MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
+#define       MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4
 /* Length of data */
 #define       MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
+#define       MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4
 /* Reserved for future use */
 #define       MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
+#define       MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4
 #define       MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
 #define       MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
 #define       MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
 #define    MC_CMD_UART_RECV_DATA_OUT_LEN 16
 /* CRC32 over OFFSET, LENGTH, RESERVED */
 #define       MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+#define       MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4
 /* Offset from which to read the data */
 #define       MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+#define       MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4
 /* Length of data */
 #define       MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+#define       MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4
 /* Reserved for future use */
 #define       MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+#define       MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4
 
 /* MC_CMD_UART_RECV_DATA_IN msgresponse */
 #define    MC_CMD_UART_RECV_DATA_IN_LENMIN 16
 #define    MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
 /* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
 #define       MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+#define       MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4
 /* Offset at which to write the data */
 #define       MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+#define       MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4
 /* Length of data */
 #define       MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+#define       MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4
 /* Reserved for future use */
 #define       MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define       MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4
 #define       MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
 #define       MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
 #define       MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
  */
 #define MC_CMD_READ_FUSES 0xf0
 
-#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_READ_FUSES_IN msgrequest */
 #define    MC_CMD_READ_FUSES_IN_LEN 8
 /* Offset in OTP to read */
 #define       MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+#define       MC_CMD_READ_FUSES_IN_OFFSET_LEN 4
 /* Length of data to read in bytes */
 #define       MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+#define       MC_CMD_READ_FUSES_IN_LENGTH_LEN 4
 
 /* MC_CMD_READ_FUSES_OUT msgresponse */
 #define    MC_CMD_READ_FUSES_OUT_LENMIN 4
 #define    MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
 /* Length of returned OTP data in bytes */
 #define       MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+#define       MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4
 /* Returned data */
 #define       MC_CMD_READ_FUSES_OUT_DATA_OFST 4
 #define       MC_CMD_READ_FUSES_OUT_DATA_LEN 1
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC  0x9
 /* enum: CTLE EQ Resistor (0-7, Medford) */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES  0xa
+/* enum: CTLE gain (0-31, Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_GAIN  0xb
+/* enum: CTLE pole (0-31, Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_POLE  0xc
+/* enum: CTLE peaking (0-31, Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_PEAK  0xd
+/* enum: DFE Tap1 - even path (Medford2 - 6 bit signed (-29 - +29)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_EVEN  0xe
+/* enum: DFE Tap1 - odd path (Medford2 - 6 bit signed (-29 - +29)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_ODD  0xf
+/* enum: DFE Tap2 (Medford2 - 6 bit signed (-20 - +20)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2  0x10
+/* enum: DFE Tap3 (Medford2 - 6 bit signed (-20 - +20)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3  0x11
+/* enum: DFE Tap4 (Medford2 - 6 bit signed (-20 - +20)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4  0x12
+/* enum: DFE Tap5 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5  0x13
+/* enum: DFE Tap6 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP6  0x14
+/* enum: DFE Tap7 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP7  0x15
+/* enum: DFE Tap8 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP8  0x16
+/* enum: DFE Tap9 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP9  0x17
+/* enum: DFE Tap10 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP10  0x18
+/* enum: DFE Tap11 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP11  0x19
+/* enum: DFE Tap12 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP12  0x1a
+/* enum: I/Q clk offset (Medford2 - 4 bit signed (-5 - +5)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_IQ_OFF  0x1b
+/* enum: Negative h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_EVEN  0x1c
+/* enum: Negative h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_ODD  0x1d
+/* enum: Positive h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_EVEN  0x1e
+/* enum: Positive h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_ODD  0x1f
+/* enum: CDR calibration loop code (Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_PVT  0x20
+/* enum: CDR integral loop code (Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_INTEG  0x21
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0  0x0 /* enum */
 #define       MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: TX Amplitude (Huntington, Medford) */
+/* enum: TX Amplitude (Huntington, Medford, Medford2) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV  0x0
 /* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE  0x1
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET  0x9
 /* enum: TX Amplitude Fine control (Medford) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE  0xa
-/* enum: Pre-shoot Tap (Medford) */
+/* enum: Pre-shoot Tap (Medford, Medford2) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV  0xb
-/* enum: De-emphasis Tap (Medford) */
+/* enum: De-emphasis Tap (Medford, Medford2) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY  0xc
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
 /* Align the arguments to 32 bits */
 #define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
 #define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+/* Port-relative lane to scan eye on */
 #define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN msgrequest */
+#define    MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LEN 12
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_LEN 3
+/* Port-relative lane to scan eye on */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST 4
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_LEN 4
+/* Scan duration / cycle count */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_OFST 8
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_LEN 4
 
 /* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */
 #define    MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0
 #define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1
 #define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
 #define       MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_LANE_LEN 4
 
 /* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */
 #define    MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4
 #define       MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0
+#define       MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_LEN 4
 
 
 /***********************************/
 #define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
 #define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
 #define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
 
 /* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
 #define    MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
 #define    MC_CMD_LICENSING_IN_LEN 4
 /* identifies the type of operation requested */
 #define       MC_CMD_LICENSING_IN_OP_OFST 0
+#define       MC_CMD_LICENSING_IN_OP_LEN 4
 /* enum: re-read and apply licenses after a license key partition update; note
  * that this operation returns a zero-length response
  */
 #define    MC_CMD_LICENSING_OUT_LEN 28
 /* count of application keys which are valid */
 #define       MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+#define       MC_CMD_LICENSING_OUT_VALID_APP_KEYS_LEN 4
 /* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
  * MC_CMD_FC_OP_LICENSE)
  */
 #define       MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+#define       MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_LEN 4
 /* count of application keys which are invalid due to being blacklisted */
 #define       MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+#define       MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_LEN 4
 /* count of application keys which are invalid due to being unverifiable */
 #define       MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+#define       MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_LEN 4
 /* count of application keys which are invalid due to being for the wrong node
  */
 #define       MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+#define       MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_LEN 4
 /* licensing state (for diagnostics; the exact meaning of the bits in this
  * field are private to the firmware)
  */
 #define       MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+#define       MC_CMD_LICENSING_OUT_LICENSING_STATE_LEN 4
 /* licensing subsystem self-test report (for manftest) */
 #define       MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+#define       MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_LEN 4
 /* enum: licensing subsystem self-test failed */
 #define          MC_CMD_LICENSING_OUT_SELF_TEST_FAIL  0x0
 /* enum: licensing subsystem self-test passed */
 #define    MC_CMD_LICENSING_V3_IN_LEN 4
 /* identifies the type of operation requested */
 #define       MC_CMD_LICENSING_V3_IN_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_IN_OP_LEN 4
 /* enum: re-read and apply licenses after a license key partition update; note
  * that this operation returns a zero-length response
  */
 #define    MC_CMD_LICENSING_V3_OUT_LEN 88
 /* count of keys which are valid */
 #define       MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+#define       MC_CMD_LICENSING_V3_OUT_VALID_KEYS_LEN 4
 /* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
  * MC_CMD_FC_OP_LICENSE)
  */
 #define       MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+#define       MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_LEN 4
 /* count of keys which are invalid due to being unverifiable */
 #define       MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+#define       MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_LEN 4
 /* count of keys which are invalid due to being for the wrong node */
 #define       MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+#define       MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_LEN 4
 /* licensing state (for diagnostics; the exact meaning of the bits in this
  * field are private to the firmware)
  */
 #define       MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+#define       MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_LEN 4
 /* licensing subsystem self-test report (for manftest) */
 #define       MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+#define       MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_LEN 4
 /* enum: licensing subsystem self-test failed */
 #define          MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL  0x0
 /* enum: licensing subsystem self-test passed */
 #define    MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
 /* type of license (eg 3) */
 #define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+#define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
 /* length of the license ID (in bytes) */
 #define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+#define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4
 /* the unique license ID of the adapter */
 #define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
 #define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
 #define    MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
 /* application ID to query (LICENSED_APP_ID_xxx) */
 #define       MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+#define       MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_LEN 4
 
 /* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
 #define    MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
 /* state of this application */
 #define       MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+#define       MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_LEN 4
 /* enum: no (or invalid) license is present for the application */
 #define          MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED  0x0
 /* enum: a valid license is present for the application */
 #define    MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
 /* state of this application */
 #define       MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+#define       MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4
 /* enum: no (or invalid) license is present for the application */
 #define          MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED  0x0
 /* enum: a valid license is present for the application */
 #define    MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
 /* application ID */
 #define       MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+#define       MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
 /* the type of operation requested */
 #define       MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+#define       MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4
 /* enum: validate application */
 #define          MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE  0x0
 /* enum: mask application */
 #define    MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
 /* application ID */
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4
 /* the type of operation requested */
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4
 /* validation challenge */
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
 #define    MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
 /* feature expiry (time_t) */
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4
 /* validation response */
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
 #define    MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
 /* application ID */
 #define       MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4
 /* the type of operation requested */
 #define       MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4
 /* flag */
 #define       MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4
 
 /* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
 #define    MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
 #define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
 /* application expiry time */
 #define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4
 /* application expiry units */
 #define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4
 /* enum: expiry units are accounting units */
 #define          MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC  0x0
 /* enum: expiry units are calendar days */
  */
 #define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
 
-#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
 #define    MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
 #define       MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
 /* whether to turn on or turn off the masked features */
 #define       MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
+#define       MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
 /* enum: turn the features off */
 #define          MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF  0x0
 /* enum: turn the features back on */
  */
 #define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
 
-#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
 #define    MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
 /* operation code */
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4
 /* enum: install a new license, overwriting any existing temporary license.
  * This is an asynchronous operation owing to the time taken to validate an
  * ECDSA license
 /* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
 #define    MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4
 /* ECDSA license and signature */
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
 /* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
 #define    MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4
 
 /* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
 #define    MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4
 
 /* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
 #define    MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
 /* status code */
 #define       MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4
 /* enum: finished validating and installing license */
 #define          MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK  0x0
 /* enum: license validation and installation in progress */
 #define    MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
 /* configuration flags */
 #define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
 #define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
 #define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
 #define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
 /* receive queue handle (for RSS mode, this is the base queue) */
 #define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
 /* enum: receive to just the specified queue */
 #define          MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE  0x0
 /* enum: receive to multiple queues using RSS context */
  * of 0xFFFFFFFF is guaranteed never to be a valid handle.
  */
 #define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
 
 /* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
 #define    MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
 #define    MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
 /* configuration flags */
 #define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
 #define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
 #define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
 #define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
 /* receiving queue handle (for RSS mode, this is the base queue) */
 #define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
 /* enum: receiving to just the specified queue */
 #define          MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE  0x0
 /* enum: receiving to multiple queues using RSS context */
 #define          MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS  0x1
 /* RSS context (for RX_MODE_RSS) */
 #define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
 /* the type of configuration setting to change */
 #define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
 /* enum: Per-TXQ enable for multicast UDP destination lookup for possible
  * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
  */
  * on the type of configuration setting being changed
  */
 #define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
 /* new value: the details depend on the type of configuration setting being
  * changed
  */
 #define    MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
 /* the type of configuration setting to read */
 #define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
 /* handle for the entity to query: queue handle, EVB port ID, etc. depending on
  * the type of configuration setting being read
  */
 #define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
 
 /* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
 #define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
 #define    MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
 /* configuration flags */
 #define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
 #define        MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
 /* receive queue handle (for RSS mode, this is the base queue) */
 #define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
 /* enum: receive to just the specified queue */
 #define          MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE  0x0
 /* enum: receive to multiple queues using RSS context */
  * of 0xFFFFFFFF is guaranteed never to be a valid handle.
  */
 #define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
 
 /* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
 #define    MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
 #define    MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
 /* configuration flags */
 #define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
 #define        MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
 /* receiving queue handle (for RSS mode, this is the base queue) */
 #define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
 /* enum: receiving to just the specified queue */
 #define          MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE  0x0
 /* enum: receiving to multiple queues using RSS context */
 #define          MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS  0x1
 /* RSS context (for RX_MODE_RSS) */
 #define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
 /* The rx queue to get stats for. */
 #define       MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define       MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4
 #define       MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define       MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4
 #define        MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
 #define        MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
 
 /* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
 #define    MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
 #define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4
 #define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4
 #define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4
 #define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4
 
 
 /***********************************/
  */
 #define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
 
+#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
 #define    MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
 
 #define    MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
 /* The maximum number of PFs the device can expose */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4
 /* The maximum number of VFs the device can expose in total */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4
 /* The maximum number of MSI-X vectors the device can provide in total */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4
 /* the number of MSI-X vectors the device will allocate by default to each PF
  */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4
 /* the number of MSI-X vectors the device will allocate by default to each VF
  */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4
 /* the maximum number of MSI-X vectors the device can allocate to any one PF */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4
 /* the maximum number of MSI-X vectors the device can allocate to any one VF */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_GET_PORT_MODES_OUT_LEN 12
 /* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
 #define       MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+#define       MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
 /* Default (canonical) board mode */
 #define       MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+#define       MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4
 /* Current board mode */
 #define       MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+#define       MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
 
 
 /***********************************/
  */
 #define MC_CMD_READ_ATB 0x100
 
-#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_READ_ATB_IN msgrequest */
 #define    MC_CMD_READ_ATB_IN_LEN 16
 #define       MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
+#define       MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4
 #define          MC_CMD_READ_ATB_IN_BUS_CCOM  0x0 /* enum */
 #define          MC_CMD_READ_ATB_IN_BUS_CKR  0x1 /* enum */
 #define          MC_CMD_READ_ATB_IN_BUS_CPCIE  0x8 /* enum */
 #define       MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define       MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4
 #define       MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define       MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4
 #define       MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+#define       MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4
 
 /* MC_CMD_READ_ATB_OUT msgresponse */
 #define    MC_CMD_READ_ATB_OUT_LEN 4
 #define       MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+#define       MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4
 
 
 /***********************************/
 /* Each workaround is represented by a single bit according to the enums below.
  */
 #define       MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define       MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_LEN 4
 #define       MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+#define       MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_LEN 4
 /* enum: Bug 17230 work around. */
 #define          MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
 /* enum: Bug 35388 work around (unsafe EVQ writes). */
  * 1,3 = 0x00030001
  */
 #define       MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define       MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_LEN 4
 #define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
 #define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
 #define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
  * set to 1.
  */
 #define       MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define       MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_LEN 4
 #define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN             0x1 /* enum */
 #define          MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK              0x2 /* enum */
 #define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD            0x4 /* enum */
  * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
  */
 #define          MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN  0x2000
+/* enum: Privilege for insecure commands. Commands that belong to this group
+ * are not permitted on secure adapters regardless of the privilege mask.
+ */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE          0x4000
 /* enum: Set this bit to indicate that a new privilege mask is to be set,
  * otherwise the command will only read the existing mask.
  */
 #define    MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
 /* For an admin function, always all the privileges are reported. */
 #define       MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+#define       MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_LEN 4
 
 
 /***********************************/
  * e.g. VF 1,3 = 0x00030001
  */
 #define       MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define       MC_CMD_LINK_STATE_MODE_IN_FUNCTION_LEN 4
 #define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
 #define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
 #define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
 #define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
 /* New link state mode to be set */
 #define       MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define       MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_LEN 4
 #define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO       0x0 /* enum */
 #define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP         0x1 /* enum */
 #define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN       0x2 /* enum */
 /* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
 #define    MC_CMD_LINK_STATE_MODE_OUT_LEN 4
 #define       MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+#define       MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4
 
 
 /***********************************/
 /* MC_CMD_GET_SNAPSHOT_LENGTH
- * Obtain the curent range of allowable values for the SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
  * parameter to MC_CMD_INIT_RXQ.
  */
 #define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
 #define    MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
 /* Minimum acceptable snapshot length. */
 #define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+#define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4
 /* Maximum acceptable snapshot length. */
 #define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+#define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4
 
 
 /***********************************/
  */
 #define MC_CMD_FUSE_DIAGS 0x102
 
-#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_FUSE_DIAGS_IN msgrequest */
 #define    MC_CMD_FUSE_DIAGS_IN_LEN 0
 #define    MC_CMD_FUSE_DIAGS_OUT_LEN 48
 /* Total number of mismatched bits between pairs in area 0 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4
 /* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4
 /* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4
 /* Checksum of data after logical OR of pairs in area 0 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4
 /* Total number of mismatched bits between pairs in area 1 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4
 /* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4
 /* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4
 /* Checksum of data after logical OR of pairs in area 1 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4
 /* Total number of mismatched bits between pairs in area 2 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4
 /* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4
 /* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4
 /* Checksum of data after logical OR of pairs in area 2 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
 /* The groups of functions to have their privilege masks modified. */
 #define       MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4
 #define          MC_CMD_PRIVILEGE_MODIFY_IN_NONE       0x0 /* enum */
 #define          MC_CMD_PRIVILEGE_MODIFY_IN_ALL        0x1 /* enum */
 #define          MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY   0x2 /* enum */
 #define          MC_CMD_PRIVILEGE_MODIFY_IN_ONE        0x5 /* enum */
 /* For VFS_OF_PF specify the PF, for ONE specify the target function */
 #define       MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4
 #define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
 #define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
 #define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
  * refer to the command MC_CMD_PRIVILEGE_MASK
  */
 #define       MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4
 /* Privileges to be removed from the target functions. For privilege
  * definitions refer to the command MC_CMD_PRIVILEGE_MASK
  */
 #define       MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4
 
 /* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
 #define    MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
 #define    MC_CMD_XPM_READ_BYTES_IN_LEN 8
 /* Start address (byte) */
 #define       MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+#define       MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4
 /* Count (bytes) */
 #define       MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+#define       MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4
 
 /* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
 #define    MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
  */
 #define MC_CMD_XPM_WRITE_BYTES 0x104
 
-#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
 #define    MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
 #define    MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
 /* Start address (byte) */
 #define       MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+#define       MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4
 /* Count (bytes) */
 #define       MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+#define       MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4
 /* Data */
 #define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
 #define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
  */
 #define MC_CMD_XPM_READ_SECTOR 0x105
 
-#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
 #define    MC_CMD_XPM_READ_SECTOR_IN_LEN 8
 /* Sector index */
 #define       MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+#define       MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4
 /* Sector size */
 #define       MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+#define       MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4
 
 /* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
 #define    MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
 #define    MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
 /* Sector type */
 #define       MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
+#define       MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4
 #define          MC_CMD_XPM_READ_SECTOR_OUT_BLANK            0x0 /* enum */
 #define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128   0x1 /* enum */
 #define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256   0x2 /* enum */
+#define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA      0x3 /* enum */
 #define          MC_CMD_XPM_READ_SECTOR_OUT_INVALID          0xff /* enum */
 /* Sector data */
 #define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
  */
 #define MC_CMD_XPM_WRITE_SECTOR 0x106
 
-#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
 #define    MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
 /* Sector type */
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
 /* Sector size */
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4
 /* Sector data */
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
 #define    MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
 /* New sector index */
 #define       MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
+#define       MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4
 
 
 /***********************************/
  */
 #define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
 
-#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
 #define    MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
 /* Sector index */
 #define       MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+#define       MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4
 
 /* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
 #define    MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
  */
 #define MC_CMD_XPM_BLANK_CHECK 0x108
 
-#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
 #define    MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
 /* Start address (byte) */
 #define       MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+#define       MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4
 /* Count (bytes) */
 #define       MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+#define       MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4
 
 /* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
 #define    MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
 #define    MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
 /* Total number of bad (non-blank) locations */
 #define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4
 /* Addresses of bad locations (may be less than BAD_COUNT, if all cannot fit
  * into MCDI response)
  */
  */
 #define MC_CMD_XPM_REPAIR 0x109
 
-#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_REPAIR_IN msgrequest */
 #define    MC_CMD_XPM_REPAIR_IN_LEN 8
 /* Start address (byte) */
 #define       MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+#define       MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4
 /* Count (bytes) */
 #define       MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+#define       MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4
 
 /* MC_CMD_XPM_REPAIR_OUT msgresponse */
 #define    MC_CMD_XPM_REPAIR_OUT_LEN 0
  */
 #define MC_CMD_XPM_DECODER_TEST 0x10a
 
-#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
 #define    MC_CMD_XPM_DECODER_TEST_IN_LEN 0
  */
 #define MC_CMD_XPM_WRITE_TEST 0x10b
 
-#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
 #define    MC_CMD_XPM_WRITE_TEST_IN_LEN 0
 #define    MC_CMD_EXEC_SIGNED_IN_LEN 28
 /* the length of code to include in the CMAC */
 #define       MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
+#define       MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4
 /* the length of data to include in the CMAC */
 #define       MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
+#define       MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4
 /* the XPM sector containing the key to use */
 #define       MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
+#define       MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4
 /* the expected CMAC value */
 #define       MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
 #define       MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
 #define    MC_CMD_PREPARE_SIGNED_IN_LEN 4
 /* the length of data area to clear */
 #define       MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
+#define       MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4
 
 /* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
 #define    MC_CMD_PREPARE_SIGNED_OUT_LEN 0
 
 
+/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
+#define    TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
+/* UDP port (the standard ports are named below but any port may be used) */
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
+/* enum: the IANA allocated UDP port for VXLAN */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT  0x12b5
+/* enum: the IANA allocated UDP port for Geneve */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT  0x17c1
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
+/* tunnel encapsulation protocol (only those named below are supported) */
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
+/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN  0x0
+/* enum: This port will be used for Geneve on both IPv4 and IPv6 */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE  0x1
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
+
+
 /***********************************/
 /* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS
  * Configure UDP ports for tunnel encapsulation hardware acceleration. The
 #define        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
 #define        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
 
-/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
-#define    TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
-/* UDP port (the standard ports are named below but any port may be used) */
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
-/* enum: the IANA allocated UDP port for VXLAN */
-#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT  0x12b5
-/* enum: the IANA allocated UDP port for Geneve */
-#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT  0x17c1
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
-/* tunnel encapsulation protocol (only those named below are supported) */
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
-/* enum: VXLAN */
-#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN  0x0
-/* enum: Geneve */
-#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE  0x1
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
-
 
 /***********************************/
 /* MC_CMD_RX_BALANCING
 #define    MC_CMD_RX_BALANCING_IN_LEN 16
 /* The RX port whose upconverter table will be modified */
 #define       MC_CMD_RX_BALANCING_IN_PORT_OFST 0
+#define       MC_CMD_RX_BALANCING_IN_PORT_LEN 4
 /* The VLAN priority associated with the table index and vFIFO */
 #define       MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4
+#define       MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4
 /* The resulting bit of SRC^DST for indexing the table */
 #define       MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8
+#define       MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4
 /* The RX engine to which the vFIFO in the table entry will point to */
 #define       MC_CMD_RX_BALANCING_IN_ENG_OFST 12
+#define       MC_CMD_RX_BALANCING_IN_ENG_LEN 4
 
 /* MC_CMD_RX_BALANCING_OUT msgresponse */
 #define    MC_CMD_RX_BALANCING_OUT_LEN 0
 #define    MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num))
 /* The tag to be appended */
 #define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0
+#define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4
 /* The length of the data */
 #define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4
+#define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4
 /* The data to be contained in the TLV structure */
 #define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8
 #define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1
 #define    MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4
 /* Data type to be checked */
 #define       MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0
+#define       MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4
 
 /* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */
 #define    MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12
 #define    MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num))
 /* Number of sectors found (test builds only) */
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4
 /* Number of bytes found (test builds only) */
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4
 /* Length of signature */
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4
 /* Signature */
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1
 #define    MC_CMD_SET_EVQ_TMR_IN_LEN 16
 /* Function-relative queue instance */
 #define       MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0
+#define       MC_CMD_SET_EVQ_TMR_IN_INSTANCE_LEN 4
 /* Requested value for timer load (in nanoseconds) */
 #define       MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4
+#define       MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_LEN 4
 /* Requested value for timer reload (in nanoseconds) */
 #define       MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8
+#define       MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_LEN 4
 /* Timer mode. Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */
 #define       MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12
+#define       MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_LEN 4
 #define          MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS  0x0 /* enum */
 #define          MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START  0x1 /* enum */
 #define          MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START  0x2 /* enum */
 #define    MC_CMD_SET_EVQ_TMR_OUT_LEN 8
 /* Actual value for timer load (in nanoseconds) */
 #define       MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0
+#define       MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_LEN 4
 /* Actual value for timer reload (in nanoseconds) */
 #define       MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4
+#define       MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36
 /* Reserved for future use. */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_LEN 4
 /* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in
  * nanoseconds) for each increment of the timer load/reload count. The
  * requested duration of a timer is this value multiplied by the timer
  * load/reload count.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_LEN 4
 /* For timers updated via writes to EVQ_TMR_REG, this is the maximum value
  * allowed for timer load/reload counts.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_LEN 4
 /* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts not a
  * multiple of this step size will be rounded in an implementation defined
  * manner.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_LEN 4
 /* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only
  * meaningful if MC_CMD_SET_EVQ_TMR is implemented.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_LEN 4
 /* Timer durations requested via MCDI that are not a multiple of this step size
  * will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_LEN 4
 /* For timers updated using the bug35388 workaround, this is the time interval
  * (in nanoseconds) for each increment of the timer load/reload count. The
  * requested duration of a timer is this value multiplied by the timer
  * load/reload count. This field is only meaningful if the bug35388 workaround
  * is enabled.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_LEN 4
 /* For timers updated using the bug35388 workaround, this is the maximum value
  * allowed for timer load/reload counts. This field is only meaningful if the
  * bug35388 workaround is enabled.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_LEN 4
 /* For timers updated using the bug35388 workaround, timer load/reload counts
  * not a multiple of this step size will be rounded in an implementation
  * defined manner. This field is only meaningful if the bug35388 workaround is
  * enabled.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4
 
 
 /***********************************/
  * local queue index.
  */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4
 /* Will the common pool be used as TX_vFIFO_ULL (1) */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4
 #define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED       0x1 /* enum */
 /* enum: Using this interface without TX_vFIFO_ULL is not supported for now */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED      0x0
 /* Number of buffers to reserve for the common pool */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4
 /* TX datapath to which the Common Pool is connected to. */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4
 /* enum: Extracts information from function */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE          -0x1
 /* Network port or RX Engine to which the common pool connects. */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4
 /* enum: Extracts information from function */
 /*               MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE          -0x1 */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0          0x0 /* enum */
 #define    MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4
 /* ID of the common pool allocated */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4
 
 
 /***********************************/
 /* Common pool previously allocated to which the new vFIFO will be associated
  */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4
 /* Port or RX engine to associate the vFIFO egress */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4
 /* enum: Extracts information from common pool */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE   -0x1
 #define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0          0x0 /* enum */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1     0x5
 /* Minimum number of buffers that the pool must have */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4
 /* enum: Do not check the space available */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM     0x0
 /* Will the vFIFO be used as TX_vFIFO_ULL */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4
 /* Network priority of the vFIFO,if applicable */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4
 /* enum: Search for the lowest unused priority */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE  -0x1
 
 #define    MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8
 /* Short vFIFO ID */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4
 /* Network priority of the vFIFO */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4
 /* Short vFIFO ID */
 #define       MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0
+#define       MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4
 
 /* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */
 #define    MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0
 #define    MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4
 /* Common pool ID given when pool allocated */
 #define       MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0
+#define       MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4
 
 /* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */
 #define    MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0
 #define    MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8
 /* Available buffers for the ENG to NET vFIFOs. */
 #define       MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0
+#define       MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4
 /* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */
 #define       MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4
+#define       MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4
 
 
 #endif /* MCDI_PCOL_H */
index 6e1f282b29765ed7567ee6d51526bf1c2de09672..65ee1a4681703a7faadb0ba81bb9abd5b9b93b7c 100644 (file)
@@ -1087,7 +1087,7 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx,
        int period = action == EFX_STATS_ENABLE ? 1000 : 0;
        dma_addr_t dma_addr = efx->stats_buffer.dma_addr;
        u32 dma_len = action != EFX_STATS_DISABLE ?
-               MC_CMD_MAC_NSTATS * sizeof(u64) : 0;
+               efx->num_mac_stats * sizeof(u64) : 0;
 
        BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
 
@@ -1121,7 +1121,7 @@ void efx_mcdi_mac_start_stats(struct efx_nic *efx)
 {
        __le64 *dma_stats = efx->stats_buffer.addr;
 
-       dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+       dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
 
        efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0);
 }
@@ -1139,10 +1139,10 @@ void efx_mcdi_mac_pull_stats(struct efx_nic *efx)
        __le64 *dma_stats = efx->stats_buffer.addr;
        int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS;
 
-       dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+       dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
        efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0);
 
-       while (dma_stats[MC_CMD_MAC_GENERATION_END] ==
+       while (dma_stats[efx->num_mac_stats - 1] ==
                                EFX_MC_STATS_GENERATION_INVALID &&
                        attempts-- != 0)
                udelay(EFX_MAC_STATS_WAIT_US);
@@ -1167,7 +1167,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
 
        /* Allocate buffer for stats */
        rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
-                                 MC_CMD_MAC_NSTATS * sizeof(u64), GFP_KERNEL);
+                                 efx->num_mac_stats * sizeof(u64), GFP_KERNEL);
        if (rc)
                return rc;
        netif_dbg(efx, probe, efx->net_dev,
index 6b8730a24513a8180e0c853c53ac4db9fc0436ad..4cedc5c4c6d9741faffe7cddbdc7b815ad7bfd3a 100644 (file)
@@ -774,6 +774,8 @@ struct vfdi_status;
  * @port_initialized: Port initialized?
  * @net_dev: Operating system network device. Consider holding the rtnl lock
  * @fixed_features: Features which cannot be turned off
+ * @num_mac_stats: Number of MAC stats reported by firmware (MAC_STATS_NUM_STATS
+ *     field of %MC_CMD_GET_CAPABILITIES_V4 response, or %MC_CMD_MAC_NSTATS)
  * @stats_buffer: DMA buffer for statistics
  * @phy_type: PHY type
  * @phy_op: PHY interface
@@ -922,6 +924,7 @@ struct efx_nic {
 
        netdev_features_t fixed_features;
 
+       u16 num_mac_stats;
        struct efx_buffer stats_buffer;
        u64 rx_nodesc_drops_total;
        u64 rx_nodesc_drops_while_down;
index 7b51b637172465678b106e554055a2ccf970cecb..763052214525e86ea7f65516ce7ad73d526c8fc2 100644 (file)
@@ -325,6 +325,30 @@ enum {
        EF10_STAT_tx_bad,
        EF10_STAT_tx_bad_bytes,
        EF10_STAT_tx_overflow,
+       EF10_STAT_V1_COUNT,
+       EF10_STAT_fec_uncorrected_errors = EF10_STAT_V1_COUNT,
+       EF10_STAT_fec_corrected_errors,
+       EF10_STAT_fec_corrected_symbols_lane0,
+       EF10_STAT_fec_corrected_symbols_lane1,
+       EF10_STAT_fec_corrected_symbols_lane2,
+       EF10_STAT_fec_corrected_symbols_lane3,
+       EF10_STAT_ctpio_dmabuf_start,
+       EF10_STAT_ctpio_vi_busy_fallback,
+       EF10_STAT_ctpio_long_write_success,
+       EF10_STAT_ctpio_missing_dbell_fail,
+       EF10_STAT_ctpio_overflow_fail,
+       EF10_STAT_ctpio_underflow_fail,
+       EF10_STAT_ctpio_timeout_fail,
+       EF10_STAT_ctpio_noncontig_wr_fail,
+       EF10_STAT_ctpio_frm_clobber_fail,
+       EF10_STAT_ctpio_invalid_wr_fail,
+       EF10_STAT_ctpio_vi_clobber_fallback,
+       EF10_STAT_ctpio_unqualified_fallback,
+       EF10_STAT_ctpio_runt_fallback,
+       EF10_STAT_ctpio_success,
+       EF10_STAT_ctpio_fallback,
+       EF10_STAT_ctpio_poison,
+       EF10_STAT_ctpio_erase,
        EF10_STAT_COUNT
 };
 
index 22d49ebb347c9bf75f9c59deb581acf134bb223a..ae8645ae4492527269615da0fd95402ae203d21d 100644 (file)
@@ -555,7 +555,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
 
        dma_stats = efx->stats_buffer.addr;
 
-       generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+       generation_end = dma_stats[efx->num_mac_stats - 1];
        if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
                return 0;
        rmb();
index e1e5ac0537606f2192d553c85795428b18fd615d..ce2ea2d491acac195eefe3f01f8ec9df08d3e77c 100644 (file)
@@ -409,7 +409,7 @@ struct stmmac_desc_ops {
        /* get timestamp value */
         u64(*get_timestamp) (void *desc, u32 ats);
        /* get rx timestamp status */
-       int (*get_rx_timestamp_status) (void *desc, u32 ats);
+       int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats);
        /* Display ring */
        void (*display_ring)(void *head, unsigned int size, bool rx);
        /* set MSS via context descriptor */
index 4b286e27c4ca5cdbbb7c457e31bef1b2e9e7bd94..7e089bf906b4f316034403f9a44fbfd191ee09eb 100644 (file)
@@ -258,7 +258,8 @@ static int dwmac4_rx_check_timestamp(void *desc)
        return ret;
 }
 
-static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
+static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
+                                                u32 ats)
 {
        struct dma_desc *p = (struct dma_desc *)desc;
        int ret = -EINVAL;
@@ -270,7 +271,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
 
                        /* Check if timestamp is OK from context descriptor */
                        do {
-                               ret = dwmac4_rx_check_timestamp(desc);
+                               ret = dwmac4_rx_check_timestamp(next_desc);
                                if (ret < 0)
                                        goto exit;
                                i++;
index 7546b3664113a3d776fe19094df71b2adfb99e98..2a828a31281423082995bc332ec51a3f20989804 100644 (file)
@@ -400,7 +400,8 @@ static u64 enh_desc_get_timestamp(void *desc, u32 ats)
        return ns;
 }
 
-static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
+static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
+                                           u32 ats)
 {
        if (ats) {
                struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
index f817f8f365696d3388e73f85710d30dde43a7d41..db4cee57bb2465eb98fe38cb947624e779da4673 100644 (file)
@@ -265,7 +265,7 @@ static u64 ndesc_get_timestamp(void *desc, u32 ats)
        return ns;
 }
 
-static int ndesc_get_rx_timestamp_status(void *desc, u32 ats)
+static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats)
 {
        struct dma_desc *p = (struct dma_desc *)desc;
 
index 721b616552611aa74ea077e744ec9a0c4836a48f..08c19ebd530674972ceb9ebcb41cd7af4b3fb58d 100644 (file)
@@ -34,6 +34,7 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
 {
        u32 value = readl(ioaddr + PTP_TCR);
        unsigned long data;
+       u32 reg_value;
 
        /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second
         *      formula = (1/ptp_clock) * 1000000000
@@ -50,10 +51,11 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
 
        data &= PTP_SSIR_SSINC_MASK;
 
+       reg_value = data;
        if (gmac4)
-               data = data << GMAC4_PTP_SSIR_SSINC_SHIFT;
+               reg_value <<= GMAC4_PTP_SSIR_SSINC_SHIFT;
 
-       writel(data, ioaddr + PTP_SSIR);
+       writel(reg_value, ioaddr + PTP_SSIR);
 
        return data;
 }
index c52a9963c19d5017a4ac5aebef417c5900fc6cf5..0323d672e1c5b365eace923de3e6702b9058011e 100644 (file)
@@ -482,7 +482,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
                desc = np;
 
        /* Check if timestamp is available */
-       if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) {
+       if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) {
                ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
                netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
                shhwtstamp = skb_hwtstamps(skb);
index d655a4261e982921a291cbccd6ccae39827fee0f..eb1c6b03c329a8f8fe8f90f1eff8fdfd75e963ce 100644 (file)
@@ -333,9 +333,8 @@ void xlgmac_print_pkt(struct net_device *netdev,
                      struct sk_buff *skb, bool tx_rx)
 {
        struct ethhdr *eth = (struct ethhdr *)skb->data;
-       unsigned char *buf = skb->data;
        unsigned char buffer[128];
-       unsigned int i, j;
+       unsigned int i;
 
        netdev_dbg(netdev, "\n************** SKB dump ****************\n");
 
@@ -346,22 +345,13 @@ void xlgmac_print_pkt(struct net_device *netdev,
        netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
        netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
 
-       for (i = 0, j = 0; i < skb->len;) {
-               j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
-                             buf[i++]);
-
-               if ((i % 32) == 0) {
-                       netdev_dbg(netdev, "  %#06x: %s\n", i - 32, buffer);
-                       j = 0;
-               } else if ((i % 16) == 0) {
-                       buffer[j++] = ' ';
-                       buffer[j++] = ' ';
-               } else if ((i % 4) == 0) {
-                       buffer[j++] = ' ';
-               }
+       for (i = 0; i < skb->len; i += 32) {
+               unsigned int len = min(skb->len - i, 32U);
+
+               hex_dump_to_buffer(&skb->data[i], len, 32, 1,
+                                  buffer, sizeof(buffer), false);
+               netdev_dbg(netdev, "  %#06x: %s\n", i, buffer);
        }
-       if (i % 32)
-               netdev_dbg(netdev, "  %#06x: %s\n", i - (i % 32), buffer);
 
        netdev_dbg(netdev, "\n************** SKB dump ****************\n");
 }
index b718a02a6bb6055d7841619b42cb400d1eea0984..667c44f68dbc20bc55f76ac78e87cb35302c0340 100644 (file)
@@ -1638,19 +1638,16 @@ static __net_init int geneve_init_net(struct net *net)
        return 0;
 }
 
-static void __net_exit geneve_exit_net(struct net *net)
+static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
 {
        struct geneve_net *gn = net_generic(net, geneve_net_id);
        struct geneve_dev *geneve, *next;
        struct net_device *dev, *aux;
-       LIST_HEAD(list);
-
-       rtnl_lock();
 
        /* gather any geneve devices that were moved into this ns */
        for_each_netdev_safe(net, dev, aux)
                if (dev->rtnl_link_ops == &geneve_link_ops)
-                       unregister_netdevice_queue(dev, &list);
+                       unregister_netdevice_queue(dev, head);
 
        /* now gather any other geneve devices that were created in this ns */
        list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
@@ -1658,18 +1655,29 @@ static void __net_exit geneve_exit_net(struct net *net)
                 * to the list by the previous loop.
                 */
                if (!net_eq(dev_net(geneve->dev), net))
-                       unregister_netdevice_queue(geneve->dev, &list);
+                       unregister_netdevice_queue(geneve->dev, head);
        }
 
+       WARN_ON_ONCE(!list_empty(&gn->sock_list));
+}
+
+static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
+{
+       struct net *net;
+       LIST_HEAD(list);
+
+       rtnl_lock();
+       list_for_each_entry(net, net_list, exit_list)
+               geneve_destroy_tunnels(net, &list);
+
        /* unregister the devices gathered above */
        unregister_netdevice_many(&list);
        rtnl_unlock();
-       WARN_ON_ONCE(!list_empty(&gn->sock_list));
 }
 
 static struct pernet_operations geneve_net_ops = {
        .init = geneve_init_net,
-       .exit = geneve_exit_net,
+       .exit_batch = geneve_exit_batch_net,
        .id   = &geneve_net_id,
        .size = sizeof(struct geneve_net),
 };
index 078d2c37a6c1360bff4d773dc66b063b4c0633e4..a243fa7ae02fb006ed6e0d46d2b7c63fad0194b9 100644 (file)
@@ -107,7 +107,7 @@ int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
        struct tc_cls_bpf_offload *cls_bpf = type_data;
        struct bpf_prog *prog = cls_bpf->prog;
        struct netdevsim *ns = cb_priv;
-       bool skip_sw;
+       struct bpf_prog *oldprog;
 
        if (type != TC_SETUP_CLSBPF ||
            !tc_can_offload(ns->netdev) ||
@@ -115,27 +115,27 @@ int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
            cls_bpf->common.chain_index)
                return -EOPNOTSUPP;
 
-       skip_sw = cls_bpf->gen_flags & TCA_CLS_FLAGS_SKIP_SW;
-
-       if (nsim_xdp_offload_active(ns))
-               return -EBUSY;
-
        if (!ns->bpf_tc_accept)
                return -EOPNOTSUPP;
        /* Note: progs without skip_sw will probably not be dev bound */
        if (prog && !prog->aux->offload && !ns->bpf_tc_non_bound_accept)
                return -EOPNOTSUPP;
 
-       switch (cls_bpf->command) {
-       case TC_CLSBPF_REPLACE:
-               return nsim_bpf_offload(ns, prog, true);
-       case TC_CLSBPF_ADD:
-               return nsim_bpf_offload(ns, prog, false);
-       case TC_CLSBPF_DESTROY:
-               return nsim_bpf_offload(ns, NULL, true);
-       default:
+       if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
                return -EOPNOTSUPP;
+
+       oldprog = cls_bpf->oldprog;
+
+       /* Don't remove if oldprog doesn't match driver's state */
+       if (ns->bpf_offloaded != oldprog) {
+               oldprog = NULL;
+               if (!cls_bpf->prog)
+                       return 0;
+               if (ns->bpf_offloaded)
+                       return -EBUSY;
        }
+
+       return nsim_bpf_offload(ns, cls_bpf->prog, oldprog);
 }
 
 int nsim_bpf_disable_tc(struct netdevsim *ns)
@@ -201,7 +201,6 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
 {
        struct nsim_bpf_bound_prog *state;
        char name[16];
-       int err;
 
        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
@@ -214,10 +213,9 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
        /* Program id is not populated yet when we create the state. */
        sprintf(name, "%u", ns->prog_id_gen++);
        state->ddir = debugfs_create_dir(name, ns->ddir_bpf_bound_progs);
-       if (IS_ERR(state->ddir)) {
-               err = PTR_ERR(state->ddir);
+       if (IS_ERR_OR_NULL(state->ddir)) {
                kfree(state);
-               return err;
+               return -ENOMEM;
        }
 
        debugfs_create_u32("id", 0400, state->ddir, &prog->aux->id);
@@ -349,6 +347,8 @@ int nsim_bpf_init(struct netdevsim *ns)
                           &ns->bpf_bind_verifier_delay);
        ns->ddir_bpf_bound_progs =
                debugfs_create_dir("bpf_bound_progs", ns->ddir);
+       if (IS_ERR_OR_NULL(ns->ddir_bpf_bound_progs))
+               return -ENOMEM;
 
        ns->bpf_tc_accept = true;
        debugfs_create_bool("bpf_tc_accept", 0600, ns->ddir,
index eb8c679fca9fb9696fdf62ef029e9147a8ebcc62..3fd567928f3d7c7530919c95b00df398590b6d9d 100644 (file)
@@ -139,7 +139,7 @@ static void nsim_dev_release(struct device *dev)
        free_netdev(ns->netdev);
 }
 
-struct device_type nsim_dev_type = {
+static struct device_type nsim_dev_type = {
        .groups = nsim_dev_attr_groups,
        .release = nsim_dev_release,
 };
@@ -151,6 +151,8 @@ static int nsim_init(struct net_device *dev)
 
        ns->netdev = dev;
        ns->ddir = debugfs_create_dir(netdev_name(dev), nsim_ddir);
+       if (IS_ERR_OR_NULL(ns->ddir))
+               return -ENOMEM;
 
        err = nsim_bpf_init(ns);
        if (err)
@@ -469,8 +471,8 @@ static int __init nsim_module_init(void)
        int err;
 
        nsim_ddir = debugfs_create_dir(DRV_NAME, NULL);
-       if (IS_ERR(nsim_ddir))
-               return PTR_ERR(nsim_ddir);
+       if (IS_ERR_OR_NULL(nsim_ddir))
+               return -ENOMEM;
 
        err = bus_register(&nsim_bus);
        if (err)
index 2fc026dc170a8dd10bb3773442fa94c1e9a3350e..342325a89d5f1aeb53a276bb850e092ce89fd4a0 100644 (file)
@@ -879,6 +879,8 @@ static int m88e1510_config_init(struct phy_device *phydev)
 
        /* SGMII-to-Copper mode initialization */
        if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+               u32 pause;
+
                /* Select page 18 */
                err = marvell_set_page(phydev, 18);
                if (err < 0)
@@ -902,6 +904,16 @@ static int m88e1510_config_init(struct phy_device *phydev)
                err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
                if (err < 0)
                        return err;
+
+               /* There appears to be a bug in the 88e1512 when used in
+                * SGMII to copper mode, where the AN advertisement register
+                * clears the pause bits each time a negotiation occurs.
+                * This means we can never be truly sure what was advertised,
+                * so disable Pause support.
+                */
+               pause = SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+               phydev->supported &= ~pause;
+               phydev->advertising &= ~pause;
        }
 
        return m88e1121_config_init(phydev);
@@ -2070,7 +2082,8 @@ static struct phy_driver marvell_drivers[] = {
                .flags = PHY_HAS_INTERRUPT,
                .probe = marvell_probe,
                .config_init = &m88e1145_config_init,
-               .config_aneg = &marvell_config_aneg,
+               .config_aneg = &m88e1101_config_aneg,
+               .read_status = &genphy_read_status,
                .ack_interrupt = &marvell_ack_interrupt,
                .config_intr = &marvell_config_intr,
                .resume = &genphy_resume,
index bfd3090fb055bac4c40924205036119da5b6ce61..07c6048200c6164ac77a649468063e2fedd404c6 100644 (file)
@@ -194,8 +194,11 @@ static int xgene_mdio_reset(struct xgene_mdio_pdata *pdata)
        }
 
        ret = xgene_enet_ecc_init(pdata);
-       if (ret)
+       if (ret) {
+               if (pdata->dev->of_node)
+                       clk_disable_unprepare(pdata->clk);
                return ret;
+       }
        xgene_gmac_reset(pdata);
 
        return 0;
@@ -388,8 +391,10 @@ static int xgene_mdio_probe(struct platform_device *pdev)
                return ret;
 
        mdio_bus = mdiobus_alloc();
-       if (!mdio_bus)
-               return -ENOMEM;
+       if (!mdio_bus) {
+               ret = -ENOMEM;
+               goto out_clk;
+       }
 
        mdio_bus->name = "APM X-Gene MDIO bus";
 
@@ -418,7 +423,7 @@ static int xgene_mdio_probe(struct platform_device *pdev)
                mdio_bus->phy_mask = ~0;
                ret = mdiobus_register(mdio_bus);
                if (ret)
-                       goto out;
+                       goto out_mdiobus;
 
                acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_HANDLE(dev), 1,
                                    acpi_register_phy, NULL, mdio_bus, NULL);
@@ -426,16 +431,20 @@ static int xgene_mdio_probe(struct platform_device *pdev)
        }
 
        if (ret)
-               goto out;
+               goto out_mdiobus;
 
        pdata->mdio_bus = mdio_bus;
        xgene_mdio_status = true;
 
        return 0;
 
-out:
+out_mdiobus:
        mdiobus_free(mdio_bus);
 
+out_clk:
+       if (dev->of_node)
+               clk_disable_unprepare(pdata->clk);
+
        return ret;
 }
 
index 843c1dde93e4552880841dfb535d676a6ad58ed3..c924700cf37b5f4d5c31f48aa48a1cf185a8078e 100644 (file)
@@ -126,7 +126,7 @@ void mdio_device_reset(struct mdio_device *mdiodev, int value)
 
        gpiod_set_value(mdiodev->reset, value);
 
-       d = value ? mdiodev->reset_delay : mdiodev->reset_post_delay;
+       d = value ? mdiodev->reset_assert_delay : mdiodev->reset_deassert_delay;
        if (d)
                usleep_range(d, d + max_t(unsigned int, d / 10, 100));
 }
index f7a77747576247d993da18762b09461673854a16..2ec140ec79237326a1aac44f7ae56fcc031f5fea 100644 (file)
@@ -725,6 +725,9 @@ int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
                     phy_interface_mode_is_8023z(pl->link_interface))))
                return -EINVAL;
 
+       if (pl->phydev)
+               return -EBUSY;
+
        /* Use PHY device/driver interface */
        if (pl->link_interface == PHY_INTERFACE_MODE_NA) {
                pl->link_interface = phy->interface;
index 19b9cc51079e75346af766c91786d66eaa92c3f2..82090ae7ced1efc43e27f6731f51927b0435c3e9 100644 (file)
@@ -2155,6 +2155,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                }
 
                ndst = &rt->dst;
+               if (skb_dst(skb)) {
+                       int mtu = dst_mtu(ndst) - VXLAN_HEADROOM;
+
+                       skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL,
+                                                      skb, mtu);
+               }
+
                tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
                ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
                err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
@@ -2190,6 +2197,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                                goto out_unlock;
                }
 
+               if (skb_dst(skb)) {
+                       int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM;
+
+                       skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL,
+                                                      skb, mtu);
+               }
+
                tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
                ttl = ttl ? : ip6_dst_hoplimit(ndst);
                skb_scrub_packet(skb, xnet);
@@ -3103,6 +3117,11 @@ static void vxlan_config_apply(struct net_device *dev,
 
                max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
                                           VXLAN_HEADROOM);
+               if (max_mtu < ETH_MIN_MTU)
+                       max_mtu = ETH_MIN_MTU;
+
+               if (!changelink && !conf->mtu)
+                       dev->mtu = max_mtu;
        }
 
        if (dev->mtu > max_mtu)
@@ -3692,18 +3711,16 @@ static __net_init int vxlan_init_net(struct net *net)
        return 0;
 }
 
-static void __net_exit vxlan_exit_net(struct net *net)
+static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
 {
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        struct vxlan_dev *vxlan, *next;
        struct net_device *dev, *aux;
        unsigned int h;
-       LIST_HEAD(list);
 
-       rtnl_lock();
        for_each_netdev_safe(net, dev, aux)
                if (dev->rtnl_link_ops == &vxlan_link_ops)
-                       unregister_netdevice_queue(dev, &list);
+                       unregister_netdevice_queue(dev, head);
 
        list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
                /* If vxlan->dev is in the same netns, it has already been added
@@ -3711,20 +3728,30 @@ static void __net_exit vxlan_exit_net(struct net *net)
                 */
                if (!net_eq(dev_net(vxlan->dev), net)) {
                        gro_cells_destroy(&vxlan->gro_cells);
-                       unregister_netdevice_queue(vxlan->dev, &list);
+                       unregister_netdevice_queue(vxlan->dev, head);
                }
        }
 
-       unregister_netdevice_many(&list);
-       rtnl_unlock();
-
        for (h = 0; h < PORT_HASH_SIZE; ++h)
                WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
 }
 
+static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
+{
+       struct net *net;
+       LIST_HEAD(list);
+
+       rtnl_lock();
+       list_for_each_entry(net, net_list, exit_list)
+               vxlan_destroy_tunnels(net, &list);
+
+       unregister_netdevice_many(&list);
+       rtnl_unlock();
+}
+
 static struct pernet_operations vxlan_net_ops = {
        .init = vxlan_init_net,
-       .exit = vxlan_exit_net,
+       .exit_batch = vxlan_exit_batch_net,
        .id   = &vxlan_net_id,
        .size = sizeof(struct vxlan_net),
 };
index 87f56d0e17a6c6826c2bcaf545893d4c4162a869..deb5ae21a559bef7c24e48dff919b8f7bfec541c 100644 (file)
@@ -47,12 +47,19 @@ config ATH10K_DEBUG
 config ATH10K_DEBUGFS
        bool "Atheros ath10k debugfs support"
        depends on ATH10K && DEBUG_FS
-       select RELAY
        ---help---
          Enabled debugfs support
 
          If unsure, say Y to make it easier to debug problems.
 
+config ATH10K_SPECTRAL
+       bool "Atheros ath10k spectral scan support"
+       depends on ATH10K_DEBUGFS
+       select RELAY
+       default n
+       ---help---
+         Say Y to enable access to the FFT/spectral data via debugfs.
+
 config ATH10K_TRACING
        bool "Atheros ath10k tracing support"
        depends on ATH10K
index 9492177e906332c621dd2aee95b2e28a808ffc54..8d9a59b7144e27aab23686267c5bfae83bff82bd 100644 (file)
@@ -15,7 +15,7 @@ ath10k_core-y += mac.o \
                 p2p.o \
                 swap.o
 
-ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
+ath10k_core-$(CONFIG_ATH10K_SPECTRAL) += spectral.o
 ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
 ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
 ath10k_core-$(CONFIG_THERMAL) += thermal.o
index b29fdbd21eadf50a39611f0fe42175dcde1bfdd7..6d065f8d7f78c90f83314026a0b2d2e99fc09dde 100644 (file)
@@ -75,6 +75,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 0,
                .vht160_mcs_tx_highest = 0,
                .n_cipher_suites = 8,
+               .num_peers = TARGET_TLV_NUM_PEERS,
+               .ast_skid_limit = 0x10,
+               .num_wds_entries = 0x20,
        },
        {
                .id = QCA9887_HW_1_0_VERSION,
@@ -99,6 +102,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 0,
                .vht160_mcs_tx_highest = 0,
                .n_cipher_suites = 8,
+               .num_peers = TARGET_TLV_NUM_PEERS,
+               .ast_skid_limit = 0x10,
+               .num_wds_entries = 0x20,
        },
        {
                .id = QCA6174_HW_2_1_VERSION,
@@ -122,6 +128,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 0,
                .vht160_mcs_tx_highest = 0,
                .n_cipher_suites = 8,
+               .num_peers = TARGET_TLV_NUM_PEERS,
+               .ast_skid_limit = 0x10,
+               .num_wds_entries = 0x20,
        },
        {
                .id = QCA6174_HW_2_1_VERSION,
@@ -145,6 +154,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 0,
                .vht160_mcs_tx_highest = 0,
                .n_cipher_suites = 8,
+               .num_peers = TARGET_TLV_NUM_PEERS,
+               .ast_skid_limit = 0x10,
+               .num_wds_entries = 0x20,
        },
        {
                .id = QCA6174_HW_3_0_VERSION,
@@ -168,6 +180,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 0,
                .vht160_mcs_tx_highest = 0,
                .n_cipher_suites = 8,
+               .num_peers = TARGET_TLV_NUM_PEERS,
+               .ast_skid_limit = 0x10,
+               .num_wds_entries = 0x20,
        },
        {
                .id = QCA6174_HW_3_2_VERSION,
@@ -194,6 +209,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 0,
                .vht160_mcs_tx_highest = 0,
                .n_cipher_suites = 8,
+               .num_peers = TARGET_TLV_NUM_PEERS,
+               .ast_skid_limit = 0x10,
+               .num_wds_entries = 0x20,
        },
        {
                .id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -223,6 +241,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 0,
                .vht160_mcs_tx_highest = 0,
                .n_cipher_suites = 11,
+               .num_peers = TARGET_TLV_NUM_PEERS,
+               .ast_skid_limit = 0x10,
+               .num_wds_entries = 0x20,
        },
        {
                .id = QCA9984_HW_1_0_DEV_VERSION,
@@ -257,6 +278,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 1560,
                .vht160_mcs_tx_highest = 1560,
                .n_cipher_suites = 11,
+               .num_peers = TARGET_TLV_NUM_PEERS,
+               .ast_skid_limit = 0x10,
+               .num_wds_entries = 0x20,
        },
        {
                .id = QCA9888_HW_2_0_DEV_VERSION,
@@ -290,6 +314,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 780,
                .vht160_mcs_tx_highest = 780,
                .n_cipher_suites = 11,
+               .num_peers = TARGET_TLV_NUM_PEERS,
+               .ast_skid_limit = 0x10,
+               .num_wds_entries = 0x20,
        },
        {
                .id = QCA9377_HW_1_0_DEV_VERSION,
@@ -313,6 +340,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 0,
                .vht160_mcs_tx_highest = 0,
                .n_cipher_suites = 8,
+               .num_peers = TARGET_TLV_NUM_PEERS,
+               .ast_skid_limit = 0x10,
+               .num_wds_entries = 0x20,
        },
        {
                .id = QCA9377_HW_1_1_DEV_VERSION,
@@ -338,6 +368,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 0,
                .vht160_mcs_tx_highest = 0,
                .n_cipher_suites = 8,
+               .num_peers = TARGET_TLV_NUM_PEERS,
+               .ast_skid_limit = 0x10,
+               .num_wds_entries = 0x20,
        },
        {
                .id = QCA4019_HW_1_0_DEV_VERSION,
@@ -368,6 +401,27 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .vht160_mcs_rx_highest = 0,
                .vht160_mcs_tx_highest = 0,
                .n_cipher_suites = 11,
+               .num_peers = TARGET_TLV_NUM_PEERS,
+               .ast_skid_limit = 0x10,
+               .num_wds_entries = 0x20,
+       },
+       {
+               .id = WCN3990_HW_1_0_DEV_VERSION,
+               .dev_id = 0,
+               .name = "wcn3990 hw1.0",
+               .continuous_frag_desc = true,
+               .tx_chain_mask = 0x7,
+               .rx_chain_mask = 0x7,
+               .max_spatial_stream = 4,
+               .fw = {
+                       .dir = WCN3990_HW_1_0_FW_DIR,
+               },
+               .sw_decrypt_mcast_mgmt = true,
+               .hw_ops = &wcn3990_ops,
+               .decap_align_bytes = 1,
+               .num_peers = TARGET_HL_10_TLV_NUM_PEERS,
+               .ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT,
+               .num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES,
        },
 };
 
@@ -390,6 +444,7 @@ static const char *const ath10k_core_fw_feature_str[] = {
        [ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR] = "skip-null-func-war",
        [ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST] = "allows-mesh-bcast",
        [ATH10K_FW_FEATURE_NO_PS] = "no-ps",
+       [ATH10K_FW_FEATURE_MGMT_TX_BY_REF] = "mgmt-tx-by-reference",
 };
 
 static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@@ -860,6 +915,28 @@ static int ath10k_core_check_smbios(struct ath10k *ar)
        return 0;
 }
 
+static int ath10k_core_check_dt(struct ath10k *ar)
+{
+       struct device_node *node;
+       const char *variant = NULL;
+
+       node = ar->dev->of_node;
+       if (!node)
+               return -ENOENT;
+
+       of_property_read_string(node, "qcom,ath10k-calibration-variant",
+                               &variant);
+       if (!variant)
+               return -ENODATA;
+
+       if (strscpy(ar->id.bdf_ext, variant, sizeof(ar->id.bdf_ext)) < 0)
+               ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                          "bdf variant string is longer than the buffer can accommodate (variant: %s)\n",
+                           variant);
+
+       return 0;
+}
+
 static int ath10k_download_and_run_otp(struct ath10k *ar)
 {
        u32 result, address = ar->hw_params.patch_load_addr;
@@ -1231,19 +1308,19 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
        /* strlen(',variant=') + strlen(ar->id.bdf_ext) */
        char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 };
 
+       if (ar->id.bdf_ext[0] != '\0')
+               scnprintf(variant, sizeof(variant), ",variant=%s",
+                         ar->id.bdf_ext);
+
        if (ar->id.bmi_ids_valid) {
                scnprintf(name, name_len,
-                         "bus=%s,bmi-chip-id=%d,bmi-board-id=%d",
+                         "bus=%s,bmi-chip-id=%d,bmi-board-id=%d%s",
                          ath10k_bus_str(ar->hif.bus),
                          ar->id.bmi_chip_id,
-                         ar->id.bmi_board_id);
+                         ar->id.bmi_board_id, variant);
                goto out;
        }
 
-       if (ar->id.bdf_ext[0] != '\0')
-               scnprintf(variant, sizeof(variant), ",variant=%s",
-                         ar->id.bdf_ext);
-
        scnprintf(name, name_len,
                  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s",
                  ath10k_bus_str(ar->hif.bus),
@@ -2343,7 +2420,11 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
 
        ret = ath10k_core_check_smbios(ar);
        if (ret)
-               ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not set.\n");
+               ath10k_dbg(ar, ATH10K_DBG_BOOT, "SMBIOS bdf variant name not set.\n");
+
+       ret = ath10k_core_check_dt(ar);
+       if (ret)
+               ath10k_dbg(ar, ATH10K_DBG_BOOT, "DT bdf variant name not set.\n");
 
        ret = ath10k_core_fetch_board_file(ar);
        if (ret) {
index 643041ef3271cc75b7a1c964007566a606ff0bd9..631df2137e25d369a97ded401612e2a66f7f9e8b 100644 (file)
@@ -67,7 +67,6 @@
 
 /* NAPI poll budget */
 #define ATH10K_NAPI_BUDGET      64
-#define ATH10K_NAPI_QUOTA_LIMIT 60
 
 /* SMBIOS type containing Board Data File Name Extension */
 #define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8
@@ -364,11 +363,11 @@ struct ath10k_sta {
        struct rate_info txrate;
 
        struct work_struct update_wk;
+       u64 rx_duration;
 
 #ifdef CONFIG_MAC80211_DEBUGFS
        /* protected by conf_mutex */
        bool aggr_mode;
-       u64 rx_duration;
 #endif
 };
 
@@ -463,7 +462,7 @@ struct ath10k_fw_crash_data {
        bool crashed_since_read;
 
        guid_t guid;
-       struct timespec timestamp;
+       struct timespec64 timestamp;
        __le32 registers[REG_DUMP_COUNT_QCA988X];
        struct ath10k_ce_crash_data ce_crash_data[CE_COUNT_MAX];
 };
@@ -488,7 +487,6 @@ struct ath10k_debug {
        /* protected by conf_mutex */
        u64 fw_dbglog_mask;
        u32 fw_dbglog_level;
-       u32 pktlog_filter;
        u32 reg_addr;
        u32 nf_cal_period;
        void *cal_data;
@@ -615,6 +613,9 @@ enum ath10k_fw_features {
        /* Firmware does not support power save in station mode. */
        ATH10K_FW_FEATURE_NO_PS = 17,
 
+       /* Firmware allows management tx by reference instead of by value. */
+       ATH10K_FW_FEATURE_MGMT_TX_BY_REF = 18,
+
        /* keep last */
        ATH10K_FW_FEATURE_COUNT,
 };
@@ -963,6 +964,7 @@ struct ath10k {
        } spectral;
 #endif
 
+       u32 pktlog_filter;
        struct {
                /* protected by conf_mutex */
                struct ath10k_fw_components utf_mode_fw;
index df514507d3f12d259161a047ef9b85a3a7a0e31f..181fd8e2e61583cf0d788fb6c6f14c68eb7dffe1 100644 (file)
@@ -720,7 +720,7 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
 
        crash_data->crashed_since_read = true;
        guid_gen(&crash_data->guid);
-       getnstimeofday(&crash_data->timestamp);
+       ktime_get_real_ts64(&crash_data->timestamp);
 
        return crash_data;
 }
@@ -1950,14 +1950,14 @@ int ath10k_debug_start(struct ath10k *ar)
                                    ret);
        }
 
-       if (ar->debug.pktlog_filter) {
+       if (ar->pktlog_filter) {
                ret = ath10k_wmi_pdev_pktlog_enable(ar,
-                                                   ar->debug.pktlog_filter);
+                                                   ar->pktlog_filter);
                if (ret)
                        /* not serious */
                        ath10k_warn(ar,
                                    "failed to enable pktlog filter %x: %d\n",
-                                   ar->debug.pktlog_filter, ret);
+                                   ar->pktlog_filter, ret);
        } else {
                ret = ath10k_wmi_pdev_pktlog_disable(ar);
                if (ret)
@@ -2097,12 +2097,12 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file,
        mutex_lock(&ar->conf_mutex);
 
        if (ar->state != ATH10K_STATE_ON) {
-               ar->debug.pktlog_filter = filter;
+               ar->pktlog_filter = filter;
                ret = count;
                goto out;
        }
 
-       if (filter == ar->debug.pktlog_filter) {
+       if (filter == ar->pktlog_filter) {
                ret = count;
                goto out;
        }
@@ -2111,7 +2111,7 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file,
                ret = ath10k_wmi_pdev_pktlog_enable(ar, filter);
                if (ret) {
                        ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n",
-                                   ar->debug.pktlog_filter, ret);
+                                   ar->pktlog_filter, ret);
                        goto out;
                }
        } else {
@@ -2122,7 +2122,7 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file,
                }
        }
 
-       ar->debug.pktlog_filter = filter;
+       ar->pktlog_filter = filter;
        ret = count;
 
 out:
@@ -2139,7 +2139,7 @@ static ssize_t ath10k_read_pktlog_filter(struct file *file, char __user *ubuf,
 
        mutex_lock(&ar->conf_mutex);
        len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
-                       ar->debug.pktlog_filter);
+                       ar->pktlog_filter);
        mutex_unlock(&ar->conf_mutex);
 
        return simple_read_from_buffer(ubuf, count, ppos, buf, len);
index 548ad5483a4a2d3d284d8c84120060f1cacf06f8..5e662994c49ab61c1e6fdcdfdf9a06ff19b689f4 100644 (file)
@@ -51,7 +51,8 @@ enum ath10k_pktlog_filter {
        ATH10K_PKTLOG_RCFIND     = 0x000000004,
        ATH10K_PKTLOG_RCUPDATE   = 0x000000008,
        ATH10K_PKTLOG_DBG_PRINT  = 0x000000010,
-       ATH10K_PKTLOG_ANY        = 0x00000001f,
+       ATH10K_PKTLOG_PEER_STATS = 0x000000040,
+       ATH10K_PKTLOG_ANY        = 0x00000005f,
 };
 
 enum ath10k_dbg_aggr_mode {
@@ -60,6 +61,21 @@ enum ath10k_dbg_aggr_mode {
        ATH10K_DBG_AGGR_MODE_MAX,
 };
 
+/* Types of packet log events */
+enum ath_pktlog_type {
+       ATH_PKTLOG_TYPE_TX_CTRL = 1,
+       ATH_PKTLOG_TYPE_TX_STAT,
+};
+
+struct ath10k_pktlog_hdr {
+       __le16 flags;
+       __le16 missed_cnt;
+       __le16 log_type; /* Type of log information following this header */
+       __le16 size; /* Size of variable length log information in bytes */
+       __le32 timestamp;
+       u8 payload[0];
+} __packed;
+
 /* FIXME: How to calculate the buffer size sanely? */
 #define ATH10K_FW_STATS_BUF_SIZE (1024 * 1024)
 
@@ -190,9 +206,6 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                            struct ieee80211_sta *sta, struct dentry *dir);
 void ath10k_sta_update_rx_duration(struct ath10k *ar,
                                   struct ath10k_fw_stats *stats);
-void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                          struct ieee80211_sta *sta,
-                          struct station_info *sinfo);
 #else
 static inline
 void ath10k_sta_update_rx_duration(struct ath10k *ar,
index d59ac6b833400b4c553390869e7a31a5c508cadf..ff96f70d2282b549a6f63b4cb3d7321065dafa09 100644 (file)
@@ -65,33 +65,6 @@ void ath10k_sta_update_rx_duration(struct ath10k *ar,
                ath10k_sta_update_stats_rx_duration(ar, stats);
 }
 
-void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                          struct ieee80211_sta *sta,
-                          struct station_info *sinfo)
-{
-       struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
-       struct ath10k *ar = arsta->arvif->ar;
-
-       if (!ath10k_peer_stats_enabled(ar))
-               return;
-
-       sinfo->rx_duration = arsta->rx_duration;
-       sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION;
-
-       if (!arsta->txrate.legacy && !arsta->txrate.nss)
-               return;
-
-       if (arsta->txrate.legacy) {
-               sinfo->txrate.legacy = arsta->txrate.legacy;
-       } else {
-               sinfo->txrate.mcs = arsta->txrate.mcs;
-               sinfo->txrate.nss = arsta->txrate.nss;
-               sinfo->txrate.bw = arsta->txrate.bw;
-       }
-       sinfo->txrate.flags = arsta->txrate.flags;
-       sinfo->filled |= 1ULL << NL80211_STA_INFO_TX_BITRATE;
-}
-
 static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file,
                                             char __user *user_buf,
                                             size_t count, loff_t *ppos)
index 6305308422c4440fb1e20df4e6b32050d75573d4..7bd93d627f6b607a277fd5cb352b3104c47b190d 100644 (file)
@@ -1497,6 +1497,23 @@ struct htt_peer_tx_stats {
        u8 payload[0];
 } __packed;
 
+#define ATH10K_10_2_TX_STATS_OFFSET    136
+#define PEER_STATS_FOR_NO_OF_PPDUS     4
+
+struct ath10k_10_2_peer_tx_stats {
+       u8 ratecode[PEER_STATS_FOR_NO_OF_PPDUS];
+       u8 success_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
+       __le16 success_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
+       u8 retry_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
+       __le16 retry_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
+       u8 failed_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
+       __le16 failed_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
+       u8 flags[PEER_STATS_FOR_NO_OF_PPDUS];
+       __le32 tx_duration;
+       u8 tx_ppdu_cnt;
+       u8 peer_id;
+} __packed;
+
 union htt_rx_pn_t {
        /* WEP: 24-bit PN */
        u32 pn24;
@@ -1695,7 +1712,7 @@ struct ath10k_htt {
        /* This is used to group tx/rx completions separately and process them
         * in batches to reduce cache stalls
         */
-       struct sk_buff_head rx_compl_q;
+       struct sk_buff_head rx_msdus_q;
        struct sk_buff_head rx_in_ord_compl_q;
        struct sk_buff_head tx_fetch_ind_q;
 
index 7d295ee715349d90128330975ebc9d81d5a19235..620ed7dca8360d17c42a09bf1d106b9d06a6c2e1 100644 (file)
@@ -227,7 +227,7 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
 {
        del_timer_sync(&htt->rx_ring.refill_retry_timer);
 
-       skb_queue_purge(&htt->rx_compl_q);
+       skb_queue_purge(&htt->rx_msdus_q);
        skb_queue_purge(&htt->rx_in_ord_compl_q);
        skb_queue_purge(&htt->tx_fetch_ind_q);
 
@@ -515,7 +515,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
        htt->rx_ring.sw_rd_idx.msdu_payld = 0;
        hash_init(htt->rx_ring.skb_table);
 
-       skb_queue_head_init(&htt->rx_compl_q);
+       skb_queue_head_init(&htt->rx_msdus_q);
        skb_queue_head_init(&htt->rx_in_ord_compl_q);
        skb_queue_head_init(&htt->tx_fetch_ind_q);
        atomic_set(&htt->num_mpdus_ready, 0);
@@ -974,16 +974,25 @@ static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
        return out;
 }
 
-static void ath10k_process_rx(struct ath10k *ar,
-                             struct ieee80211_rx_status *rx_status,
-                             struct sk_buff *skb)
+static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
+                                      struct ieee80211_rx_status *rx_status,
+                                      struct sk_buff *skb)
+{
+       struct ieee80211_rx_status *status;
+
+       status = IEEE80211_SKB_RXCB(skb);
+       *status = *rx_status;
+
+       __skb_queue_tail(&ar->htt.rx_msdus_q, skb);
+}
+
+static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
 {
        struct ieee80211_rx_status *status;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        char tid[32];
 
        status = IEEE80211_SKB_RXCB(skb);
-       *status = *rx_status;
 
        ath10k_dbg(ar, ATH10K_DBG_DATA,
                   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
@@ -1517,7 +1526,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
        }
 }
 
-static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
+static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
                                    struct sk_buff_head *amsdu,
                                    struct ieee80211_rx_status *status)
 {
@@ -1540,7 +1549,7 @@ static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
                        status->flag |= RX_FLAG_ALLOW_SAME_PN;
                }
 
-               ath10k_process_rx(ar, status, msdu);
+               ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
        }
 }
 
@@ -1652,7 +1661,7 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
        struct ath10k *ar = htt->ar;
        struct ieee80211_rx_status *rx_status = &htt->rx_status;
        struct sk_buff_head amsdu;
-       int ret, num_msdus;
+       int ret;
 
        __skb_queue_head_init(&amsdu);
 
@@ -1674,7 +1683,6 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
                return ret;
        }
 
-       num_msdus = skb_queue_len(&amsdu);
        ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
 
        /* only for ret = 1 indicates chained msdus */
@@ -1683,9 +1691,9 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
 
        ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
        ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true);
-       ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
+       ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
 
-       return num_msdus;
+       return 0;
 }
 
 static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
@@ -1893,15 +1901,14 @@ static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
                        RX_FLAG_MMIC_STRIPPED;
 }
 
-static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
-                                     struct sk_buff_head *list)
+static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
+                                      struct sk_buff_head *list)
 {
        struct ath10k_htt *htt = &ar->htt;
        struct ieee80211_rx_status *status = &htt->rx_status;
        struct htt_rx_offload_msdu *rx;
        struct sk_buff *msdu;
        size_t offset;
-       int num_msdu = 0;
 
        while ((msdu = __skb_dequeue(list))) {
                /* Offloaded frames don't have Rx descriptor. Instead they have
@@ -1940,10 +1947,8 @@ static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
 
                ath10k_htt_rx_h_rx_offload_prot(status, msdu);
                ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
-               ath10k_process_rx(ar, status, msdu);
-               num_msdu++;
+               ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
        }
-       return num_msdu;
 }
 
 static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
@@ -1959,7 +1964,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
        u8 tid;
        bool offload;
        bool frag;
-       int ret, num_msdus = 0;
+       int ret;
 
        lockdep_assert_held(&htt->rx_ring.lock);
 
@@ -2001,7 +2006,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
         * separately.
         */
        if (offload)
-               num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);
+               ath10k_htt_rx_h_rx_offload(ar, &list);
 
        while (!skb_queue_empty(&list)) {
                __skb_queue_head_init(&amsdu);
@@ -2014,11 +2019,10 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
                         * better to report something than nothing though. This
                         * should still give an idea about rx rate to the user.
                         */
-                       num_msdus += skb_queue_len(&amsdu);
                        ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
                        ath10k_htt_rx_h_filter(ar, &amsdu, status);
                        ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false);
-                       ath10k_htt_rx_h_deliver(ar, &amsdu, status);
+                       ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
                        break;
                case -EAGAIN:
                        /* fall through */
@@ -2030,7 +2034,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
                        return -EIO;
                }
        }
-       return num_msdus;
+       return ret;
 }
 
 static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
@@ -2449,6 +2453,62 @@ out:
        rcu_read_unlock();
 }
 
+static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
+{
+       struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
+       struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
+       struct ath10k_10_2_peer_tx_stats *tx_stats;
+       struct ieee80211_sta *sta;
+       struct ath10k_peer *peer;
+       u16 log_type = __le16_to_cpu(hdr->log_type);
+       u32 peer_id = 0, i;
+
+       if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
+               return;
+
+       tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
+                   ATH10K_10_2_TX_STATS_OFFSET);
+
+       if (!tx_stats->tx_ppdu_cnt)
+               return;
+
+       peer_id = tx_stats->peer_id;
+
+       rcu_read_lock();
+       spin_lock_bh(&ar->data_lock);
+       peer = ath10k_peer_find_by_id(ar, peer_id);
+       if (!peer) {
+               ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
+                           peer_id);
+               goto out;
+       }
+
+       sta = peer->sta;
+       for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
+               p_tx_stats->succ_bytes =
+                       __le16_to_cpu(tx_stats->success_bytes[i]);
+               p_tx_stats->retry_bytes =
+                       __le16_to_cpu(tx_stats->retry_bytes[i]);
+               p_tx_stats->failed_bytes =
+                       __le16_to_cpu(tx_stats->failed_bytes[i]);
+               p_tx_stats->ratecode = tx_stats->ratecode[i];
+               p_tx_stats->flags = tx_stats->flags[i];
+               p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
+               p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
+               p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];
+
+               ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
+       }
+       spin_unlock_bh(&ar->data_lock);
+       rcu_read_unlock();
+
+       return;
+
+out:
+       spin_unlock_bh(&ar->data_lock);
+       rcu_read_unlock();
+}
+
 bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 {
        struct ath10k_htt *htt = &ar->htt;
@@ -2566,6 +2626,10 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                                        skb->len -
                                        offsetof(struct htt_resp,
                                                 pktlog_msg.payload));
+
+               if (ath10k_peer_stats_enabled(ar))
+                       ath10k_fetch_10_2_tx_stats(ar,
+                                                  resp->pktlog_msg.payload);
                break;
        }
        case HTT_T2H_MSG_TYPE_RX_FLUSH: {
@@ -2631,6 +2695,24 @@ void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
 }
 EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
 
+static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
+{
+       struct sk_buff *skb;
+
+       while (quota < budget) {
+               if (skb_queue_empty(&ar->htt.rx_msdus_q))
+                       break;
+
+               skb = __skb_dequeue(&ar->htt.rx_msdus_q);
+               if (!skb)
+                       break;
+               ath10k_process_rx(ar, skb);
+               quota++;
+       }
+
+       return quota;
+}
+
 int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
 {
        struct ath10k_htt *htt = &ar->htt;
@@ -2638,63 +2720,44 @@ int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
        struct sk_buff_head tx_ind_q;
        struct sk_buff *skb;
        unsigned long flags;
-       int quota = 0, done, num_rx_msdus;
+       int quota = 0, done, ret;
        bool resched_napi = false;
 
        __skb_queue_head_init(&tx_ind_q);
 
-       /* Since in-ord-ind can deliver more than 1 A-MSDU in single event,
-        * process it first to utilize full available quota.
+       /* Process pending frames before dequeuing more data
+        * from hardware.
         */
-       while (quota < budget) {
-               if (skb_queue_empty(&htt->rx_in_ord_compl_q))
-                       break;
-
-               skb = __skb_dequeue(&htt->rx_in_ord_compl_q);
-               if (!skb) {
-                       resched_napi = true;
-                       goto exit;
-               }
+       quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
+       if (quota == budget) {
+               resched_napi = true;
+               goto exit;
+       }
 
+       while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
                spin_lock_bh(&htt->rx_ring.lock);
-               num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
+               ret = ath10k_htt_rx_in_ord_ind(ar, skb);
                spin_unlock_bh(&htt->rx_ring.lock);
-               if (num_rx_msdus < 0) {
-                       resched_napi = true;
-                       goto exit;
-               }
 
                dev_kfree_skb_any(skb);
-               if (num_rx_msdus > 0)
-                       quota += num_rx_msdus;
-
-               if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
-                   !skb_queue_empty(&htt->rx_in_ord_compl_q)) {
+               if (ret == -EIO) {
                        resched_napi = true;
                        goto exit;
                }
        }
 
-       while (quota < budget) {
-               /* no more data to receive */
-               if (!atomic_read(&htt->num_mpdus_ready))
-                       break;
-
-               num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt);
-               if (num_rx_msdus < 0) {
+       while (atomic_read(&htt->num_mpdus_ready)) {
+               ret = ath10k_htt_rx_handle_amsdu(htt);
+               if (ret == -EIO) {
                        resched_napi = true;
                        goto exit;
                }
-
-               quota += num_rx_msdus;
                atomic_dec(&htt->num_mpdus_ready);
-               if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
-                   atomic_read(&htt->num_mpdus_ready)) {
-                       resched_napi = true;
-                       goto exit;
-               }
        }
 
+       /* Deliver received data after processing data from hardware */
+       quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
+
        /* From NAPI documentation:
         *  The napi poll() function may also process TX completions, in which
         *  case if it processes the entire TX ring then it should count that
index 88955bbe20bd3d0350878cac6d1a6af023f24cdb..c31eea63277773d8de18cc58a897834f958ac9d7 100644 (file)
@@ -931,3 +931,5 @@ const struct ath10k_hw_ops qca6174_ops = {
        .set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
        .enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock,
 };
+
+const struct ath10k_hw_ops wcn3990_ops = {};
index 05f26e5858ad85871e73ecad36c4afc4b3984ade..90ad39bdeec43282904e9a5a1b436c0a48c187ee 100644 (file)
@@ -128,6 +128,10 @@ enum qca9377_chip_id_rev {
 #define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin"
 #define QCA4019_HW_1_0_PATCH_LOAD_ADDR  0x1234
 
+/* WCN3990 1.0 definitions */
+#define WCN3990_HW_1_0_DEV_VERSION     ATH10K_HW_WCN3990
+#define WCN3990_HW_1_0_FW_DIR          ATH10K_FW_DIR "/WCN3990/hw3.0"
+
 #define ATH10K_FW_FILE_BASE            "firmware"
 #define ATH10K_FW_API_MAX              6
 #define ATH10K_FW_API_MIN              2
@@ -553,6 +557,10 @@ struct ath10k_hw_params {
 
        /* Number of ciphers supported (i.e First N) in cipher_suites array */
        int n_cipher_suites;
+
+       u32 num_peers;
+       u32 ast_skid_limit;
+       u32 num_wds_entries;
 };
 
 struct htt_rx_desc;
@@ -567,6 +575,7 @@ struct ath10k_hw_ops {
 extern const struct ath10k_hw_ops qca988x_ops;
 extern const struct ath10k_hw_ops qca99x0_ops;
 extern const struct ath10k_hw_ops qca6174_ops;
+extern const struct ath10k_hw_ops wcn3990_ops;
 
 extern const struct ath10k_hw_clk_params qca6174_clk[];
 
@@ -663,6 +672,11 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
 #define TARGET_TLV_NUM_MSDU_DESC               (1024 + 32)
 #define TARGET_TLV_NUM_WOW_PATTERNS            22
 
+/* Target specific defines for WMI-HL-1.0 firmware */
+#define TARGET_HL_10_TLV_NUM_PEERS             14
+#define TARGET_HL_10_TLV_AST_SKID_LIMIT                6
+#define TARGET_HL_10_TLV_NUM_WDS_ENTRIES       2
+
 /* Diagnostic Window */
 #define CE_DIAG_PIPE   7
 
index 0a947eef348d2b97111500d9344f892f44fd602e..75726f12a31c90b8c99ebf64b334a0dea3e801a4 100644 (file)
@@ -2563,7 +2563,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
                }
                break;
        case WMI_VDEV_TYPE_STA:
-               if (vif->bss_conf.qos)
+               if (sta->wme)
                        arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
                break;
        case WMI_VDEV_TYPE_IBSS:
@@ -3574,7 +3574,9 @@ ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
                return ATH10K_MAC_TX_HTT;
        case ATH10K_HW_TXRX_MGMT:
                if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
-                            ar->running_fw->fw_file.fw_features))
+                            ar->running_fw->fw_file.fw_features) ||
+                            test_bit(WMI_SERVICE_MGMT_TX_WMI,
+                                     ar->wmi.svc_map))
                        return ATH10K_MAC_TX_WMI_MGMT;
                else if (ar->htt.target_version_major >= 3)
                        return ATH10K_MAC_TX_HTT;
@@ -6201,6 +6203,16 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
                           "mac vdev %d peer delete %pM sta %pK (sta gone)\n",
                           arvif->vdev_id, sta->addr, sta);
 
+               if (sta->tdls) {
+                       ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id,
+                                                         sta,
+                                                         WMI_TDLS_PEER_STATE_TEARDOWN);
+                       if (ret)
+                               ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n",
+                                           sta->addr,
+                                           WMI_TDLS_PEER_STATE_TEARDOWN, ret);
+               }
+
                ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
                if (ret)
                        ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
@@ -7536,6 +7548,16 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
                                    arvif->vdev_id, ret);
        }
 
+       if (ath10k_peer_stats_enabled(ar)) {
+               ar->pktlog_filter |= ATH10K_PKTLOG_PEER_STATS;
+               ret = ath10k_wmi_pdev_pktlog_enable(ar,
+                                                   ar->pktlog_filter);
+               if (ret) {
+                       ath10k_warn(ar, "failed to enable pktlog %d\n", ret);
+                       goto err_stop;
+               }
+       }
+
        mutex_unlock(&ar->conf_mutex);
        return 0;
 
@@ -7620,6 +7642,34 @@ static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw,
                        peer->removed = true;
 }
 
+static void ath10k_sta_statistics(struct ieee80211_hw *hw,
+                                 struct ieee80211_vif *vif,
+                                 struct ieee80211_sta *sta,
+                                 struct station_info *sinfo)
+{
+       struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+       struct ath10k *ar = arsta->arvif->ar;
+
+       if (!ath10k_peer_stats_enabled(ar))
+               return;
+
+       sinfo->rx_duration = arsta->rx_duration;
+       sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION;
+
+       if (!arsta->txrate.legacy && !arsta->txrate.nss)
+               return;
+
+       if (arsta->txrate.legacy) {
+               sinfo->txrate.legacy = arsta->txrate.legacy;
+       } else {
+               sinfo->txrate.mcs = arsta->txrate.mcs;
+               sinfo->txrate.nss = arsta->txrate.nss;
+               sinfo->txrate.bw = arsta->txrate.bw;
+       }
+       sinfo->txrate.flags = arsta->txrate.flags;
+       sinfo->filled |= 1ULL << NL80211_STA_INFO_TX_BITRATE;
+}
+
 static const struct ieee80211_ops ath10k_ops = {
        .tx                             = ath10k_mac_op_tx,
        .wake_tx_queue                  = ath10k_mac_op_wake_tx_queue,
@@ -7661,6 +7711,7 @@ static const struct ieee80211_ops ath10k_ops = {
        .unassign_vif_chanctx           = ath10k_mac_op_unassign_vif_chanctx,
        .switch_vif_chanctx             = ath10k_mac_op_switch_vif_chanctx,
        .sta_pre_rcu_remove             = ath10k_mac_op_sta_pre_rcu_remove,
+       .sta_statistics                 = ath10k_sta_statistics,
 
        CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
 
@@ -7671,7 +7722,6 @@ static const struct ieee80211_ops ath10k_ops = {
 #endif
 #ifdef CONFIG_MAC80211_DEBUGFS
        .sta_add_debugfs                = ath10k_sta_add_debugfs,
-       .sta_statistics                 = ath10k_sta_statistics,
 #endif
 };
 
@@ -8329,15 +8379,6 @@ int ath10k_mac_register(struct ath10k *ar)
                        ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
        }
 
-       /* Current wake_tx_queue implementation imposes a significant
-        * performance penalty in some setups. The tx scheduling code needs
-        * more work anyway so disable the wake_tx_queue unless firmware
-        * supports the pull-push mechanism.
-        */
-       if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
-                     ar->running_fw->fw_file.fw_features))
-               ar->ops->wake_tx_queue = NULL;
-
        ret = ath10k_mac_init_rd(ar);
        if (ret) {
                ath10k_err(ar, "failed to derive regdom: %d\n", ret);
index 89b0ad769d4f98388b619ab7736debd6894749db..b2a2e8ae04b8cf5d9da02fd65d4bf177a1d24660 100644 (file)
@@ -44,7 +44,7 @@ enum ath10k_spectral_mode {
        SPECTRAL_MANUAL,
 };
 
-#ifdef CONFIG_ATH10K_DEBUGFS
+#ifdef CONFIG_ATH10K_SPECTRAL
 
 int ath10k_spectral_process_fft(struct ath10k *ar,
                                struct wmi_phyerr_ev_arg *phyerr,
@@ -85,6 +85,6 @@ static inline void ath10k_spectral_destroy(struct ath10k *ar)
 {
 }
 
-#endif /* CONFIG_ATH10K_DEBUGFS */
+#endif /* CONFIG_ATH10K_SPECTRAL */
 
 #endif /* SPECTRAL_H */
index 2fc3f24ff1caad9d302bc78ad2194b8cec581942..41eef942ab2c5c947f73cddcd8436c015f4535f4 100644 (file)
@@ -377,6 +377,7 @@ ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
        struct sk_buff *skb;
        int ret;
+       u32 mgmt_tx_cmdid;
 
        if (!ar->wmi.ops->gen_mgmt_tx)
                return -EOPNOTSUPP;
@@ -385,7 +386,13 @@ ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
        if (IS_ERR(skb))
                return PTR_ERR(skb);
 
-       ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
+       if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
+                    ar->running_fw->fw_file.fw_features))
+               mgmt_tx_cmdid = ar->wmi.cmd->mgmt_tx_send_cmdid;
+       else
+               mgmt_tx_cmdid = ar->wmi.cmd->mgmt_tx_cmdid;
+
+       ret = ath10k_wmi_cmd_send(ar, skb, mgmt_tx_cmdid);
        if (ret)
                return ret;
 
index 7616c1c4bbd32a5f4cdf343fe8ece9a90bf2db22..8d53063bd50350c052bb1535fb97620c3b0c2bce 100644 (file)
@@ -917,33 +917,69 @@ ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
        return -ENOMEM;
 }
 
+struct wmi_tlv_svc_rdy_parse {
+       const struct hal_reg_capabilities *reg;
+       const struct wmi_tlv_svc_rdy_ev *ev;
+       const __le32 *svc_bmap;
+       const struct wlan_host_mem_req *mem_reqs;
+       bool svc_bmap_done;
+       bool dbs_hw_mode_done;
+};
+
+static int ath10k_wmi_tlv_svc_rdy_parse(struct ath10k *ar, u16 tag, u16 len,
+                                       const void *ptr, void *data)
+{
+       struct wmi_tlv_svc_rdy_parse *svc_rdy = data;
+
+       switch (tag) {
+       case WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT:
+               svc_rdy->ev = ptr;
+               break;
+       case WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES:
+               svc_rdy->reg = ptr;
+               break;
+       case WMI_TLV_TAG_ARRAY_STRUCT:
+               svc_rdy->mem_reqs = ptr;
+               break;
+       case WMI_TLV_TAG_ARRAY_UINT32:
+               if (!svc_rdy->svc_bmap_done) {
+                       svc_rdy->svc_bmap_done = true;
+                       svc_rdy->svc_bmap = ptr;
+               } else if (!svc_rdy->dbs_hw_mode_done) {
+                       svc_rdy->dbs_hw_mode_done = true;
+               }
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
 static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
                                             struct sk_buff *skb,
                                             struct wmi_svc_rdy_ev_arg *arg)
 {
-       const void **tb;
        const struct hal_reg_capabilities *reg;
        const struct wmi_tlv_svc_rdy_ev *ev;
        const __le32 *svc_bmap;
        const struct wlan_host_mem_req *mem_reqs;
+       struct wmi_tlv_svc_rdy_parse svc_rdy = { };
        int ret;
 
-       tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
-       if (IS_ERR(tb)) {
-               ret = PTR_ERR(tb);
+       ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
+                                 ath10k_wmi_tlv_svc_rdy_parse, &svc_rdy);
+       if (ret) {
                ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
                return ret;
        }
 
-       ev = tb[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT];
-       reg = tb[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES];
-       svc_bmap = tb[WMI_TLV_TAG_ARRAY_UINT32];
-       mem_reqs = tb[WMI_TLV_TAG_ARRAY_STRUCT];
+       ev = svc_rdy.ev;
+       reg = svc_rdy.reg;
+       svc_bmap = svc_rdy.svc_bmap;
+       mem_reqs = svc_rdy.mem_reqs;
 
-       if (!ev || !reg || !svc_bmap || !mem_reqs) {
-               kfree(tb);
+       if (!ev || !reg || !svc_bmap || !mem_reqs)
                return -EPROTO;
-       }
 
        /* This is an internal ABI compatibility check for WMI TLV so check it
         * here instead of the generic WMI code.
@@ -961,7 +997,6 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
            __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
            __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
            __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
-               kfree(tb);
                return -ENOTSUPP;
        }
 
@@ -982,12 +1017,10 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
        ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
                                  ath10k_wmi_tlv_parse_mem_reqs, arg);
        if (ret) {
-               kfree(tb);
                ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
                return ret;
        }
 
-       kfree(tb);
        return 0;
 }
 
@@ -1406,7 +1439,10 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
        cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
 
        cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
-       cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
+
+       cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
+       cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit);
+       cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries);
 
        if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
                cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
@@ -1418,7 +1454,6 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
 
        cfg->num_peer_keys = __cpu_to_le32(2);
        cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
-       cfg->ast_skid_limit = __cpu_to_le32(0x10);
        cfg->tx_chain_mask = __cpu_to_le32(0x7);
        cfg->rx_chain_mask = __cpu_to_le32(0x7);
        cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
@@ -1434,7 +1469,6 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
        cfg->num_mcast_table_elems = __cpu_to_le32(0);
        cfg->mcast2ucast_mode = __cpu_to_le32(0);
        cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
-       cfg->num_wds_entries = __cpu_to_le32(0x20);
        cfg->dma_burst_size = __cpu_to_le32(0);
        cfg->mac_aggr_delim = __cpu_to_le32(0);
        cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
@@ -2449,6 +2483,82 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
        return skb;
 }
 
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
+{
+       struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
+       struct wmi_tlv_mgmt_tx_cmd *cmd;
+       struct wmi_tlv *tlv;
+       struct ieee80211_hdr *hdr;
+       struct sk_buff *skb;
+       void *ptr;
+       int len;
+       u32 buf_len = msdu->len;
+       u16 fc;
+       struct ath10k_vif *arvif;
+       dma_addr_t mgmt_frame_dma;
+       u32 vdev_id;
+
+       if (!cb->vif)
+               return ERR_PTR(-EINVAL);
+
+       hdr = (struct ieee80211_hdr *)msdu->data;
+       fc = le16_to_cpu(hdr->frame_control);
+       arvif = (void *)cb->vif->drv_priv;
+       vdev_id = arvif->vdev_id;
+
+       if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
+               return ERR_PTR(-EINVAL);
+
+       len = sizeof(*cmd) + 2 * sizeof(*tlv);
+
+       if ((ieee80211_is_action(hdr->frame_control) ||
+            ieee80211_is_deauth(hdr->frame_control) ||
+            ieee80211_is_disassoc(hdr->frame_control)) &&
+            ieee80211_has_protected(hdr->frame_control)) {
+               len += IEEE80211_CCMP_MIC_LEN;
+               buf_len += IEEE80211_CCMP_MIC_LEN;
+       }
+
+       buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN);
+       buf_len = round_up(buf_len, 4);
+
+       len += buf_len;
+       len = round_up(len, 4);
+       skb = ath10k_wmi_alloc_skb(ar, len);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       ptr = (void *)skb->data;
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
+       tlv->len = __cpu_to_le16(sizeof(*cmd));
+       cmd = (void *)tlv->value;
+       cmd->vdev_id = __cpu_to_le32(vdev_id);
+       cmd->desc_id = 0;
+       cmd->chanfreq = 0;
+       cmd->buf_len = __cpu_to_le32(buf_len);
+       cmd->frame_len = __cpu_to_le32(msdu->len);
+       mgmt_frame_dma = dma_map_single(arvif->ar->dev, msdu->data,
+                                       msdu->len, DMA_TO_DEVICE);
+       if (!mgmt_frame_dma)
+               return ERR_PTR(-ENOMEM);
+
+       cmd->paddr = __cpu_to_le64(mgmt_frame_dma);
+
+       ptr += sizeof(*tlv);
+       ptr += sizeof(*cmd);
+
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+       tlv->len = __cpu_to_le16(buf_len);
+
+       ptr += sizeof(*tlv);
+       memcpy(ptr, msdu->data, buf_len);
+
+       return skb;
+}
+
 static struct sk_buff *
 ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
                                    enum wmi_force_fw_hang_type type,
@@ -3258,6 +3368,7 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
        .bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
        .prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
        .mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
+       .mgmt_tx_send_cmdid = WMI_TLV_MGMT_TX_SEND_CMD,
        .prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
        .addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
        .addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
@@ -3592,6 +3703,7 @@ static const struct wmi_ops wmi_tlv_ops = {
        .gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
        .gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
        /* .gen_mgmt_tx = not implemented; HTT is used */
+       .gen_mgmt_tx =  ath10k_wmi_tlv_op_gen_mgmt_tx,
        .gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
        .gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
        .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
index 22cf011e839afc190c55c65a387351787369e37d..4faaa64a40d5b902d5c71c903fb78c7b31ea95c2 100644 (file)
@@ -22,6 +22,7 @@
 #define WMI_TLV_CMD_UNSUPPORTED 0
 #define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
 #define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
+#define WMI_TLV_MGMT_TX_FRAME_MAX_LEN  64
 
 enum wmi_tlv_grp_id {
        WMI_TLV_GRP_START = 0x3,
@@ -132,6 +133,7 @@ enum wmi_tlv_cmd_id {
        WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
        WMI_TLV_MGMT_TX_CMDID,
        WMI_TLV_PRB_TMPL_CMDID,
+       WMI_TLV_MGMT_TX_SEND_CMD,
        WMI_TLV_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BA_NEG),
        WMI_TLV_ADDBA_SEND_CMDID,
        WMI_TLV_ADDBA_STATUS_CMDID,
@@ -890,6 +892,63 @@ enum wmi_tlv_tag {
        WMI_TLV_TAG_STRUCT_SAP_OFL_DEL_STA_EVENT,
        WMI_TLV_TAG_STRUCT_APFIND_CMD_PARAM,
        WMI_TLV_TAG_STRUCT_APFIND_EVENT_HDR,
+       WMI_TLV_TAG_STRUCT_OCB_SET_SCHED_CMD,
+       WMI_TLV_TAG_STRUCT_OCB_SET_SCHED_EVENT,
+       WMI_TLV_TAG_STRUCT_OCB_SET_CONFIG_CMD,
+       WMI_TLV_TAG_STRUCT_OCB_SET_CONFIG_RESP_EVENT,
+       WMI_TLV_TAG_STRUCT_OCB_SET_UTC_TIME_CMD,
+       WMI_TLV_TAG_STRUCT_OCB_START_TIMING_ADVERT_CMD,
+       WMI_TLV_TAG_STRUCT_OCB_STOP_TIMING_ADVERT_CMD,
+       WMI_TLV_TAG_STRUCT_OCB_GET_TSF_TIMER_CMD,
+       WMI_TLV_TAG_STRUCT_OCB_GET_TSF_TIMER_RESP_EVENT,
+       WMI_TLV_TAG_STRUCT_DCC_GET_STATS_CMD,
+       WMI_TLV_TAG_STRUCT_DCC_CHANNEL_STATS_REQUEST,
+       WMI_TLV_TAG_STRUCT_DCC_GET_STATS_RESP_EVENT,
+       WMI_TLV_TAG_STRUCT_DCC_CLEAR_STATS_CMD,
+       WMI_TLV_TAG_STRUCT_DCC_UPDATE_NDL_CMD,
+       WMI_TLV_TAG_STRUCT_DCC_UPDATE_NDL_RESP_EVENT,
+       WMI_TLV_TAG_STRUCT_DCC_STATS_EVENT,
+       WMI_TLV_TAG_STRUCT_OCB_CHANNEL,
+       WMI_TLV_TAG_STRUCT_OCB_SCHEDULE_ELEMENT,
+       WMI_TLV_TAG_STRUCT_DCC_NDL_STATS_PER_CHANNEL,
+       WMI_TLV_TAG_STRUCT_DCC_NDL_CHAN,
+       WMI_TLV_TAG_STRUCT_QOS_PARAMETER,
+       WMI_TLV_TAG_STRUCT_DCC_NDL_ACTIVE_STATE_CONFIG,
+       WMI_TLV_TAG_STRUCT_ROAM_SCAN_EXTENDED_THRESHOLD_PARAM,
+       WMI_TLV_TAG_STRUCT_ROAM_FILTER_FIXED_PARAM,
+       WMI_TLV_TAG_STRUCT_PASSPOINT_CONFIG_CMD,
+       WMI_TLV_TAG_STRUCT_PASSPOINT_EVENT_HDR,
+       WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMD,
+       WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_SSID_MATCH_EVENT,
+       WMI_TLV_TAG_STRUCT_VDEV_TSF_TSTAMP_ACTION_CMD,
+       WMI_TLV_TAG_STRUCT_VDEV_TSF_REPORT_EVENT,
+       WMI_TLV_TAG_STRUCT_GET_FW_MEM_DUMP,
+       WMI_TLV_TAG_STRUCT_UPDATE_FW_MEM_DUMP,
+       WMI_TLV_TAG_STRUCT_FW_MEM_DUMP_PARAMS,
+       WMI_TLV_TAG_STRUCT_DEBUG_MESG_FLUSH,
+       WMI_TLV_TAG_STRUCT_DEBUG_MESG_FLUSH_COMPLETE,
+       WMI_TLV_TAG_STRUCT_PEER_SET_RATE_REPORT_CONDITION,
+       WMI_TLV_TAG_STRUCT_ROAM_SUBNET_CHANGE_CONFIG,
+       WMI_TLV_TAG_STRUCT_VDEV_SET_IE_CMD,
+       WMI_TLV_TAG_STRUCT_RSSI_BREACH_MONITOR_CONFIG,
+       WMI_TLV_TAG_STRUCT_RSSI_BREACH_EVENT,
+       WMI_TLV_TAG_STRUCT_EVENT_INITIAL_WAKEUP,
+       WMI_TLV_TAG_STRUCT_SOC_SET_PCL_CMD,
+       WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_CMD,
+       WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_RESPONSE_EVENT,
+       WMI_TLV_TAG_STRUCT_SOC_HW_MODE_TRANSITION_EVENT,
+       WMI_TLV_TAG_STRUCT_VDEV_TXRX_STREAMS,
+       WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+       WMI_TLV_TAG_STRUCT_SOC_SET_DUAL_MAC_CONFIG_CMD,
+       WMI_TLV_TAG_STRUCT_SOC_SET_DUAL_MAC_CONFIG_RESPONSE_EVENT,
+       WMI_TLV_TAG_STRUCT_IOAC_SOCK_PATTERN_T,
+       WMI_TLV_TAG_STRUCT_WOW_ENABLE_ICMPV6_NA_FLT_CMD,
+       WMI_TLV_TAG_STRUCT_DIAG_EVENT_LOG_CONFIG,
+       WMI_TLV_TAG_STRUCT_DIAG_EVENT_LOG_SUPPORTED_EVENT,
+       WMI_TLV_TAG_STRUCT_PACKET_FILTER_CONFIG,
+       WMI_TLV_TAG_STRUCT_PACKET_FILTER_ENABLE,
+       WMI_TLV_TAG_STRUCT_SAP_SET_BLACKLIST_PARAM_CMD,
+       WMI_TLV_TAG_STRUCT_MGMT_TX_CMD,
 
        WMI_TLV_TAG_MAX
 };
@@ -965,6 +1024,50 @@ enum wmi_tlv_service {
        WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
        WMI_TLV_SERVICE_MDNS_OFFLOAD,
        WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
+       WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT,
+       WMI_TLV_SERVICE_OCB,
+       WMI_TLV_SERVICE_AP_ARPNS_OFFLOAD,
+       WMI_TLV_SERVICE_PER_BAND_CHAINMASK_SUPPORT,
+       WMI_TLV_SERVICE_PACKET_FILTER_OFFLOAD,
+       WMI_TLV_SERVICE_MGMT_TX_HTT,
+       WMI_TLV_SERVICE_MGMT_TX_WMI,
+       WMI_TLV_SERVICE_EXT_MSG,
+       WMI_TLV_SERVICE_MAWC,
+       WMI_TLV_SERVICE_PEER_ASSOC_CONF,
+       WMI_TLV_SERVICE_EGAP,
+       WMI_TLV_SERVICE_STA_PMF_OFFLOAD,
+       WMI_TLV_SERVICE_UNIFIED_WOW_CAPABILITY,
+       WMI_TLV_SERVICE_ENHANCED_PROXY_STA,
+       WMI_TLV_SERVICE_ATF,
+       WMI_TLV_SERVICE_COEX_GPIO,
+       WMI_TLV_SERVICE_AUX_SPECTRAL_INTF,
+       WMI_TLV_SERVICE_AUX_CHAN_LOAD_INTF,
+       WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64,
+       WMI_TLV_SERVICE_ENTERPRISE_MESH,
+       WMI_TLV_SERVICE_RESTRT_CHNL_SUPPORT,
+       WMI_TLV_SERVICE_BPF_OFFLOAD,
+       WMI_TLV_SERVICE_SYNC_DELETE_CMDS,
+       WMI_TLV_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+       WMI_TLV_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+       WMI_TLV_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES,
+       WMI_TLV_SERVICE_NAN_DATA,
+       WMI_TLV_SERVICE_NAN_RTT,
+       WMI_TLV_SERVICE_11AX,
+       WMI_TLV_SERVICE_DEPRECATED_REPLACE,
+       WMI_TLV_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
+       WMI_TLV_SERVICE_ENHANCED_MCAST_FILTER,
+       WMI_TLV_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+       WMI_TLV_SERVICE_MESH_11S,
+       WMI_TLV_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT,
+       WMI_TLV_SERVICE_VDEV_RX_FILTER,
+       WMI_TLV_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT,
+       WMI_TLV_SERVICE_MARK_FIRST_WAKEUP_PACKET,
+       WMI_TLV_SERVICE_MULTIPLE_MCAST_FILTER_SET,
+       WMI_TLV_SERVICE_HOST_MANAGED_RX_REORDER,
+       WMI_TLV_SERVICE_FLASH_RDWR_SUPPORT,
+       WMI_TLV_SERVICE_WLAN_STATS_REPORT,
+       WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT,
+       WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD,
 };
 
 #define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
@@ -1121,6 +1224,8 @@ wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len)
               WMI_SERVICE_MDNS_OFFLOAD, len);
        SVCMAP(WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
               WMI_SERVICE_SAP_AUTH_OFFLOAD, len);
+       SVCMAP(WMI_TLV_SERVICE_MGMT_TX_WMI,
+              WMI_SERVICE_MGMT_TX_WMI, len);
 }
 
 #undef SVCMAP
@@ -1643,4 +1748,12 @@ struct wmi_tlv_tx_pause_ev {
 
 void ath10k_wmi_tlv_attach(struct ath10k *ar);
 
+struct wmi_tlv_mgmt_tx_cmd {
+       __le32 vdev_id;
+       __le32 desc_id;
+       __le32 chanfreq;
+       __le64 paddr;
+       __le32 frame_len;
+       __le32 buf_len;
+} __packed;
 #endif
index cad2e42dcef63f4f54f3f413e248f44f250d4b6b..b6cbc0281fe18cb96c617345397be940f3c33f33 100644 (file)
@@ -29,6 +29,7 @@
 #include "p2p.h"
 #include "hw.h"
 #include "hif.h"
+#include "txrx.h"
 
 #define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
 #define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
@@ -4456,6 +4457,74 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
                   __le32_to_cpu(ev->rate_max));
 }
 
+static void
+ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct wmi_tdls_peer_event *ev;
+       struct ath10k_peer *peer;
+       struct ath10k_vif *arvif;
+       int vdev_id;
+       int peer_status;
+       int peer_reason;
+       u8 reason;
+
+       if (skb->len < sizeof(*ev)) {
+               ath10k_err(ar, "received tdls peer event with invalid size (%d bytes)\n",
+                          skb->len);
+               return;
+       }
+
+       ev = (struct wmi_tdls_peer_event *)skb->data;
+       vdev_id = __le32_to_cpu(ev->vdev_id);
+       peer_status = __le32_to_cpu(ev->peer_status);
+       peer_reason = __le32_to_cpu(ev->peer_reason);
+
+       spin_lock_bh(&ar->data_lock);
+       peer = ath10k_peer_find(ar, vdev_id, ev->peer_macaddr.addr);
+       spin_unlock_bh(&ar->data_lock);
+
+       if (!peer) {
+               ath10k_warn(ar, "failed to find peer entry for %pM\n",
+                           ev->peer_macaddr.addr);
+               return;
+       }
+
+       switch (peer_status) {
+       case WMI_TDLS_SHOULD_TEARDOWN:
+               switch (peer_reason) {
+               case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
+               case WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE:
+               case WMI_TDLS_TEARDOWN_REASON_RSSI:
+                       reason = WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE;
+                       break;
+               default:
+                       reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
+                       break;
+               }
+
+               arvif = ath10k_get_arvif(ar, vdev_id);
+               if (!arvif) {
+                       ath10k_warn(ar, "received tdls peer event for invalid vdev id %u\n",
+                                   vdev_id);
+                       return;
+               }
+
+               ieee80211_tdls_oper_request(arvif->vif, ev->peer_macaddr.addr,
+                                           NL80211_TDLS_TEARDOWN, reason,
+                                           GFP_ATOMIC);
+
+               ath10k_dbg(ar, ATH10K_DBG_WMI,
+                          "received tdls teardown event for peer %pM reason %u\n",
+                          ev->peer_macaddr.addr, peer_reason);
+               break;
+       default:
+               ath10k_dbg(ar, ATH10K_DBG_WMI,
+                          "received unknown tdls peer event %u\n",
+                          peer_status);
+               break;
+       }
+}
+
 void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
 {
        ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
@@ -5477,6 +5546,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
        case WMI_10_4_PDEV_TPC_CONFIG_EVENTID:
                ath10k_wmi_event_pdev_tpc_config(ar, skb);
                break;
+       case WMI_10_4_TDLS_PEER_EVENTID:
+               ath10k_wmi_handle_tdls_peer_event(ar, skb);
+               break;
        default:
                ath10k_warn(ar, "Unknown eventid: %d\n", id);
                break;
index c02b21cff38d5b047cbda35dabcad9f2e9e6a47d..f6d60dc16d1c120de966c619e9363b78613aa8e8 100644 (file)
@@ -195,6 +195,7 @@ enum wmi_service {
        WMI_SERVICE_SMART_LOGGING_SUPPORT,
        WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
        WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
+       WMI_SERVICE_MGMT_TX_WMI,
 
        /* keep last */
        WMI_SERVICE_MAX,
@@ -797,6 +798,7 @@ struct wmi_cmd_map {
        u32 bcn_filter_rx_cmdid;
        u32 prb_req_filter_rx_cmdid;
        u32 mgmt_tx_cmdid;
+       u32 mgmt_tx_send_cmdid;
        u32 prb_tmpl_cmdid;
        u32 addba_clear_resp_cmdid;
        u32 addba_send_cmdid;
@@ -5236,7 +5238,8 @@ enum wmi_10_4_vdev_param {
 #define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
 
 #define WMI_TXBF_STS_CAP_OFFSET_LSB    4
-#define WMI_TXBF_STS_CAP_OFFSET_MASK   0xf0
+#define WMI_TXBF_STS_CAP_OFFSET_MASK   0x70
+#define WMI_TXBF_CONF_IMPLICIT_BF       BIT(7)
 #define WMI_BF_SOUND_DIM_OFFSET_LSB    8
 #define WMI_BF_SOUND_DIM_OFFSET_MASK   0xf00
 
index b53eb2b85f02d115c4df6221f4a060942cdead1a..2ba8cf3f38afd4fa98c9038d43b8e4b838691114 100644 (file)
@@ -2766,7 +2766,6 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
        struct ieee80211_mgmt *mgmt;
        bool hidden = false;
        u8 *ies;
-       int ies_len;
        struct wmi_connect_cmd p;
        int res;
        int i, ret;
@@ -2804,7 +2803,6 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
        ies = mgmt->u.beacon.variable;
        if (ies > info->beacon.head + info->beacon.head_len)
                return -EINVAL;
-       ies_len = info->beacon.head + info->beacon.head_len - ies;
 
        if (info->ssid == NULL)
                return -EINVAL;
index 1379906bf8498831478ce5ebdf94701f93f18018..8da9506f8c2b868d00d8b23a2f2e3e14de75f9b6 100644 (file)
@@ -1001,7 +1001,7 @@ static void aggr_slice_amsdu(struct aggr_info *p_aggr,
 
        while (amsdu_len > mac_hdr_len) {
                hdr = (struct ethhdr *) framep;
-               payload_8023_len = ntohs(hdr->h_proto);
+               payload_8023_len = be16_to_cpu(hdr->h_proto);
 
                if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
                    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
index 783a38f1a626a435f5f2eb6b39f2ef9724292f06..1f3523019509fe8fc28ebf329b3855652cb122f1 100644 (file)
@@ -61,13 +61,12 @@ config ATH9K_DEBUGFS
        depends on ATH9K && DEBUG_FS
        select MAC80211_DEBUGFS
        select ATH9K_COMMON_DEBUG
-       select RELAY
        ---help---
          Say Y, if you need access to ath9k's statistics for
          interrupts, rate control, etc.
 
-         Also required for changing debug message flags at run time.
-         As well as access to the FFT/spectral data and TX99.
+         Also required for changing debug message flags at run time and for
+         TX99.
 
 config ATH9K_STATION_STATISTICS
        bool "Detailed station statistics"
@@ -177,7 +176,6 @@ config ATH9K_HTC_DEBUGFS
        bool "Atheros ath9k_htc debugging"
        depends on ATH9K_HTC && DEBUG_FS
        select ATH9K_COMMON_DEBUG
-       select RELAY
        ---help---
          Say Y, if you need access to ath9k_htc's statistics.
          As well as access to the FFT/spectral data.
@@ -192,3 +190,11 @@ config ATH9K_HWRNG
 
          Say Y, feeds the entropy directly from the WiFi driver to the input
          pool.
+
+config ATH9K_COMMON_SPECTRAL
+       bool "Atheros ath9k/ath9k_htc spectral scan support"
+       depends on ATH9K_DEBUGFS || ATH9K_HTC_DEBUGFS
+       select RELAY
+       default n
+       ---help---
+         Say Y to enable access to the FFT/spectral data via debugfs.
index d804ce7391a0ee88cd544cb1eeb2dc7f128b0d3b..f71b2ad8275cdbacc741a2cc167bed29bcadc500 100644 (file)
@@ -62,8 +62,8 @@ ath9k_common-y:=      common.o \
                        common-init.o \
                        common-beacon.o \
 
-ath9k_common-$(CONFIG_ATH9K_COMMON_DEBUG) += common-debug.o \
-                                            common-spectral.o
+ath9k_common-$(CONFIG_ATH9K_COMMON_DEBUG) += common-debug.o
+ath9k_common-$(CONFIG_ATH9K_COMMON_SPECTRAL) += common-spectral.o
 
 ath9k_htc-y += htc_hst.o \
                hif_usb.o \
index 5d1a51d83aa646006f20298e206bb391830b5a78..303ab470ce34b6d92efaade5492ddd2515ccfdd2 100644 (file)
@@ -151,7 +151,7 @@ static inline u8 spectral_bitmap_weight(u8 *bins)
        return bins[0] & 0x3f;
 }
 
-#ifdef CONFIG_ATH9K_COMMON_DEBUG
+#ifdef CONFIG_ATH9K_COMMON_SPECTRAL
 void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv, struct dentry *debugfs_phy);
 void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv);
 
@@ -183,6 +183,6 @@ static inline int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv,
 {
        return 0;
 }
-#endif /* CONFIG_ATH9K_COMMON_DEBUG */
+#endif /* CONFIG_ATH9K_COMMON_SPECTRAL */
 
 #endif /* SPECTRAL_H */
index 40a397fd0e0eebdcd5e8d0f40f7cd4ed33be7b68..6fee9a464ccea1c1b51478af79f2741613812b6e 100644 (file)
@@ -123,11 +123,9 @@ static bool ath9k_check_chirping(struct ath_softc *sc, u8 *data,
                        fft = (struct ath9k_dfs_fft_40 *) (data + 2);
                        ath_dbg(common, DFS, "fixing datalen by 2\n");
                }
-               if (IS_CHAN_HT40MINUS(ah->curchan)) {
-                       int temp = is_ctl;
-                       is_ctl = is_ext;
-                       is_ext = temp;
-               }
+               if (IS_CHAN_HT40MINUS(ah->curchan))
+                       swap(is_ctl, is_ext);
+
                for (i = 0; i < FFT_NUM_SAMPLES; i++)
                        max_bin[i] = ath9k_get_max_index_ht40(fft + i, is_ctl,
                                                              is_ext);
index f808e5833d7e3217f476445e6a21db574b5cf394..a82ad739ab80653efd17b2eae0929fb4c224432f 100644 (file)
@@ -1683,6 +1683,10 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                break;
        case IEEE80211_AMPDU_TX_OPERATIONAL:
+               if (tid >= ATH9K_HTC_MAX_TID) {
+                       ret = -EINVAL;
+                       break;
+               }
                ista = (struct ath9k_htc_sta *) sta->drv_priv;
                spin_lock_bh(&priv->tx.tx_lock);
                ista->tid_state[tid] = AGGR_OPERATIONAL;
index b765c647319dd6e3a4f20ed25554272f0f240eab..182963522941a436166bd665de147c5a678fb0e9 100644 (file)
@@ -348,6 +348,13 @@ enum wcn36xx_hal_host_msg_type {
        WCN36XX_HAL_DHCP_START_IND = 189,
        WCN36XX_HAL_DHCP_STOP_IND = 190,
 
+       /* Scan Offload(hw) APIs */
+       WCN36XX_HAL_START_SCAN_OFFLOAD_REQ = 204,
+       WCN36XX_HAL_START_SCAN_OFFLOAD_RSP = 205,
+       WCN36XX_HAL_STOP_SCAN_OFFLOAD_REQ = 206,
+       WCN36XX_HAL_STOP_SCAN_OFFLOAD_RSP = 207,
+       WCN36XX_HAL_SCAN_OFFLOAD_IND = 210,
+
        WCN36XX_HAL_AVOID_FREQ_RANGE_IND = 233,
 
        WCN36XX_HAL_PRINT_REG_INFO_IND = 259,
@@ -1115,6 +1122,101 @@ struct wcn36xx_hal_finish_scan_rsp_msg {
 
 } __packed;
 
+/* Scan mode requested from firmware for an offloaded (hw) scan */
+enum wcn36xx_hal_scan_type {
+       WCN36XX_HAL_SCAN_TYPE_PASSIVE = 0x00,
+       WCN36XX_HAL_SCAN_TYPE_ACTIVE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Length-prefixed SSID as carried in HAL messages (not NUL-terminated) */
+struct wcn36xx_hal_mac_ssid {
+       u8 length;
+       u8 ssid[32];
+} __packed;
+
+/* Body of WCN36XX_HAL_START_SCAN_OFFLOAD_REQ: describes one hw scan */
+struct wcn36xx_hal_start_scan_offload_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* BSSIDs hot list */
+       u8 num_bssid;
+       u8 bssids[4][ETH_ALEN];
+
+       /* Directed probe-requests will be sent for listed SSIDs (max 10) */
+       u8 num_ssid;
+       struct wcn36xx_hal_mac_ssid ssids[10];
+
+       /* Report AP with hidden ssid */
+       u8 scan_hidden;
+
+       /* Self MAC address */
+       u8 mac[ETH_ALEN];
+
+       /* BSS type */
+       enum wcn36xx_hal_bss_type bss_type;
+
+       /* Scan type */
+       enum wcn36xx_hal_scan_type scan_type;
+
+       /* Minimum scanning time on each channel (ms) */
+       u32 min_ch_time;
+
+       /* Maximum scanning time on each channel */
+       u32 max_ch_time;
+
+       /* Is a p2p search */
+       u8 p2p_search;
+
+       /* Channels to scan */
+       u8 num_channel;
+       u8 channels[80];
+
+       /* IE field (ie_len bytes of trailing data) */
+       u16 ie_len;
+       u8 ie[0];
+} __packed;
+
+/* Body of WCN36XX_HAL_START_SCAN_OFFLOAD_RSP */
+struct wcn36xx_hal_start_scan_offload_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+} __packed;
+
+/* Progress/result codes reported via WCN36XX_HAL_SCAN_OFFLOAD_IND */
+enum wcn36xx_hal_scan_offload_ind_type {
+       /* Scan has been started */
+       WCN36XX_HAL_SCAN_IND_STARTED = 0x01,
+       /* Scan has been completed */
+       WCN36XX_HAL_SCAN_IND_COMPLETED = 0x02,
+       /* Moved to foreign channel */
+       WCN36XX_HAL_SCAN_IND_FOREIGN_CHANNEL = 0x08,
+       /* scan request has been dequeued */
+       WCN36XX_HAL_SCAN_IND_DEQUEUED = 0x10,
+       /* preempted by other high priority scan */
+       WCN36XX_HAL_SCAN_IND_PREEMPTED = 0x20,
+       /* scan start failed */
+       WCN36XX_HAL_SCAN_IND_FAILED = 0x40,
+       /* scan restarted */
+       WCN36XX_HAL_SCAN_IND_RESTARTED = 0x80,
+       WCN36XX_HAL_SCAN_IND_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Body of WCN36XX_HAL_SCAN_OFFLOAD_IND (unsolicited firmware event) */
+struct wcn36xx_hal_scan_offload_ind {
+       struct wcn36xx_hal_msg_header header;
+
+       /* one of enum wcn36xx_hal_scan_offload_ind_type */
+       u32 type;
+       u32 channel_mhz;
+       u32 scan_id;
+} __packed;
+
+/* Body of WCN36XX_HAL_STOP_SCAN_OFFLOAD_REQ (header only, no payload) */
+struct wcn36xx_hal_stop_scan_offload_req_msg {
+       struct wcn36xx_hal_msg_header header;
+} __packed;
+
+/* Body of WCN36XX_HAL_STOP_SCAN_OFFLOAD_RSP */
+struct wcn36xx_hal_stop_scan_offload_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+} __packed;
+
 enum wcn36xx_hal_rate_index {
        HW_RATE_INDEX_1MBPS     = 0x82,
        HW_RATE_INDEX_2MBPS     = 0x84,
@@ -1507,11 +1609,6 @@ struct wcn36xx_hal_edca_param_record {
        u16 txop_limit;
 } __packed;
 
-struct wcn36xx_hal_mac_ssid {
-       u8 length;
-       u8 ssid[32];
-} __packed;
-
 /* Concurrency role. These are generic IDs that identify the various roles
  *  in the software system. */
 enum wcn36xx_hal_con_mode {
index f7d228b5ba933f744474be119802f41e461c5715..5bed323f110087555c8ce76417591d78e5e88e00 100644 (file)
@@ -629,7 +629,6 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
                           struct ieee80211_scan_request *hw_req)
 {
        struct wcn36xx *wcn = hw->priv;
-
        mutex_lock(&wcn->scan_lock);
        if (wcn->scan_req) {
                mutex_unlock(&wcn->scan_lock);
@@ -638,11 +637,16 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
 
        wcn->scan_aborted = false;
        wcn->scan_req = &hw_req->req;
+
        mutex_unlock(&wcn->scan_lock);
 
-       schedule_work(&wcn->scan_work);
+       if (!get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
+               /* legacy manual/sw scan */
+               schedule_work(&wcn->scan_work);
+               return 0;
+       }
 
-       return 0;
+       return wcn36xx_smd_start_hw_scan(wcn, vif, &hw_req->req);
 }
 
 static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
@@ -650,6 +654,12 @@ static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
 {
        struct wcn36xx *wcn = hw->priv;
 
+       if (!wcn36xx_smd_stop_hw_scan(wcn)) {
+               struct cfg80211_scan_info scan_info = { .aborted = true };
+
+               ieee80211_scan_completed(wcn->hw, &scan_info);
+       }
+
        mutex_lock(&wcn->scan_lock);
        wcn->scan_aborted = true;
        mutex_unlock(&wcn->scan_lock);
index 9c6590d5348ad53f64d130287f51b9fe7ccf95d8..2914618a03359742915974de895ede300b977ce0 100644 (file)
@@ -73,6 +73,8 @@ static struct wcn36xx_cfg_val wcn36xx_cfg_vals[] = {
        WCN36XX_CFG_VAL(TX_PWR_CTRL_ENABLE, 1),
        WCN36XX_CFG_VAL(ENABLE_CLOSE_LOOP, 1),
        WCN36XX_CFG_VAL(ENABLE_LPWR_IMG_TRANSITION, 0),
+       WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_BT, 120000),
+       WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_WLAN, 30000),
        WCN36XX_CFG_VAL(MAX_ASSOC_LIMIT, 10),
        WCN36XX_CFG_VAL(ENABLE_MCC_ADAPTIVE_SCHEDULER, 0),
 };
@@ -613,6 +615,85 @@ out:
        return ret;
 }
 
+/* Ask firmware to run an offloaded (hw) scan described by @req on behalf
+ * of @vif.  Sends WCN36XX_HAL_START_SCAN_OFFLOAD_REQ and waits for the
+ * response under hal_mutex; scan completion is reported asynchronously
+ * via WCN36XX_HAL_SCAN_OFFLOAD_IND.  Returns 0 on success, negative errno
+ * on failure.
+ *
+ * NOTE(review): req->ie is not forwarded even though the HAL message has
+ * an ie_len/ie[] tail - presumably a firmware limitation; confirm.
+ */
+int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+                             struct cfg80211_scan_request *req)
+{
+       struct wcn36xx_hal_start_scan_offload_req_msg msg_body;
+       int ret, i;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_OFFLOAD_REQ);
+
+       msg_body.scan_type = WCN36XX_HAL_SCAN_TYPE_ACTIVE;
+       msg_body.min_ch_time = 30;
+       /* fix: was a duplicated store to min_ch_time, leaving max_ch_time 0 */
+       msg_body.max_ch_time = 100;
+       msg_body.scan_hidden = 1;
+       memcpy(msg_body.mac, vif->addr, ETH_ALEN);
+       msg_body.p2p_search = vif->p2p;
+
+       /* Clamp SSID and channel lists to what the HAL message can carry */
+       msg_body.num_ssid = min_t(u8, req->n_ssids, ARRAY_SIZE(msg_body.ssids));
+       for (i = 0; i < msg_body.num_ssid; i++) {
+               msg_body.ssids[i].length = min_t(u8, req->ssids[i].ssid_len,
+                                               sizeof(msg_body.ssids[i].ssid));
+               memcpy(msg_body.ssids[i].ssid, req->ssids[i].ssid,
+                      msg_body.ssids[i].length);
+       }
+
+       msg_body.num_channel = min_t(u8, req->n_channels,
+                                    sizeof(msg_body.channels));
+       for (i = 0; i < msg_body.num_channel; i++)
+               msg_body.channels[i] = req->channels[i]->hw_value;
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL,
+                   "hal start hw-scan (channels: %u; ssids: %u; p2p: %s)\n",
+                   msg_body.num_channel, msg_body.num_ssid,
+                   msg_body.p2p_search ? "yes" : "no");
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_start_scan_offload failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret)
+               wcn36xx_err("hal_start_scan_offload response failed err=%d\n",
+                           ret);
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+/* Cancel a previously started firmware-offloaded scan.
+ *
+ * Sends WCN36XX_HAL_STOP_SCAN_OFFLOAD_REQ and waits for the response
+ * under hal_mutex.  Returns 0 on success, negative errno on failure.
+ */
+int wcn36xx_smd_stop_hw_scan(struct wcn36xx *wcn)
+{
+       struct wcn36xx_hal_stop_scan_offload_req_msg msg_body;
+       int ret;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_SCAN_OFFLOAD_REQ);
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL, "hal stop hw-scan\n");
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_stop_scan_offload failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_stop_scan_offload response failed err=%d\n",
+                           ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
 static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len)
 {
        struct wcn36xx_hal_switch_channel_rsp_msg *rsp;
@@ -2039,6 +2120,40 @@ static int wcn36xx_smd_tx_compl_ind(struct wcn36xx *wcn, void *buf, size_t len)
        return 0;
 }
 
+/* Handle a WCN36XX_HAL_SCAN_OFFLOAD_IND firmware event.
+ *
+ * A COMPLETED or FAILED indication finishes the pending mac80211 hw scan
+ * (FAILED reports it as aborted); intermediate progress indications are
+ * ignored.  Returns 0, or -EIO if the event payload size is wrong.
+ */
+static int wcn36xx_smd_hw_scan_ind(struct wcn36xx *wcn, void *buf, size_t len)
+{
+       struct wcn36xx_hal_scan_offload_ind *rsp = buf;
+       struct cfg80211_scan_info scan_info = {};
+
+       if (len != sizeof(*rsp)) {
+               /* fix: message wrongly said "delete scan" (copy-paste) */
+               wcn36xx_warn("Corrupted scan offload indication\n");
+               return -EIO;
+       }
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL, "scan indication (type %x)", rsp->type);
+
+       switch (rsp->type) {
+       case WCN36XX_HAL_SCAN_IND_FAILED:
+               scan_info.aborted = true;
+               /* fall through */
+       case WCN36XX_HAL_SCAN_IND_COMPLETED:
+               mutex_lock(&wcn->scan_lock);
+               wcn->scan_req = NULL;
+               mutex_unlock(&wcn->scan_lock);
+               ieee80211_scan_completed(wcn->hw, &scan_info);
+               break;
+       case WCN36XX_HAL_SCAN_IND_STARTED:
+       case WCN36XX_HAL_SCAN_IND_FOREIGN_CHANNEL:
+       case WCN36XX_HAL_SCAN_IND_DEQUEUED:
+       case WCN36XX_HAL_SCAN_IND_PREEMPTED:
+       case WCN36XX_HAL_SCAN_IND_RESTARTED:
+               /* progress-only indications: nothing to do */
+               break;
+       default:
+               wcn36xx_warn("Unknown scan indication type %x\n", rsp->type);
+       }
+
+       return 0;
+}
+
 static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
                                         void *buf,
                                         size_t len)
@@ -2250,6 +2365,8 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
        case WCN36XX_HAL_CH_SWITCH_RSP:
        case WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP:
        case WCN36XX_HAL_8023_MULTICAST_LIST_RSP:
+       case WCN36XX_HAL_START_SCAN_OFFLOAD_RSP:
+       case WCN36XX_HAL_STOP_SCAN_OFFLOAD_RSP:
                memcpy(wcn->hal_buf, buf, len);
                wcn->hal_rsp_len = len;
                complete(&wcn->hal_rsp_compl);
@@ -2262,6 +2379,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
        case WCN36XX_HAL_MISSED_BEACON_IND:
        case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
        case WCN36XX_HAL_PRINT_REG_INFO_IND:
+       case WCN36XX_HAL_SCAN_OFFLOAD_IND:
                msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_ATOMIC);
                if (!msg_ind) {
                        wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
@@ -2298,6 +2416,8 @@ static void wcn36xx_ind_smd_work(struct work_struct *work)
        hal_ind_msg = list_first_entry(&wcn->hal_ind_queue,
                                       struct wcn36xx_hal_ind_msg,
                                       list);
+       list_del(wcn->hal_ind_queue.next);
+       spin_unlock_irqrestore(&wcn->hal_ind_lock, flags);
 
        msg_header = (struct wcn36xx_hal_msg_header *)hal_ind_msg->msg;
 
@@ -2326,12 +2446,14 @@ static void wcn36xx_ind_smd_work(struct work_struct *work)
                                               hal_ind_msg->msg,
                                               hal_ind_msg->msg_len);
                break;
+       case WCN36XX_HAL_SCAN_OFFLOAD_IND:
+               wcn36xx_smd_hw_scan_ind(wcn, hal_ind_msg->msg,
+                                       hal_ind_msg->msg_len);
+               break;
        default:
                wcn36xx_err("SMD_EVENT (%d) not supported\n",
                              msg_header->msg_type);
        }
-       list_del(wcn->hal_ind_queue.next);
-       spin_unlock_irqrestore(&wcn->hal_ind_lock, flags);
        kfree(hal_ind_msg);
 }
 int wcn36xx_smd_open(struct wcn36xx *wcn)
index 013fc9546f56d4b48d60a3452b4a72d3204cdaa6..8076edf40ac80a1a5889094a7a8ea9c84fb8b412 100644 (file)
@@ -65,6 +65,9 @@ int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel);
 int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
                            enum wcn36xx_hal_sys_mode mode);
 int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn, u8 *channels, size_t channel_count);
+int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+                             struct cfg80211_scan_request *req);
+int wcn36xx_smd_stop_hw_scan(struct wcn36xx *wcn);
 int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif);
 int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr);
 int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index);
index 85d5c04618ebcafb87f8a089f31dac3b393f0dc5..771a534d6ca90e69d4de171a8d086955767956d6 100644 (file)
@@ -901,7 +901,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                         u64 *cookie)
 {
        const u8 *buf = params->buf;
-       size_t len = params->len;
+       size_t len = params->len, total;
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
        int rc;
        bool tx_status = false;
@@ -926,7 +926,11 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
        if (len < sizeof(struct ieee80211_hdr_3addr))
                return -EINVAL;
 
-       cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
+       total = sizeof(*cmd) + len;
+       if (total < len)
+               return -EINVAL;
+
+       cmd = kmalloc(total, GFP_KERNEL);
        if (!cmd) {
                rc = -ENOMEM;
                goto out;
@@ -936,7 +940,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
        cmd->len = cpu_to_le16(len);
        memcpy(cmd->payload, buf, len);
 
-       rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, sizeof(*cmd) + len,
+       rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, total,
                      WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
        if (rc == 0)
                tx_status = !evt.evt.status;
@@ -1727,9 +1731,12 @@ static int wil_cfg80211_suspend(struct wiphy *wiphy,
 
        wil_dbg_pm(wil, "suspending\n");
 
-       wil_p2p_stop_discovery(wil);
-
+       mutex_lock(&wil->mutex);
+       mutex_lock(&wil->p2p_wdev_mutex);
+       wil_p2p_stop_radio_operations(wil);
        wil_abort_scan(wil, true);
+       mutex_unlock(&wil->p2p_wdev_mutex);
+       mutex_unlock(&wil->mutex);
 
 out:
        return rc;
index e58dc6dc1f9ccf211966bbcdce2535fe9df9a3de..4475937faf250c572ea043564a4bdd65a02837ec 100644 (file)
@@ -242,12 +242,19 @@ static void wil_print_ring(struct seq_file *s, const char *prefix,
 static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
 {
        struct wil6210_priv *wil = s->private;
+       int ret;
+
+       ret = wil_pm_runtime_get(wil);
+       if (ret < 0)
+               return ret;
 
        wil_print_ring(s, "tx", wil->csr + HOST_MBOX +
                       offsetof(struct wil6210_mbox_ctl, tx));
        wil_print_ring(s, "rx", wil->csr + HOST_MBOX +
                       offsetof(struct wil6210_mbox_ctl, rx));
 
+       wil_pm_runtime_put(wil);
+
        return 0;
 }
 
@@ -265,15 +272,37 @@ static const struct file_operations fops_mbox = {
 
 static int wil_debugfs_iomem_x32_set(void *data, u64 val)
 {
-       writel(val, (void __iomem *)data);
+       struct wil_debugfs_iomem_data *d = (struct
+                                           wil_debugfs_iomem_data *)data;
+       struct wil6210_priv *wil = d->wil;
+       int ret;
+
+       ret = wil_pm_runtime_get(wil);
+       if (ret < 0)
+               return ret;
+
+       writel(val, (void __iomem *)d->offset);
        wmb(); /* make sure write propagated to HW */
 
+       wil_pm_runtime_put(wil);
+
        return 0;
 }
 
 static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
 {
-       *val = readl((void __iomem *)data);
+       struct wil_debugfs_iomem_data *d = (struct
+                                           wil_debugfs_iomem_data *)data;
+       struct wil6210_priv *wil = d->wil;
+       int ret;
+
+       ret = wil_pm_runtime_get(wil);
+       if (ret < 0)
+               return ret;
+
+       *val = readl((void __iomem *)d->offset);
+
+       wil_pm_runtime_put(wil);
 
        return 0;
 }
@@ -284,10 +313,21 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
 static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
                                                   umode_t mode,
                                                   struct dentry *parent,
-                                                  void *value)
+                                                  void *value,
+                                                  struct wil6210_priv *wil)
 {
-       return debugfs_create_file(name, mode, parent, value,
-                                  &fops_iomem_x32);
+       struct dentry *file;
+       struct wil_debugfs_iomem_data *data = &wil->dbg_data.data_arr[
+                                             wil->dbg_data.iomem_data_count];
+
+       data->wil = wil;
+       data->offset = value;
+
+       file = debugfs_create_file(name, mode, parent, data, &fops_iomem_x32);
+       if (!IS_ERR_OR_NULL(file))
+               wil->dbg_data.iomem_data_count++;
+
+       return file;
 }
 
 static int wil_debugfs_ulong_set(void *data, u64 val)
@@ -346,7 +386,8 @@ static void wil6210_debugfs_init_offset(struct wil6210_priv *wil,
                case doff_io32:
                        f = wil_debugfs_create_iomem_x32(tbl[i].name,
                                                         tbl[i].mode, dbg,
-                                                        base + tbl[i].off);
+                                                        base + tbl[i].off,
+                                                        wil);
                        break;
                case doff_u8:
                        f = debugfs_create_u8(tbl[i].name, tbl[i].mode, dbg,
@@ -475,13 +516,22 @@ static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil,
 static int wil_memread_debugfs_show(struct seq_file *s, void *data)
 {
        struct wil6210_priv *wil = s->private;
-       void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr));
+       void __iomem *a;
+       int ret;
+
+       ret = wil_pm_runtime_get(wil);
+       if (ret < 0)
+               return ret;
+
+       a = wmi_buffer(wil, cpu_to_le32(mem_addr));
 
        if (a)
                seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, readl(a));
        else
                seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
 
+       wil_pm_runtime_put(wil);
+
        return 0;
 }
 
@@ -502,10 +552,12 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
 {
        enum { max_count = 4096 };
        struct wil_blob_wrapper *wil_blob = file->private_data;
+       struct wil6210_priv *wil = wil_blob->wil;
        loff_t pos = *ppos;
        size_t available = wil_blob->blob.size;
        void *buf;
        size_t ret;
+       int rc;
 
        if (test_bit(wil_status_suspending, wil_blob->wil->status) ||
            test_bit(wil_status_suspended, wil_blob->wil->status))
@@ -526,10 +578,19 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
        if (!buf)
                return -ENOMEM;
 
+       rc = wil_pm_runtime_get(wil);
+       if (rc < 0) {
+               kfree(buf);
+               return rc;
+       }
+
        wil_memcpy_fromio_32(buf, (const void __iomem *)
                             wil_blob->blob.data + pos, count);
 
        ret = copy_to_user(user_buf, buf, count);
+
+       wil_pm_runtime_put(wil);
+
        kfree(buf);
        if (ret == count)
                return -EFAULT;
@@ -1571,8 +1632,6 @@ static ssize_t wil_write_suspend_stats(struct file *file,
        struct wil6210_priv *wil = file->private_data;
 
        memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
-       wil->suspend_stats.min_suspend_time = ULONG_MAX;
-       wil->suspend_stats.collection_start = ktime_get();
 
        return len;
 }
@@ -1582,33 +1641,41 @@ static ssize_t wil_read_suspend_stats(struct file *file,
                                      size_t count, loff_t *ppos)
 {
        struct wil6210_priv *wil = file->private_data;
-       static char text[400];
-       int n;
-       unsigned long long stats_collection_time =
-               ktime_to_us(ktime_sub(ktime_get(),
-                                     wil->suspend_stats.collection_start));
+       char *text;
+       int n, ret, text_size = 500;
 
-       n = snprintf(text, sizeof(text),
-                    "Suspend statistics:\n"
+       text = kmalloc(text_size, GFP_KERNEL);
+       if (!text)
+               return -ENOMEM;
+
+       n = snprintf(text, text_size,
+                    "Radio on suspend statistics:\n"
+                    "successful suspends:%ld failed suspends:%ld\n"
+                    "successful resumes:%ld failed resumes:%ld\n"
+                    "rejected by device:%ld\n"
+                    "Radio off suspend statistics:\n"
                     "successful suspends:%ld failed suspends:%ld\n"
                     "successful resumes:%ld failed resumes:%ld\n"
-                    "rejected by host:%ld rejected by device:%ld\n"
-                    "total suspend time:%lld min suspend time:%lld\n"
-                    "max suspend time:%lld stats collection time: %lld\n",
-                    wil->suspend_stats.successful_suspends,
-                    wil->suspend_stats.failed_suspends,
-                    wil->suspend_stats.successful_resumes,
-                    wil->suspend_stats.failed_resumes,
-                    wil->suspend_stats.rejected_by_host,
+                    "General statistics:\n"
+                    "rejected by host:%ld\n",
+                    wil->suspend_stats.r_on.successful_suspends,
+                    wil->suspend_stats.r_on.failed_suspends,
+                    wil->suspend_stats.r_on.successful_resumes,
+                    wil->suspend_stats.r_on.failed_resumes,
                     wil->suspend_stats.rejected_by_device,
-                    wil->suspend_stats.total_suspend_time,
-                    wil->suspend_stats.min_suspend_time,
-                    wil->suspend_stats.max_suspend_time,
-                    stats_collection_time);
+                    wil->suspend_stats.r_off.successful_suspends,
+                    wil->suspend_stats.r_off.failed_suspends,
+                    wil->suspend_stats.r_off.successful_resumes,
+                    wil->suspend_stats.r_off.failed_resumes,
+                    wil->suspend_stats.rejected_by_host);
+
+       n = min_t(int, n, text_size);
 
-       n = min_t(int, n, sizeof(text));
+       ret = simple_read_from_buffer(user_buf, count, ppos, text, n);
 
-       return simple_read_from_buffer(user_buf, count, ppos, text, n);
+       kfree(text);
+
+       return ret;
 }
 
 static const struct file_operations fops_suspend_stats = {
@@ -1736,14 +1803,31 @@ static const struct dbg_off dbg_statics[] = {
        {},
 };
 
+static const int dbg_off_count = 4 * (ARRAY_SIZE(isr_off) - 1) +
+                               ARRAY_SIZE(dbg_wil_regs) - 1 +
+                               ARRAY_SIZE(pseudo_isr_off) - 1 +
+                               ARRAY_SIZE(lgc_itr_cnt_off) - 1 +
+                               ARRAY_SIZE(tx_itr_cnt_off) - 1 +
+                               ARRAY_SIZE(rx_itr_cnt_off) - 1;
+
 int wil6210_debugfs_init(struct wil6210_priv *wil)
 {
        struct dentry *dbg = wil->debug = debugfs_create_dir(WIL_NAME,
                        wil_to_wiphy(wil)->debugfsdir);
-
        if (IS_ERR_OR_NULL(dbg))
                return -ENODEV;
 
+       wil->dbg_data.data_arr = kcalloc(dbg_off_count,
+                                        sizeof(struct wil_debugfs_iomem_data),
+                                        GFP_KERNEL);
+       if (!wil->dbg_data.data_arr) {
+               debugfs_remove_recursive(dbg);
+               wil->debug = NULL;
+               return -ENOMEM;
+       }
+
+       wil->dbg_data.iomem_data_count = 0;
+
        wil_pmc_init(wil);
 
        wil6210_debugfs_init_files(wil, dbg);
@@ -1758,8 +1842,6 @@ int wil6210_debugfs_init(struct wil6210_priv *wil)
 
        wil6210_debugfs_create_ITR_CNT(wil, dbg);
 
-       wil->suspend_stats.collection_start = ktime_get();
-
        return 0;
 }
 
@@ -1768,6 +1850,8 @@ void wil6210_debugfs_remove(struct wil6210_priv *wil)
        debugfs_remove_recursive(wil->debug);
        wil->debug = NULL;
 
+       kfree(wil->dbg_data.data_arr);
+
        /* free pmc memory without sending command to fw, as it will
         * be reset on the way down anyway
         */
index adcfef4dabf756da33acd4d6bab05f09f9afdb7e..66200f616a37717bdf0b7816055d1be3b55d447f 100644 (file)
@@ -47,9 +47,14 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
        struct wil6210_priv *wil = ndev_to_wil(ndev);
        u32 tx_itr_en, tx_itr_val = 0;
        u32 rx_itr_en, rx_itr_val = 0;
+       int ret;
 
        wil_dbg_misc(wil, "ethtoolops_get_coalesce\n");
 
+       ret = wil_pm_runtime_get(wil);
+       if (ret < 0)
+               return ret;
+
        tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
        if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
                tx_itr_val = wil_r(wil, RGF_DMA_ITR_TX_CNT_TRSH);
@@ -58,6 +63,8 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
        if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
                rx_itr_val = wil_r(wil, RGF_DMA_ITR_RX_CNT_TRSH);
 
+       wil_pm_runtime_put(wil);
+
        cp->tx_coalesce_usecs = tx_itr_val;
        cp->rx_coalesce_usecs = rx_itr_val;
        return 0;
@@ -67,6 +74,7 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
                                       struct ethtool_coalesce *cp)
 {
        struct wil6210_priv *wil = ndev_to_wil(ndev);
+       int ret;
 
        wil_dbg_misc(wil, "ethtoolops_set_coalesce: rx %d usec, tx %d usec\n",
                     cp->rx_coalesce_usecs, cp->tx_coalesce_usecs);
@@ -86,8 +94,15 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
 
        wil->tx_max_burst_duration = cp->tx_coalesce_usecs;
        wil->rx_max_burst_duration = cp->rx_coalesce_usecs;
+
+       ret = wil_pm_runtime_get(wil);
+       if (ret < 0)
+               return ret;
+
        wil_configure_interrupt_moderation(wil);
 
+       wil_pm_runtime_put(wil);
+
        return 0;
 
 out_bad:
index e01acac88825d895dbae09fe67f6e21b0ad3fbd2..77d1902947e330b93264e4216e1fbb35164cca76 100644 (file)
                                             prefix_type, rowsize,      \
                                             groupsize, buf, len, ascii)
 
-#define FW_ADDR_CHECK(ioaddr, val, msg) do { \
-               ioaddr = wmi_buffer(wil, val); \
-               if (!ioaddr) { \
-                       wil_err_fw(wil, "bad " msg ": 0x%08x\n", \
-                                  le32_to_cpu(val)); \
-                       return -EINVAL; \
-               } \
-       } while (0)
+/* Translate firmware-image address @val into a host iomem pointer.
+ *
+ * On success *ioaddr is set from wmi_buffer_block(wil, val, size)
+ * (presumably validating @size bytes at @val - see wmi_buffer_block)
+ * and true is returned; on failure the bad address is logged, tagged
+ * with @msg, and false is returned.
+ */
+static bool wil_fw_addr_check(struct wil6210_priv *wil,
+                             void __iomem **ioaddr, __le32 val,
+                             u32 size, const char *msg)
+{
+       *ioaddr = wmi_buffer_block(wil, val, size);
+       if (!(*ioaddr)) {
+               wil_err_fw(wil, "bad %s: 0x%08x\n", msg, le32_to_cpu(val));
+               return false;
+       }
+       return true;
+}
 
 /**
  * wil_fw_verify - verify firmware file validity
@@ -124,24 +127,19 @@ static int fw_ignore_section(struct wil6210_priv *wil, const void *data,
        return 0;
 }
 
-static int fw_handle_comment(struct wil6210_priv *wil, const void *data,
-                            size_t size)
-{
-       wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, data, size, true);
-
-       return 0;
-}
-
 static int
-fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
-                      size_t size)
+fw_handle_comment(struct wil6210_priv *wil, const void *data,
+                 size_t size)
 {
        const struct wil_fw_record_capabilities *rec = data;
        size_t capa_size;
 
        if (size < sizeof(*rec) ||
-           le32_to_cpu(rec->magic) != WIL_FW_CAPABILITIES_MAGIC)
+           le32_to_cpu(rec->magic) != WIL_FW_CAPABILITIES_MAGIC) {
+               wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1,
+                               data, size, true);
                return 0;
+       }
 
        capa_size = size - offsetof(struct wil_fw_record_capabilities,
                                    capabilities);
@@ -165,7 +163,8 @@ static int fw_handle_data(struct wil6210_priv *wil, const void *data,
                return -EINVAL;
        }
 
-       FW_ADDR_CHECK(dst, d->addr, "address");
+       if (!wil_fw_addr_check(wil, &dst, d->addr, s, "address"))
+               return -EINVAL;
        wil_dbg_fw(wil, "write [0x%08x] <== %zu bytes\n", le32_to_cpu(d->addr),
                   s);
        wil_memcpy_toio_32(dst, d->data, s);
@@ -197,7 +196,8 @@ static int fw_handle_fill(struct wil6210_priv *wil, const void *data,
                return -EINVAL;
        }
 
-       FW_ADDR_CHECK(dst, d->addr, "address");
+       if (!wil_fw_addr_check(wil, &dst, d->addr, s, "address"))
+               return -EINVAL;
 
        v = le32_to_cpu(d->value);
        wil_dbg_fw(wil, "fill [0x%08x] <== 0x%08x, %zu bytes\n",
@@ -253,7 +253,8 @@ static int fw_handle_direct_write(struct wil6210_priv *wil, const void *data,
                u32 v = le32_to_cpu(block[i].value);
                u32 x, y;
 
-               FW_ADDR_CHECK(dst, block[i].addr, "address");
+               if (!wil_fw_addr_check(wil, &dst, block[i].addr, 0, "address"))
+                       return -EINVAL;
 
                x = readl(dst);
                y = (x & m) | (v & ~m);
@@ -319,10 +320,15 @@ static int fw_handle_gateway_data(struct wil6210_priv *wil, const void *data,
        wil_dbg_fw(wil, "gw write record [%3d] blocks, cmd 0x%08x\n",
                   n, gw_cmd);
 
-       FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr");
-       FW_ADDR_CHECK(gwa_val, d->gateway_value_addr, "gateway_value_addr");
-       FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr");
-       FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address");
+       if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0,
+                              "gateway_addr_addr") ||
+           !wil_fw_addr_check(wil, &gwa_val, d->gateway_value_addr, 0,
+                              "gateway_value_addr") ||
+           !wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0,
+                              "gateway_cmd_addr") ||
+           !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0,
+                              "gateway_ctrl_address"))
+               return -EINVAL;
 
        wil_dbg_fw(wil, "gw addresses: addr 0x%08x val 0x%08x"
                   " cmd 0x%08x ctl 0x%08x\n",
@@ -378,12 +384,19 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data,
        wil_dbg_fw(wil, "gw4 write record [%3d] blocks, cmd 0x%08x\n",
                   n, gw_cmd);
 
-       FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr");
+       if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0,
+                              "gateway_addr_addr"))
+               return -EINVAL;
        for (k = 0; k < ARRAY_SIZE(block->value); k++)
-               FW_ADDR_CHECK(gwa_val[k], d->gateway_value_addr[k],
-                             "gateway_value_addr");
-       FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr");
-       FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address");
+               if (!wil_fw_addr_check(wil, &gwa_val[k],
+                                      d->gateway_value_addr[k],
+                                      0, "gateway_value_addr"))
+                       return -EINVAL;
+       if (!wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0,
+                              "gateway_cmd_addr") ||
+           !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0,
+                              "gateway_ctrl_address"))
+               return -EINVAL;
 
        wil_dbg_fw(wil, "gw4 addresses: addr 0x%08x cmd 0x%08x ctl 0x%08x\n",
                   le32_to_cpu(d->gateway_addr_addr),
@@ -422,7 +435,7 @@ static const struct {
        int (*parse_handler)(struct wil6210_priv *wil, const void *data,
                             size_t size);
 } wil_fw_handlers[] = {
-       {wil_fw_type_comment, fw_handle_comment, fw_handle_capabilities},
+       {wil_fw_type_comment, fw_handle_comment, fw_handle_comment},
        {wil_fw_type_data, fw_handle_data, fw_ignore_section},
        {wil_fw_type_fill, fw_handle_fill, fw_ignore_section},
        /* wil_fw_type_action */
@@ -517,7 +530,7 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name,
 
        rc = request_firmware(&fw, name, wil_to_dev(wil));
        if (rc) {
-               wil_err_fw(wil, "Failed to load firmware %s\n", name);
+               wil_err_fw(wil, "Failed to load firmware %s rc %d\n", name, rc);
                return rc;
        }
        wil_dbg_fw(wil, "Loading <%s>, %zu bytes\n", name, fw->size);
index 59def4f3fcf3dcb1e65fead988befe4dae02b226..5cf341702dc117cf6d0c362fa648bb836de9002e 100644 (file)
@@ -358,6 +358,25 @@ static void wil_cache_mbox_regs(struct wil6210_priv *wil)
        wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
 }
 
+static bool wil_validate_mbox_regs(struct wil6210_priv *wil)
+{
+       size_t min_size = sizeof(struct wil6210_mbox_hdr) +
+               sizeof(struct wmi_cmd_hdr);
+
+       if (wil->mbox_ctl.rx.entry_size < min_size) {
+               wil_err(wil, "rx mbox entry too small (%d)\n",
+                       wil->mbox_ctl.rx.entry_size);
+               return false;
+       }
+       if (wil->mbox_ctl.tx.entry_size < min_size) {
+               wil_err(wil, "tx mbox entry too small (%d)\n",
+                       wil->mbox_ctl.tx.entry_size);
+               return false;
+       }
+
+       return true;
+}
+
 static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
 {
        struct wil6210_priv *wil = cookie;
@@ -393,7 +412,8 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
        if (isr & ISR_MISC_FW_READY) {
                wil_dbg_irq(wil, "IRQ: FW ready\n");
                wil_cache_mbox_regs(wil);
-               set_bit(wil_status_mbox_ready, wil->status);
+               if (wil_validate_mbox_regs(wil))
+                       set_bit(wil_status_mbox_ready, wil->status);
                /**
                 * Actual FW ready indicated by the
                 * WMI_FW_READY_EVENTID
index 885924abf61cc75a4e129ac16347c2b77053ec9d..1b53cd3f272b70d0670475d5cfee242eb7eb7b6d 100644 (file)
@@ -579,7 +579,6 @@ int wil_priv_init(struct wil6210_priv *wil)
        wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
                              WMI_WAKEUP_TRIGGER_BCAST;
        memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
-       wil->suspend_stats.min_suspend_time = ULONG_MAX;
        wil->vring_idle_trsh = 16;
 
        return 0;
@@ -760,6 +759,8 @@ static void wil_collect_fw_info(struct wil6210_priv *wil)
        u8 retry_short;
        int rc;
 
+       wil_refresh_fw_capabilities(wil);
+
        rc = wmi_get_mgmt_retry(wil, &retry_short);
        if (!rc) {
                wiphy->retry_short = retry_short;
@@ -767,6 +768,25 @@ static void wil_collect_fw_info(struct wil6210_priv *wil)
        }
 }
 
+void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
+{
+       struct wiphy *wiphy = wil_to_wiphy(wil);
+
+       wil->keep_radio_on_during_sleep =
+               wil->platform_ops.keep_radio_on_during_sleep &&
+               wil->platform_ops.keep_radio_on_during_sleep(
+                       wil->platform_handle) &&
+               test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
+
+       wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
+                wil->keep_radio_on_during_sleep);
+
+       if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
+               wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+       else
+               wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
+}
+
 void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
 {
        le32_to_cpus(&r->base);
@@ -1071,11 +1091,11 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
                        return rc;
                }
 
+               wil_collect_fw_info(wil);
+
                if (wil->ps_profile != WMI_PS_PROFILE_TYPE_DEFAULT)
                        wil_ps_update(wil, wil->ps_profile);
 
-               wil_collect_fw_info(wil);
-
                if (wil->platform_ops.notify) {
                        rc = wil->platform_ops.notify(wil->platform_handle,
                                                      WIL_PLATFORM_EVT_FW_RDY);
index 4a6ab2d0fdf17c138fedc836c6af18d2c43a919e..b641ac17a05374ad4ec3713c8d155c8803ededde 100644 (file)
@@ -21,6 +21,7 @@
 static int wil_open(struct net_device *ndev)
 {
        struct wil6210_priv *wil = ndev_to_wil(ndev);
+       int rc;
 
        wil_dbg_misc(wil, "open\n");
 
@@ -30,16 +31,29 @@ static int wil_open(struct net_device *ndev)
                return -EINVAL;
        }
 
-       return wil_up(wil);
+       rc = wil_pm_runtime_get(wil);
+       if (rc < 0)
+               return rc;
+
+       rc = wil_up(wil);
+       if (rc)
+               wil_pm_runtime_put(wil);
+
+       return rc;
 }
 
 static int wil_stop(struct net_device *ndev)
 {
        struct wil6210_priv *wil = ndev_to_wil(ndev);
+       int rc;
 
        wil_dbg_misc(wil, "stop\n");
 
-       return wil_down(wil);
+       rc = wil_down(wil);
+       if (!rc)
+               wil_pm_runtime_put(wil);
+
+       return rc;
 }
 
 static const struct net_device_ops wil_netdev_ops = {
index 6a3ab4bf916dd85a38a8f47243478b4ea12c3ea1..42a5480c764d360ac5d49659f97dade4cfcf2c3f 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/suspend.h>
 #include "wil6210.h"
 #include <linux/rtnetlink.h>
+#include <linux/pm_runtime.h>
 
 static bool use_msi = true;
 module_param(use_msi, bool, 0444);
@@ -31,10 +32,8 @@ module_param(ftm_mode, bool, 0444);
 MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false");
 
 #ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
 static int wil6210_pm_notify(struct notifier_block *notify_block,
                             unsigned long mode, void *unused);
-#endif /* CONFIG_PM_SLEEP */
 #endif /* CONFIG_PM */
 
 static
@@ -84,9 +83,7 @@ void wil_set_capabilities(struct wil6210_priv *wil)
 
        /* extract FW capabilities from file without loading the FW */
        wil_request_firmware(wil, wil->wil_fw_name, false);
-
-       if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
-               wil_to_wiphy(wil)->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+       wil_refresh_fw_capabilities(wil);
 }
 
 void wil_disable_irq(struct wil6210_priv *wil)
@@ -296,15 +293,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        wil_set_capabilities(wil);
        wil6210_clear_irq(wil);
 
-       wil->keep_radio_on_during_sleep =
-               wil->platform_ops.keep_radio_on_during_sleep &&
-               wil->platform_ops.keep_radio_on_during_sleep(
-                       wil->platform_handle) &&
-               test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
-
-       wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
-                wil->keep_radio_on_during_sleep);
-
        /* FW should raise IRQ when ready */
        rc = wil_if_pcie_enable(wil);
        if (rc) {
@@ -320,7 +308,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
 
 #ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
        wil->pm_notify.notifier_call = wil6210_pm_notify;
        rc = register_pm_notifier(&wil->pm_notify);
        if (rc)
@@ -328,11 +315,11 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                 * be prevented in a later phase if needed
                 */
                wil_err(wil, "register_pm_notifier failed: %d\n", rc);
-#endif /* CONFIG_PM_SLEEP */
 #endif /* CONFIG_PM */
 
        wil6210_debugfs_init(wil);
 
+       wil_pm_runtime_allow(wil);
 
        return 0;
 
@@ -360,11 +347,11 @@ static void wil_pcie_remove(struct pci_dev *pdev)
        wil_dbg_misc(wil, "pcie_remove\n");
 
 #ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
        unregister_pm_notifier(&wil->pm_notify);
-#endif /* CONFIG_PM_SLEEP */
 #endif /* CONFIG_PM */
 
+       wil_pm_runtime_forbid(wil);
+
        wil6210_debugfs_remove(wil);
        rtnl_lock();
        wil_p2p_wdev_free(wil);
@@ -386,13 +373,15 @@ static const struct pci_device_id wil6210_pcie_ids[] = {
 MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
 
 #ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
 
 static int wil6210_suspend(struct device *dev, bool is_runtime)
 {
        int rc = 0;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct wil6210_priv *wil = pci_get_drvdata(pdev);
+       struct net_device *ndev = wil_to_ndev(wil);
+       bool keep_radio_on = ndev->flags & IFF_UP &&
+                            wil->keep_radio_on_during_sleep;
 
        wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
 
@@ -400,16 +389,18 @@ static int wil6210_suspend(struct device *dev, bool is_runtime)
        if (rc)
                goto out;
 
-       rc = wil_suspend(wil, is_runtime);
+       rc = wil_suspend(wil, is_runtime, keep_radio_on);
        if (!rc) {
-               wil->suspend_stats.successful_suspends++;
-
-               /* If platform device supports keep_radio_on_during_sleep
-                * it will control PCIe master
+               /* In case radio stays on, platform device will control
+                * PCIe master
                 */
-               if (!wil->keep_radio_on_during_sleep)
+               if (!keep_radio_on) {
                        /* disable bus mastering */
                        pci_clear_master(pdev);
+                       wil->suspend_stats.r_off.successful_suspends++;
+               } else {
+                       wil->suspend_stats.r_on.successful_suspends++;
+               }
        }
 out:
        return rc;
@@ -420,23 +411,32 @@ static int wil6210_resume(struct device *dev, bool is_runtime)
        int rc = 0;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct wil6210_priv *wil = pci_get_drvdata(pdev);
+       struct net_device *ndev = wil_to_ndev(wil);
+       bool keep_radio_on = ndev->flags & IFF_UP &&
+                            wil->keep_radio_on_during_sleep;
 
        wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
 
-       /* If platform device supports keep_radio_on_during_sleep it will
-        * control PCIe master
+       /* In case radio stays on, platform device will control
+        * PCIe master
         */
-       if (!wil->keep_radio_on_during_sleep)
+       if (!keep_radio_on)
                /* allow master */
                pci_set_master(pdev);
-       rc = wil_resume(wil, is_runtime);
+       rc = wil_resume(wil, is_runtime, keep_radio_on);
        if (rc) {
                wil_err(wil, "device failed to resume (%d)\n", rc);
-               wil->suspend_stats.failed_resumes++;
-               if (!wil->keep_radio_on_during_sleep)
+               if (!keep_radio_on) {
                        pci_clear_master(pdev);
+                       wil->suspend_stats.r_off.failed_resumes++;
+               } else {
+                       wil->suspend_stats.r_on.failed_resumes++;
+               }
        } else {
-               wil->suspend_stats.successful_resumes++;
+               if (keep_radio_on)
+                       wil->suspend_stats.r_on.successful_resumes++;
+               else
+                       wil->suspend_stats.r_off.successful_resumes++;
        }
 
        return rc;
@@ -490,12 +490,43 @@ static int wil6210_pm_resume(struct device *dev)
 {
        return wil6210_resume(dev, false);
 }
-#endif /* CONFIG_PM_SLEEP */
 
+static int wil6210_pm_runtime_idle(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+       wil_dbg_pm(wil, "Runtime idle\n");
+
+       return wil_can_suspend(wil, true);
+}
+
+static int wil6210_pm_runtime_resume(struct device *dev)
+{
+       return wil6210_resume(dev, true);
+}
+
+static int wil6210_pm_runtime_suspend(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+       if (test_bit(wil_status_suspended, wil->status)) {
+               wil_dbg_pm(wil, "trying to suspend while suspended\n");
+               return 1;
+       }
+
+       return wil6210_suspend(dev, true);
+}
 #endif /* CONFIG_PM */
 
 static const struct dev_pm_ops wil6210_pm_ops = {
+#ifdef CONFIG_PM
        SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume)
+       SET_RUNTIME_PM_OPS(wil6210_pm_runtime_suspend,
+                          wil6210_pm_runtime_resume,
+                          wil6210_pm_runtime_idle)
+#endif /* CONFIG_PM */
 };
 
 static struct pci_driver wil6210_driver = {
index 8f5d1b447aaa2d72a51bfab141cc81b33c7278d7..056b180fad7fe477fdc7134a3fcf66cd85f04081 100644 (file)
 
 #include "wil6210.h"
 #include <linux/jiffies.h>
+#include <linux/pm_runtime.h>
+
+#define WIL6210_AUTOSUSPEND_DELAY_MS (1000)
 
 int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
 {
        int rc = 0;
        struct wireless_dev *wdev = wil->wdev;
        struct net_device *ndev = wil_to_ndev(wil);
+       bool wmi_only = test_bit(WMI_FW_CAPABILITY_WMI_ONLY,
+                                wil->fw_capabilities);
 
        wil_dbg_pm(wil, "can_suspend: %s\n", is_runtime ? "runtime" : "system");
 
+       if (wmi_only || debug_fw) {
+               wil_dbg_pm(wil, "Deny any suspend - %s mode\n",
+                          wmi_only ? "wmi_only" : "debug_fw");
+               rc = -EBUSY;
+               goto out;
+       }
+       if (is_runtime && !wil->platform_ops.suspend) {
+               rc = -EBUSY;
+               goto out;
+       }
        if (!(ndev->flags & IFF_UP)) {
                /* can always sleep when down */
                wil_dbg_pm(wil, "Interface is down\n");
@@ -44,6 +59,10 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
        /* interface is running */
        switch (wdev->iftype) {
        case NL80211_IFTYPE_MONITOR:
+               wil_dbg_pm(wil, "Sniffer\n");
+               rc = -EBUSY;
+               goto out;
+       /* for STA-like interface, don't runtime suspend */
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_P2P_CLIENT:
                if (test_bit(wil_status_fwconnecting, wil->status)) {
@@ -51,6 +70,12 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
                        rc = -EBUSY;
                        goto out;
                }
+               /* Runtime pm not supported in case the interface is up */
+               if (is_runtime) {
+                       wil_dbg_pm(wil, "STA-like interface\n");
+                       rc = -EBUSY;
+                       goto out;
+               }
                break;
        /* AP-like interface - can't suspend */
        default:
@@ -158,7 +183,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
                                        break;
                                wil_err(wil,
                                        "TO waiting for idle RX, suspend failed\n");
-                               wil->suspend_stats.failed_suspends++;
+                               wil->suspend_stats.r_on.failed_suspends++;
                                goto resume_after_fail;
                        }
                        wil_dbg_ratelimited(wil, "rx vring is not empty -> NAPI\n");
@@ -174,7 +199,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
         */
        if (!wil_is_wmi_idle(wil)) {
                wil_err(wil, "suspend failed due to pending WMI events\n");
-               wil->suspend_stats.failed_suspends++;
+               wil->suspend_stats.r_on.failed_suspends++;
                goto resume_after_fail;
        }
 
@@ -188,7 +213,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
                if (rc) {
                        wil_err(wil, "platform device failed to suspend (%d)\n",
                                rc);
-                       wil->suspend_stats.failed_suspends++;
+                       wil->suspend_stats.r_on.failed_suspends++;
                        wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
                        wil_unmask_irq(wil);
                        goto resume_after_fail;
@@ -235,6 +260,7 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
                rc = wil_down(wil);
                if (rc) {
                        wil_err(wil, "wil_down : %d\n", rc);
+                       wil->suspend_stats.r_off.failed_suspends++;
                        goto out;
                }
        }
@@ -247,6 +273,7 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
                rc = wil->platform_ops.suspend(wil->platform_handle, false);
                if (rc) {
                        wil_enable_irq(wil);
+                       wil->suspend_stats.r_off.failed_suspends++;
                        goto out;
                }
        }
@@ -279,12 +306,9 @@ static int wil_resume_radio_off(struct wil6210_priv *wil)
        return rc;
 }
 
-int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on)
 {
        int rc = 0;
-       struct net_device *ndev = wil_to_ndev(wil);
-       bool keep_radio_on = ndev->flags & IFF_UP &&
-                            wil->keep_radio_on_during_sleep;
 
        wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
 
@@ -301,19 +325,12 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
        wil_dbg_pm(wil, "suspend: %s => %d\n",
                   is_runtime ? "runtime" : "system", rc);
 
-       if (!rc)
-               wil->suspend_stats.suspend_start_time = ktime_get();
-
        return rc;
 }
 
-int wil_resume(struct wil6210_priv *wil, bool is_runtime)
+int wil_resume(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on)
 {
        int rc = 0;
-       struct net_device *ndev = wil_to_ndev(wil);
-       bool keep_radio_on = ndev->flags & IFF_UP &&
-                            wil->keep_radio_on_during_sleep;
-       unsigned long long suspend_time_usec = 0;
 
        wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
 
@@ -331,20 +348,49 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime)
        else
                rc = wil_resume_radio_off(wil);
 
-       if (rc)
-               goto out;
-
-       suspend_time_usec =
-               ktime_to_us(ktime_sub(ktime_get(),
-                                     wil->suspend_stats.suspend_start_time));
-       wil->suspend_stats.total_suspend_time += suspend_time_usec;
-       if (suspend_time_usec < wil->suspend_stats.min_suspend_time)
-               wil->suspend_stats.min_suspend_time = suspend_time_usec;
-       if (suspend_time_usec > wil->suspend_stats.max_suspend_time)
-               wil->suspend_stats.max_suspend_time = suspend_time_usec;
-
 out:
-       wil_dbg_pm(wil, "resume: %s => %d, suspend time %lld usec\n",
-                  is_runtime ? "runtime" : "system", rc, suspend_time_usec);
+       wil_dbg_pm(wil, "resume: %s => %d\n", is_runtime ? "runtime" : "system",
+                  rc);
        return rc;
 }
+
+void wil_pm_runtime_allow(struct wil6210_priv *wil)
+{
+       struct device *dev = wil_to_dev(wil);
+
+       pm_runtime_put_noidle(dev);
+       pm_runtime_set_autosuspend_delay(dev, WIL6210_AUTOSUSPEND_DELAY_MS);
+       pm_runtime_use_autosuspend(dev);
+       pm_runtime_allow(dev);
+}
+
+void wil_pm_runtime_forbid(struct wil6210_priv *wil)
+{
+       struct device *dev = wil_to_dev(wil);
+
+       pm_runtime_forbid(dev);
+       pm_runtime_get_noresume(dev);
+}
+
+int wil_pm_runtime_get(struct wil6210_priv *wil)
+{
+       int rc;
+       struct device *dev = wil_to_dev(wil);
+
+       rc = pm_runtime_get_sync(dev);
+       if (rc < 0) {
+               wil_err(wil, "pm_runtime_get_sync() failed, rc = %d\n", rc);
+               pm_runtime_put_noidle(dev);
+               return rc;
+       }
+
+       return 0;
+}
+
+void wil_pm_runtime_put(struct wil6210_priv *wil)
+{
+       struct device *dev = wil_to_dev(wil);
+
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
+}
index 1e340d04bd70c5ed0e746a620c03c08fc3c05e8b..cf27d9711dde28e594f49f7f6f64853a99ea8d5c 100644 (file)
@@ -82,18 +82,18 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
  */
 #define WIL_MAX_MPDU_OVERHEAD  (62)
 
-struct wil_suspend_stats {
+struct wil_suspend_count_stats {
        unsigned long successful_suspends;
-       unsigned long failed_suspends;
        unsigned long successful_resumes;
+       unsigned long failed_suspends;
        unsigned long failed_resumes;
-       unsigned long rejected_by_device;
+};
+
+struct wil_suspend_stats {
+       struct wil_suspend_count_stats r_off;
+       struct wil_suspend_count_stats r_on;
+       unsigned long rejected_by_device; /* only radio on */
        unsigned long rejected_by_host;
-       unsigned long long total_suspend_time;
-       unsigned long long min_suspend_time;
-       unsigned long long max_suspend_time;
-       ktime_t collection_start;
-       ktime_t suspend_start_time;
 };
 
 /* Calculate MAC buffer size for the firmware. It includes all overhead,
@@ -616,6 +616,16 @@ struct blink_on_off_time {
        u32 off_ms;
 };
 
+struct wil_debugfs_iomem_data {
+       void *offset;
+       struct wil6210_priv *wil;
+};
+
+struct wil_debugfs_data {
+       struct wil_debugfs_iomem_data *data_arr;
+       int iomem_data_count;
+};
+
 extern struct blink_on_off_time led_blink_time[WIL_LED_TIME_LAST];
 extern u8 led_id;
 extern u8 led_polarity;
@@ -708,6 +718,7 @@ struct wil6210_priv {
        u8 abft_len;
        u8 wakeup_trigger;
        struct wil_suspend_stats suspend_stats;
+       struct wil_debugfs_data dbg_data;
 
        void *platform_handle;
        struct wil_platform_ops platform_ops;
@@ -732,9 +743,7 @@ struct wil6210_priv {
        int fw_calib_result;
 
 #ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
        struct notifier_block pm_notify;
-#endif /* CONFIG_PM_SLEEP */
 #endif /* CONFIG_PM */
 
        bool suspend_resp_rcvd;
@@ -861,10 +870,12 @@ int wil_up(struct wil6210_priv *wil);
 int __wil_up(struct wil6210_priv *wil);
 int wil_down(struct wil6210_priv *wil);
 int __wil_down(struct wil6210_priv *wil);
+void wil_refresh_fw_capabilities(struct wil6210_priv *wil);
 void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r);
 int wil_find_cid(struct wil6210_priv *wil, const u8 *mac);
 void wil_set_ethtoolops(struct net_device *ndev);
 
+void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr, u32 size);
 void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr);
 void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr);
 int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
@@ -999,9 +1010,14 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name,
                         bool load);
 bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name);
 
+void wil_pm_runtime_allow(struct wil6210_priv *wil);
+void wil_pm_runtime_forbid(struct wil6210_priv *wil);
+int wil_pm_runtime_get(struct wil6210_priv *wil);
+void wil_pm_runtime_put(struct wil6210_priv *wil);
+
 int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
-int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
-int wil_resume(struct wil6210_priv *wil, bool is_runtime);
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on);
+int wil_resume(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on);
 bool wil_is_wmi_idle(struct wil6210_priv *wil);
 int wmi_resume(struct wil6210_priv *wil);
 int wmi_suspend(struct wil6210_priv *wil);
index ffdd2fa401b1502c6a35506a56dc66feb92c5df2..8ace618d0fd9023b275bd0391538b7ce21c90ea4 100644 (file)
@@ -140,13 +140,15 @@ static u32 wmi_addr_remap(u32 x)
 /**
  * Check address validity for WMI buffer; remap if needed
  * @ptr - internal (linker) fw/ucode address
+ * @size - if non zero, validate the block does not
+ *  exceed the device memory (bar)
  *
  * Valid buffer should be DWORD aligned
  *
  * return address for accessing buffer from the host;
  * if buffer is not valid, return NULL.
  */
-void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr_, u32 size)
 {
        u32 off;
        u32 ptr = le32_to_cpu(ptr_);
@@ -161,10 +163,17 @@ void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
        off = HOSTADDR(ptr);
        if (off > wil->bar_size - 4)
                return NULL;
+       if (size && ((off + size > wil->bar_size) || (off + size < off)))
+               return NULL;
 
        return wil->csr + off;
 }
 
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+{
+       return wmi_buffer_block(wil, ptr_, 0);
+}
+
 /**
  * Check address validity
  */
@@ -198,6 +207,232 @@ int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
        return 0;
 }
 
+static const char *cmdid2name(u16 cmdid)
+{
+       switch (cmdid) {
+       case WMI_NOTIFY_REQ_CMDID:
+               return "WMI_NOTIFY_REQ_CMD";
+       case WMI_START_SCAN_CMDID:
+               return "WMI_START_SCAN_CMD";
+       case WMI_CONNECT_CMDID:
+               return "WMI_CONNECT_CMD";
+       case WMI_DISCONNECT_CMDID:
+               return "WMI_DISCONNECT_CMD";
+       case WMI_SW_TX_REQ_CMDID:
+               return "WMI_SW_TX_REQ_CMD";
+       case WMI_GET_RF_SECTOR_PARAMS_CMDID:
+               return "WMI_GET_RF_SECTOR_PARAMS_CMD";
+       case WMI_SET_RF_SECTOR_PARAMS_CMDID:
+               return "WMI_SET_RF_SECTOR_PARAMS_CMD";
+       case WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID:
+               return "WMI_GET_SELECTED_RF_SECTOR_INDEX_CMD";
+       case WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID:
+               return "WMI_SET_SELECTED_RF_SECTOR_INDEX_CMD";
+       case WMI_BRP_SET_ANT_LIMIT_CMDID:
+               return "WMI_BRP_SET_ANT_LIMIT_CMD";
+       case WMI_TOF_SESSION_START_CMDID:
+               return "WMI_TOF_SESSION_START_CMD";
+       case WMI_AOA_MEAS_CMDID:
+               return "WMI_AOA_MEAS_CMD";
+       case WMI_PMC_CMDID:
+               return "WMI_PMC_CMD";
+       case WMI_TOF_GET_TX_RX_OFFSET_CMDID:
+               return "WMI_TOF_GET_TX_RX_OFFSET_CMD";
+       case WMI_TOF_SET_TX_RX_OFFSET_CMDID:
+               return "WMI_TOF_SET_TX_RX_OFFSET_CMD";
+       case WMI_VRING_CFG_CMDID:
+               return "WMI_VRING_CFG_CMD";
+       case WMI_BCAST_VRING_CFG_CMDID:
+               return "WMI_BCAST_VRING_CFG_CMD";
+       case WMI_TRAFFIC_SUSPEND_CMDID:
+               return "WMI_TRAFFIC_SUSPEND_CMD";
+       case WMI_TRAFFIC_RESUME_CMDID:
+               return "WMI_TRAFFIC_RESUME_CMD";
+       case WMI_ECHO_CMDID:
+               return "WMI_ECHO_CMD";
+       case WMI_SET_MAC_ADDRESS_CMDID:
+               return "WMI_SET_MAC_ADDRESS_CMD";
+       case WMI_LED_CFG_CMDID:
+               return "WMI_LED_CFG_CMD";
+       case WMI_PCP_START_CMDID:
+               return "WMI_PCP_START_CMD";
+       case WMI_PCP_STOP_CMDID:
+               return "WMI_PCP_STOP_CMD";
+       case WMI_SET_SSID_CMDID:
+               return "WMI_SET_SSID_CMD";
+       case WMI_GET_SSID_CMDID:
+               return "WMI_GET_SSID_CMD";
+       case WMI_SET_PCP_CHANNEL_CMDID:
+               return "WMI_SET_PCP_CHANNEL_CMD";
+       case WMI_GET_PCP_CHANNEL_CMDID:
+               return "WMI_GET_PCP_CHANNEL_CMD";
+       case WMI_P2P_CFG_CMDID:
+               return "WMI_P2P_CFG_CMD";
+       case WMI_START_LISTEN_CMDID:
+               return "WMI_START_LISTEN_CMD";
+       case WMI_START_SEARCH_CMDID:
+               return "WMI_START_SEARCH_CMD";
+       case WMI_DISCOVERY_STOP_CMDID:
+               return "WMI_DISCOVERY_STOP_CMD";
+       case WMI_DELETE_CIPHER_KEY_CMDID:
+               return "WMI_DELETE_CIPHER_KEY_CMD";
+       case WMI_ADD_CIPHER_KEY_CMDID:
+               return "WMI_ADD_CIPHER_KEY_CMD";
+       case WMI_SET_APPIE_CMDID:
+               return "WMI_SET_APPIE_CMD";
+       case WMI_CFG_RX_CHAIN_CMDID:
+               return "WMI_CFG_RX_CHAIN_CMD";
+       case WMI_TEMP_SENSE_CMDID:
+               return "WMI_TEMP_SENSE_CMD";
+       case WMI_DEL_STA_CMDID:
+               return "WMI_DEL_STA_CMD";
+       case WMI_DISCONNECT_STA_CMDID:
+               return "WMI_DISCONNECT_STA_CMD";
+       case WMI_VRING_BA_EN_CMDID:
+               return "WMI_VRING_BA_EN_CMD";
+       case WMI_VRING_BA_DIS_CMDID:
+               return "WMI_VRING_BA_DIS_CMD";
+       case WMI_RCP_DELBA_CMDID:
+               return "WMI_RCP_DELBA_CMD";
+       case WMI_RCP_ADDBA_RESP_CMDID:
+               return "WMI_RCP_ADDBA_RESP_CMD";
+       case WMI_PS_DEV_PROFILE_CFG_CMDID:
+               return "WMI_PS_DEV_PROFILE_CFG_CMD";
+       case WMI_SET_MGMT_RETRY_LIMIT_CMDID:
+               return "WMI_SET_MGMT_RETRY_LIMIT_CMD";
+       case WMI_GET_MGMT_RETRY_LIMIT_CMDID:
+               return "WMI_GET_MGMT_RETRY_LIMIT_CMD";
+       case WMI_ABORT_SCAN_CMDID:
+               return "WMI_ABORT_SCAN_CMD";
+       case WMI_NEW_STA_CMDID:
+               return "WMI_NEW_STA_CMD";
+       case WMI_SET_THERMAL_THROTTLING_CFG_CMDID:
+               return "WMI_SET_THERMAL_THROTTLING_CFG_CMD";
+       case WMI_GET_THERMAL_THROTTLING_CFG_CMDID:
+               return "WMI_GET_THERMAL_THROTTLING_CFG_CMD";
+       case WMI_LINK_MAINTAIN_CFG_WRITE_CMDID:
+               return "WMI_LINK_MAINTAIN_CFG_WRITE_CMD";
+       case WMI_LO_POWER_CALIB_FROM_OTP_CMDID:
+               return "WMI_LO_POWER_CALIB_FROM_OTP_CMD";
+       default:
+               return "Untracked CMD";
+       }
+}
+
+static const char *eventid2name(u16 eventid)
+{
+       switch (eventid) {
+       case WMI_NOTIFY_REQ_DONE_EVENTID:
+               return "WMI_NOTIFY_REQ_DONE_EVENT";
+       case WMI_DISCONNECT_EVENTID:
+               return "WMI_DISCONNECT_EVENT";
+       case WMI_SW_TX_COMPLETE_EVENTID:
+               return "WMI_SW_TX_COMPLETE_EVENT";
+       case WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID:
+               return "WMI_GET_RF_SECTOR_PARAMS_DONE_EVENT";
+       case WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID:
+               return "WMI_SET_RF_SECTOR_PARAMS_DONE_EVENT";
+       case WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID:
+               return "WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT";
+       case WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID:
+               return "WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT";
+       case WMI_BRP_SET_ANT_LIMIT_EVENTID:
+               return "WMI_BRP_SET_ANT_LIMIT_EVENT";
+       case WMI_FW_READY_EVENTID:
+               return "WMI_FW_READY_EVENT";
+       case WMI_TRAFFIC_RESUME_EVENTID:
+               return "WMI_TRAFFIC_RESUME_EVENT";
+       case WMI_TOF_GET_TX_RX_OFFSET_EVENTID:
+               return "WMI_TOF_GET_TX_RX_OFFSET_EVENT";
+       case WMI_TOF_SET_TX_RX_OFFSET_EVENTID:
+               return "WMI_TOF_SET_TX_RX_OFFSET_EVENT";
+       case WMI_VRING_CFG_DONE_EVENTID:
+               return "WMI_VRING_CFG_DONE_EVENT";
+       case WMI_READY_EVENTID:
+               return "WMI_READY_EVENT";
+       case WMI_RX_MGMT_PACKET_EVENTID:
+               return "WMI_RX_MGMT_PACKET_EVENT";
+       case WMI_TX_MGMT_PACKET_EVENTID:
+               return "WMI_TX_MGMT_PACKET_EVENT";
+       case WMI_SCAN_COMPLETE_EVENTID:
+               return "WMI_SCAN_COMPLETE_EVENT";
+       case WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID:
+               return "WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT";
+       case WMI_CONNECT_EVENTID:
+               return "WMI_CONNECT_EVENT";
+       case WMI_EAPOL_RX_EVENTID:
+               return "WMI_EAPOL_RX_EVENT";
+       case WMI_BA_STATUS_EVENTID:
+               return "WMI_BA_STATUS_EVENT";
+       case WMI_RCP_ADDBA_REQ_EVENTID:
+               return "WMI_RCP_ADDBA_REQ_EVENT";
+       case WMI_DELBA_EVENTID:
+               return "WMI_DELBA_EVENT";
+       case WMI_VRING_EN_EVENTID:
+               return "WMI_VRING_EN_EVENT";
+       case WMI_DATA_PORT_OPEN_EVENTID:
+               return "WMI_DATA_PORT_OPEN_EVENT";
+       case WMI_AOA_MEAS_EVENTID:
+               return "WMI_AOA_MEAS_EVENT";
+       case WMI_TOF_SESSION_END_EVENTID:
+               return "WMI_TOF_SESSION_END_EVENT";
+       case WMI_TOF_GET_CAPABILITIES_EVENTID:
+               return "WMI_TOF_GET_CAPABILITIES_EVENT";
+       case WMI_TOF_SET_LCR_EVENTID:
+               return "WMI_TOF_SET_LCR_EVENT";
+       case WMI_TOF_SET_LCI_EVENTID:
+               return "WMI_TOF_SET_LCI_EVENT";
+       case WMI_TOF_FTM_PER_DEST_RES_EVENTID:
+               return "WMI_TOF_FTM_PER_DEST_RES_EVENT";
+       case WMI_TOF_CHANNEL_INFO_EVENTID:
+               return "WMI_TOF_CHANNEL_INFO_EVENT";
+       case WMI_TRAFFIC_SUSPEND_EVENTID:
+               return "WMI_TRAFFIC_SUSPEND_EVENT";
+       case WMI_ECHO_RSP_EVENTID:
+               return "WMI_ECHO_RSP_EVENT";
+       case WMI_LED_CFG_DONE_EVENTID:
+               return "WMI_LED_CFG_DONE_EVENT";
+       case WMI_PCP_STARTED_EVENTID:
+               return "WMI_PCP_STARTED_EVENT";
+       case WMI_PCP_STOPPED_EVENTID:
+               return "WMI_PCP_STOPPED_EVENT";
+       case WMI_GET_SSID_EVENTID:
+               return "WMI_GET_SSID_EVENT";
+       case WMI_GET_PCP_CHANNEL_EVENTID:
+               return "WMI_GET_PCP_CHANNEL_EVENT";
+       case WMI_P2P_CFG_DONE_EVENTID:
+               return "WMI_P2P_CFG_DONE_EVENT";
+       case WMI_LISTEN_STARTED_EVENTID:
+               return "WMI_LISTEN_STARTED_EVENT";
+       case WMI_SEARCH_STARTED_EVENTID:
+               return "WMI_SEARCH_STARTED_EVENT";
+       case WMI_DISCOVERY_STOPPED_EVENTID:
+               return "WMI_DISCOVERY_STOPPED_EVENT";
+       case WMI_CFG_RX_CHAIN_DONE_EVENTID:
+               return "WMI_CFG_RX_CHAIN_DONE_EVENT";
+       case WMI_TEMP_SENSE_DONE_EVENTID:
+               return "WMI_TEMP_SENSE_DONE_EVENT";
+       case WMI_RCP_ADDBA_RESP_SENT_EVENTID:
+               return "WMI_RCP_ADDBA_RESP_SENT_EVENT";
+       case WMI_PS_DEV_PROFILE_CFG_EVENTID:
+               return "WMI_PS_DEV_PROFILE_CFG_EVENT";
+       case WMI_SET_MGMT_RETRY_LIMIT_EVENTID:
+               return "WMI_SET_MGMT_RETRY_LIMIT_EVENT";
+       case WMI_GET_MGMT_RETRY_LIMIT_EVENTID:
+               return "WMI_GET_MGMT_RETRY_LIMIT_EVENT";
+       case WMI_SET_THERMAL_THROTTLING_CFG_EVENTID:
+               return "WMI_SET_THERMAL_THROTTLING_CFG_EVENT";
+       case WMI_GET_THERMAL_THROTTLING_CFG_EVENTID:
+               return "WMI_GET_THERMAL_THROTTLING_CFG_EVENT";
+       case WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID:
+               return "WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENT";
+       case WMI_LO_POWER_CALIB_FROM_OTP_EVENTID:
+               return "WMI_LO_POWER_CALIB_FROM_OTP_EVENT";
+       default:
+               return "Untracked EVENT";
+       }
+}
+
 static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
 {
        struct {
@@ -222,7 +457,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
        uint retry;
        int rc = 0;
 
-       if (sizeof(cmd) + len > r->entry_size) {
+       if (len > r->entry_size - sizeof(cmd)) {
                wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
                        (int)(sizeof(cmd) + len), r->entry_size);
                return -ERANGE;
@@ -294,7 +529,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
        }
        cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
        /* set command */
-       wil_dbg_wmi(wil, "WMI command 0x%04x [%d]\n", cmdid, len);
+       wil_dbg_wmi(wil, "sending %s (0x%04x) [%d]\n",
+                   cmdid2name(cmdid), cmdid, len);
        wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
                         sizeof(cmd), true);
        wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
@@ -963,8 +1199,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
                        }
                        spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
 
-                       wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n",
-                                   id, wmi->mid, tstamp);
+                       wil_dbg_wmi(wil, "recv %s (0x%04x) MID %d @%d msec\n",
+                                   eventid2name(id), id, wmi->mid, tstamp);
                        trace_wil6210_wmi_event(wmi, &wmi[1],
                                                len - sizeof(*wmi));
                }
@@ -1380,8 +1616,14 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
        };
        int rc;
        u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
-       struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
+       struct wmi_set_appie_cmd *cmd;
+
+       if (len < ie_len) {
+               rc = -EINVAL;
+               goto out;
+       }
 
+       cmd = kzalloc(len, GFP_KERNEL);
        if (!cmd) {
                rc = -ENOMEM;
                goto out;
@@ -1801,6 +2043,16 @@ void wmi_event_flush(struct wil6210_priv *wil)
        spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
 }
 
+static const char *suspend_status2name(u8 status)
+{
+       switch (status) {
+       case WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE:
+               return "LINK_NOT_IDLE";
+       default:
+               return "Untracked status";
+       }
+}
+
 int wmi_suspend(struct wil6210_priv *wil)
 {
        int rc;
@@ -1816,7 +2068,7 @@ int wmi_suspend(struct wil6210_priv *wil)
        wil->suspend_resp_rcvd = false;
        wil->suspend_resp_comp = false;
 
-       reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED;
+       reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE;
 
        rc = wmi_call(wil, WMI_TRAFFIC_SUSPEND_CMDID, &cmd, sizeof(cmd),
                      WMI_TRAFFIC_SUSPEND_EVENTID, &reply, sizeof(reply),
@@ -1848,8 +2100,9 @@ int wmi_suspend(struct wil6210_priv *wil)
        }
 
        wil_dbg_wmi(wil, "suspend_response_completed rcvd\n");
-       if (reply.evt.status == WMI_TRAFFIC_SUSPEND_REJECTED) {
-               wil_dbg_pm(wil, "device rejected the suspend\n");
+       if (reply.evt.status != WMI_TRAFFIC_SUSPEND_APPROVED) {
+               wil_dbg_pm(wil, "device rejected the suspend, %s\n",
+                          suspend_status2name(reply.evt.status));
                wil->suspend_stats.rejected_by_device++;
        }
        rc = reply.evt.status;
@@ -1861,21 +2114,50 @@ out:
        return rc;
 }
 
+static void resume_triggers2string(u32 triggers, char *string, int str_size)
+{
+       string[0] = '\0';
+
+       if (!triggers) {
+               strlcat(string, " UNKNOWN", str_size);
+               return;
+       }
+
+       if (triggers & WMI_RESUME_TRIGGER_HOST)
+               strlcat(string, " HOST", str_size);
+
+       if (triggers & WMI_RESUME_TRIGGER_UCAST_RX)
+               strlcat(string, " UCAST_RX", str_size);
+
+       if (triggers & WMI_RESUME_TRIGGER_BCAST_RX)
+               strlcat(string, " BCAST_RX", str_size);
+
+       if (triggers & WMI_RESUME_TRIGGER_WMI_EVT)
+               strlcat(string, " WMI_EVT", str_size);
+}
+
 int wmi_resume(struct wil6210_priv *wil)
 {
        int rc;
+       char string[100];
        struct {
                struct wmi_cmd_hdr wmi;
                struct wmi_traffic_resume_event evt;
        } __packed reply;
 
        reply.evt.status = WMI_TRAFFIC_RESUME_FAILED;
+       reply.evt.resume_triggers = WMI_RESUME_TRIGGER_UNKNOWN;
 
        rc = wmi_call(wil, WMI_TRAFFIC_RESUME_CMDID, NULL, 0,
                      WMI_TRAFFIC_RESUME_EVENTID, &reply, sizeof(reply),
                      WIL_WAIT_FOR_SUSPEND_RESUME_COMP);
        if (rc)
                return rc;
+       resume_triggers2string(le32_to_cpu(reply.evt.resume_triggers), string,
+                              sizeof(string));
+       wil_dbg_pm(wil, "device resume %s, resume triggers:%s (0x%x)\n",
+                  reply.evt.status ? "failed" : "passed", string,
+                  le32_to_cpu(reply.evt.resume_triggers));
 
        return reply.evt.status;
 }
@@ -1906,8 +2188,8 @@ static void wmi_event_handle(struct wil6210_priv *wil,
                void *evt_data = (void *)(&wmi[1]);
                u16 id = le16_to_cpu(wmi->command_id);
 
-               wil_dbg_wmi(wil, "Handle WMI 0x%04x (reply_id 0x%04x)\n",
-                           id, wil->reply_id);
+               wil_dbg_wmi(wil, "Handle %s (0x%04x) (reply_id 0x%04x)\n",
+                           eventid2name(id), id, wil->reply_id);
                /* check if someone waits for this event */
                if (wil->reply_id && wil->reply_id == id) {
                        WARN_ON(wil->reply_buf);
index 5263ee717a4fbe5bbd91bf00c45ffc8c154763ee..d9e220a8c0f4879b11bdb000c42351b24ff95b32 100644 (file)
@@ -2267,8 +2267,8 @@ struct wmi_link_maintain_cfg_read_done_event {
 } __packed;
 
 enum wmi_traffic_suspend_status {
-       WMI_TRAFFIC_SUSPEND_APPROVED    = 0x0,
-       WMI_TRAFFIC_SUSPEND_REJECTED    = 0x1,
+       WMI_TRAFFIC_SUSPEND_APPROVED                    = 0x0,
+       WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE      = 0x1,
 };
 
 /* WMI_TRAFFIC_SUSPEND_EVENTID */
@@ -2282,10 +2282,21 @@ enum wmi_traffic_resume_status {
        WMI_TRAFFIC_RESUME_FAILED       = 0x1,
 };
 
+enum wmi_resume_trigger {
+       WMI_RESUME_TRIGGER_UNKNOWN      = 0x0,
+       WMI_RESUME_TRIGGER_HOST         = 0x1,
+       WMI_RESUME_TRIGGER_UCAST_RX     = 0x2,
+       WMI_RESUME_TRIGGER_BCAST_RX     = 0x4,
+       WMI_RESUME_TRIGGER_WMI_EVT      = 0x8,
+};
+
 /* WMI_TRAFFIC_RESUME_EVENTID */
 struct wmi_traffic_resume_event {
-       /* enum wmi_traffic_resume_status_e */
+       /* enum wmi_traffic_resume_status */
        u8 status;
+       u8 reserved[3];
+       /* enum wmi_resume_trigger bitmap */
+       __le32 resume_triggers;
 } __packed;
 
 /* Power Save command completion status codes */
index cd587325e2867915165d278e89174594d0b6b02b..f8b47c1f4bcd68376bbd0c75bde936585e6401a5 100644 (file)
@@ -137,27 +137,27 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
                if (sdiodev->bus_if->chip == BRCM_CC_43362_CHIP_ID) {
                        /* assign GPIO to SDIO core */
                        addr = CORE_CC_REG(SI_ENUM_BASE, gpiocontrol);
-                       gpiocontrol = brcmf_sdiod_regrl(sdiodev, addr, &ret);
+                       gpiocontrol = brcmf_sdiod_readl(sdiodev, addr, &ret);
                        gpiocontrol |= 0x2;
-                       brcmf_sdiod_regwl(sdiodev, addr, gpiocontrol, &ret);
+                       brcmf_sdiod_writel(sdiodev, addr, gpiocontrol, &ret);
 
-                       brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_SELECT, 0xf,
-                                         &ret);
-                       brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
-                       brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
+                       brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_SELECT,
+                                          0xf, &ret);
+                       brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
+                       brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
                }
 
                /* must configure SDIO_CCCR_IENx to enable irq */
-               data = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
+               data = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_IENx, &ret);
                data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
-               brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
+               brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, data, &ret);
 
                /* redirect, configure and enable io for interrupt signal */
-               data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
+               data = SDIO_CCCR_BRCM_SEPINT_MASK | SDIO_CCCR_BRCM_SEPINT_OE;
                if (pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
-                       data |= SDIO_SEPINT_ACT_HI;
-               brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
-
+                       data |= SDIO_CCCR_BRCM_SEPINT_ACT_HI;
+               brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT,
+                                    data, &ret);
                sdio_release_host(sdiodev->func[1]);
        } else {
                brcmf_dbg(SDIO, "Entering\n");
@@ -183,8 +183,8 @@ void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
 
                pdata = &sdiodev->settings->bus.sdio;
                sdio_claim_host(sdiodev->func[1]);
-               brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
-               brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
+               brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
+               brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
                sdio_release_host(sdiodev->func[1]);
 
                sdiodev->oob_irq_requested = false;
@@ -230,244 +230,93 @@ void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
        sdiodev->state = state;
 }
 
-static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func,
-                                       uint regaddr, u8 byte)
-{
-       int err_ret;
-
-       /*
-        * Can only directly write to some F0 registers.
-        * Handle CCCR_IENx and CCCR_ABORT command
-        * as a special case.
-        */
-       if ((regaddr == SDIO_CCCR_ABORT) ||
-           (regaddr == SDIO_CCCR_IENx))
-               sdio_writeb(func, byte, regaddr, &err_ret);
-       else
-               sdio_f0_writeb(func, byte, regaddr, &err_ret);
-
-       return err_ret;
-}
-
-static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
-                                   u32 addr, u8 regsz, void *data, bool write)
-{
-       struct sdio_func *func;
-       int ret = -EINVAL;
-
-       brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
-                 write, fn, addr, regsz);
-
-       /* only allow byte access on F0 */
-       if (WARN_ON(regsz > 1 && !fn))
-               return -EINVAL;
-       func = sdiodev->func[fn];
-
-       switch (regsz) {
-       case sizeof(u8):
-               if (write) {
-                       if (fn)
-                               sdio_writeb(func, *(u8 *)data, addr, &ret);
-                       else
-                               ret = brcmf_sdiod_f0_writeb(func, addr,
-                                                           *(u8 *)data);
-               } else {
-                       if (fn)
-                               *(u8 *)data = sdio_readb(func, addr, &ret);
-                       else
-                               *(u8 *)data = sdio_f0_readb(func, addr, &ret);
-               }
-               break;
-       case sizeof(u16):
-               if (write)
-                       sdio_writew(func, *(u16 *)data, addr, &ret);
-               else
-                       *(u16 *)data = sdio_readw(func, addr, &ret);
-               break;
-       case sizeof(u32):
-               if (write)
-                       sdio_writel(func, *(u32 *)data, addr, &ret);
-               else
-                       *(u32 *)data = sdio_readl(func, addr, &ret);
-               break;
-       default:
-               brcmf_err("invalid size: %d\n", regsz);
-               break;
-       }
-
-       if (ret)
-               brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
-                         write ? "write" : "read", fn, addr, ret);
-
-       return ret;
-}
-
-static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
-                                  u8 regsz, void *data, bool write)
-{
-       u8 func;
-       s32 retry = 0;
-       int ret;
-
-       if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
-               return -ENOMEDIUM;
-
-       /*
-        * figure out how to read the register based on address range
-        * 0x00 ~ 0x7FF: function 0 CCCR and FBR
-        * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
-        * The rest: function 1 silicon backplane core registers
-        */
-       if ((addr & ~REG_F0_REG_MASK) == 0)
-               func = SDIO_FUNC_0;
-       else
-               func = SDIO_FUNC_1;
-
-       do {
-               if (!write)
-                       memset(data, 0, regsz);
-               /* for retry wait for 1 ms till bus get settled down */
-               if (retry)
-                       usleep_range(1000, 2000);
-               ret = brcmf_sdiod_request_data(sdiodev, func, addr, regsz,
-                                              data, write);
-       } while (ret != 0 && ret != -ENOMEDIUM &&
-                retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
-
-       if (ret == -ENOMEDIUM)
-               brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
-       else if (ret != 0) {
-               /*
-                * SleepCSR register access can fail when
-                * waking up the device so reduce this noise
-                * in the logs.
-                */
-               if (addr != SBSDIO_FUNC1_SLEEPCSR)
-                       brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
-                                 write ? "write" : "read", func, addr, ret);
-               else
-                       brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
-                                 write ? "write" : "read", func, addr, ret);
-       }
-       return ret;
-}
-
-static int
-brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
+static int brcmf_sdiod_set_backplane_window(struct brcmf_sdio_dev *sdiodev,
+                                           u32 addr)
 {
+       u32 v, bar0 = addr & SBSDIO_SBWINDOW_MASK;
        int err = 0, i;
-       u8 addr[3];
-
-       if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
-               return -ENOMEDIUM;
-
-       addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
-       addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
-       addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;
-
-       for (i = 0; i < 3; i++) {
-               err = brcmf_sdiod_regrw_helper(sdiodev,
-                                              SBSDIO_FUNC1_SBADDRLOW + i,
-                                              sizeof(u8), &addr[i], true);
-               if (err) {
-                       brcmf_err("failed at addr: 0x%0x\n",
-                                 SBSDIO_FUNC1_SBADDRLOW + i);
-                       break;
-               }
-       }
 
-       return err;
-}
+       if (bar0 == sdiodev->sbwad)
+               return 0;
 
-static int
-brcmf_sdiod_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
-{
-       uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
-       int err = 0;
+       v = bar0 >> 8;
 
-       if (bar0 != sdiodev->sbwad) {
-               err = brcmf_sdiod_set_sbaddr_window(sdiodev, bar0);
-               if (err)
-                       return err;
+       for (i = 0 ; i < 3 && !err ; i++, v >>= 8)
+               brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_SBADDRLOW + i,
+                                  v & 0xff, &err);
 
+       if (!err)
                sdiodev->sbwad = bar0;
-       }
 
-       *addr &= SBSDIO_SB_OFT_ADDR_MASK;
-
-       if (width == 4)
-               *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
-
-       return 0;
+       return err;
 }
 
-u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
+u32 brcmf_sdiod_readl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
 {
-       u8 data;
+       u32 data = 0;
        int retval;
 
-       brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
-       retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
-                                         false);
-       brcmf_dbg(SDIO, "data:0x%02x\n", data);
+       retval = brcmf_sdiod_set_backplane_window(sdiodev, addr);
+       if (retval)
+               goto out;
+
+       addr &= SBSDIO_SB_OFT_ADDR_MASK;
+       addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+       data = sdio_readl(sdiodev->func[1], addr, &retval);
 
+out:
        if (ret)
                *ret = retval;
 
        return data;
 }
 
-u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
+void brcmf_sdiod_writel(struct brcmf_sdio_dev *sdiodev, u32 addr,
+                       u32 data, int *ret)
 {
-       u32 data = 0;
        int retval;
 
-       brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
-       retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
+       retval = brcmf_sdiod_set_backplane_window(sdiodev, addr);
        if (retval)
-               goto done;
-       retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
-                                         false);
-       brcmf_dbg(SDIO, "data:0x%08x\n", data);
-
-done:
-       if (ret)
-               *ret = retval;
+               goto out;
 
-       return data;
-}
+       addr &= SBSDIO_SB_OFT_ADDR_MASK;
+       addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
 
-void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
-                     u8 data, int *ret)
-{
-       int retval;
+       sdio_writel(sdiodev->func[1], data, addr, &retval);
 
-       brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
-       retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
-                                         true);
+out:
        if (ret)
                *ret = retval;
 }
 
-void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
-                     u32 data, int *ret)
+static int brcmf_sdiod_buff_read(struct brcmf_sdio_dev *sdiodev, uint fn,
+                                u32 addr, struct sk_buff *pkt)
 {
-       int retval;
+       unsigned int req_sz;
+       int err;
 
-       brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
-       retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
-       if (retval)
-               goto done;
-       retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
-                                         true);
+       /* Single skb use the standard mmc interface */
+       req_sz = pkt->len + 3;
+       req_sz &= (uint)~3;
 
-done:
-       if (ret)
-               *ret = retval;
+       if (fn == 1)
+               err = sdio_memcpy_fromio(sdiodev->func[fn],
+                                        ((u8 *)(pkt->data)), addr, req_sz);
+       else
+               /* function 2 read is FIFO operation */
+               err = sdio_readsb(sdiodev->func[fn],
+                                 ((u8 *)(pkt->data)), addr, req_sz);
+
+       if (err == -ENOMEDIUM)
+               brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
+
+       return err;
 }
 
-static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
-                            bool write, u32 addr, struct sk_buff *pkt)
+static int brcmf_sdiod_buff_write(struct brcmf_sdio_dev *sdiodev, uint fn,
+                                 u32 addr, struct sk_buff *pkt)
 {
        unsigned int req_sz;
        int err;
@@ -476,18 +325,12 @@ static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
        req_sz = pkt->len + 3;
        req_sz &= (uint)~3;
 
-       if (write)
-               err = sdio_memcpy_toio(sdiodev->func[fn], addr,
-                                      ((u8 *)(pkt->data)), req_sz);
-       else if (fn == 1)
-               err = sdio_memcpy_fromio(sdiodev->func[fn], ((u8 *)(pkt->data)),
-                                        addr, req_sz);
-       else
-               /* function 2 read is FIFO operation */
-               err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr,
-                                 req_sz);
+       err = sdio_memcpy_toio(sdiodev->func[fn], addr,
+                              ((u8 *)(pkt->data)), req_sz);
+
        if (err == -ENOMEDIUM)
                brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
+
        return err;
 }
 
@@ -691,11 +534,14 @@ int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
 
        brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);
 
-       err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+       err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
        if (err)
                goto done;
 
-       err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, pkt);
+       addr &= SBSDIO_SB_OFT_ADDR_MASK;
+       addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+       err = brcmf_sdiod_buff_read(sdiodev, SDIO_FUNC_2, addr, pkt);
 
 done:
        return err;
@@ -712,19 +558,22 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
        brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
                  addr, pktq->qlen);
 
-       err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+       err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
        if (err)
                goto done;
 
+       addr &= SBSDIO_SB_OFT_ADDR_MASK;
+       addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
        if (pktq->qlen == 1)
-               err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
-                                        pktq->next);
+               err = brcmf_sdiod_buff_read(sdiodev, SDIO_FUNC_2, addr,
+                                           pktq->next);
        else if (!sdiodev->sg_support) {
                glom_skb = brcmu_pkt_buf_get_skb(totlen);
                if (!glom_skb)
                        return -ENOMEM;
-               err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
-                                        glom_skb);
+               err = brcmf_sdiod_buff_read(sdiodev, SDIO_FUNC_2, addr,
+                                           glom_skb);
                if (err)
                        goto done;
 
@@ -748,6 +597,7 @@ int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
        int err;
 
        mypkt = brcmu_pkt_buf_get_skb(nbytes);
+
        if (!mypkt) {
                brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
                          nbytes);
@@ -756,15 +606,19 @@ int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
 
        memcpy(mypkt->data, buf, nbytes);
 
-       err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+       err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
+       if (err)
+               return err;
+
+       addr &= SBSDIO_SB_OFT_ADDR_MASK;
+       addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
 
        if (!err)
-               err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true, addr,
-                                        mypkt);
+               err = brcmf_sdiod_buff_write(sdiodev, SDIO_FUNC_2, addr, mypkt);
 
        brcmu_pkt_buf_free_skb(mypkt);
-       return err;
 
+       return err;
 }
 
 int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
@@ -776,20 +630,24 @@ int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
 
        brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);
 
-       err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+       err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
        if (err)
                return err;
 
-       if (pktq->qlen == 1 || !sdiodev->sg_support)
+       addr &= SBSDIO_SB_OFT_ADDR_MASK;
+       addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+       if (pktq->qlen == 1 || !sdiodev->sg_support) {
                skb_queue_walk(pktq, skb) {
-                       err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true,
-                                                addr, skb);
+                       err = brcmf_sdiod_buff_write(sdiodev, SDIO_FUNC_2,
+                                                    addr, skb);
                        if (err)
                                break;
                }
-       else
+       } else {
                err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, true, addr,
                                            pktq);
+       }
 
        return err;
 }
@@ -798,7 +656,7 @@ int
 brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
                  u8 *data, uint size)
 {
-       int bcmerror = 0;
+       int err = 0;
        struct sk_buff *pkt;
        u32 sdaddr;
        uint dsize;
@@ -823,8 +681,8 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
        /* Do the transfer(s) */
        while (size) {
                /* Set the backplane window to include the start address */
-               bcmerror = brcmf_sdiod_set_sbaddr_window(sdiodev, address);
-               if (bcmerror)
+               err = brcmf_sdiod_set_backplane_window(sdiodev, address);
+               if (err)
                        break;
 
                brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
@@ -835,11 +693,17 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
                sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
 
                skb_put(pkt, dsize);
-               if (write)
+
+               if (write) {
                        memcpy(pkt->data, data, dsize);
-               bcmerror = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_1, write,
-                                             sdaddr, pkt);
-               if (bcmerror) {
+                       err = brcmf_sdiod_buff_write(sdiodev, SDIO_FUNC_1,
+                                                    sdaddr, pkt);
+               } else {
+                       err = brcmf_sdiod_buff_read(sdiodev, SDIO_FUNC_1,
+                                                   sdaddr, pkt);
+               }
+
+               if (err) {
                        brcmf_err("membytes transfer failed\n");
                        break;
                }
@@ -859,24 +723,17 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
 
        dev_kfree_skb(pkt);
 
-       /* Return the window to backplane enumeration space for core access */
-       if (brcmf_sdiod_set_sbaddr_window(sdiodev, sdiodev->sbwad))
-               brcmf_err("FAILED to set window back to 0x%x\n",
-                         sdiodev->sbwad);
-
        sdio_release_host(sdiodev->func[1]);
 
-       return bcmerror;
+       return err;
 }
 
-int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
+int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, u8 fn)
 {
-       char t_func = (char)fn;
        brcmf_dbg(SDIO, "Enter\n");
 
-       /* issue abort cmd52 command through F0 */
-       brcmf_sdiod_request_data(sdiodev, SDIO_FUNC_0, SDIO_CCCR_ABORT,
-                                sizeof(t_func), &t_func, true);
+       /* Issue abort cmd52 command through F0 */
+       brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_ABORT, fn, NULL);
 
        brcmf_dbg(SDIO, "Exit\n");
        return 0;
index 53ae3025998999e26f2f06307fe4abd6f6343dc9..47de35a338532f65f4817b11b98b1f51b2c4344f 100644 (file)
@@ -130,13 +130,19 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,
        }
 }
 
+#define MAX_CAPS_BUFFER_SIZE   512
 static void brcmf_feat_firmware_capabilities(struct brcmf_if *ifp)
 {
-       char caps[256];
+       char caps[MAX_CAPS_BUFFER_SIZE];
        enum brcmf_feat_id id;
-       int i;
+       int i, err;
+
+       err = brcmf_fil_iovar_data_get(ifp, "cap", caps, sizeof(caps));
+       if (err) {
+               brcmf_err("could not get firmware cap (%d)\n", err);
+               return;
+       }
 
-       brcmf_fil_iovar_data_get(ifp, "cap", caps, sizeof(caps));
        brcmf_dbg(INFO, "[ %s]\n", caps);
 
        for (i = 0; i < ARRAY_SIZE(brcmf_fwcap_map); i++) {
index cdf9e41615925c6978dc4620e032d65b055ee28d..5cc2d698ea755861ff1976cb2723ecc58eed51fa 100644 (file)
@@ -159,8 +159,8 @@ struct rte_console {
 /* manfid tuple length, include tuple, link bytes */
 #define SBSDIO_CIS_MANFID_TUPLE_LEN    6
 
-#define CORE_BUS_REG(base, field) \
-               (base + offsetof(struct sdpcmd_regs, field))
+#define SD_REG(field) \
+               (offsetof(struct sdpcmd_regs, field))
 
 /* SDIO function 1 register CHIPCLKCSR */
 /* Force ALP request to backplane */
@@ -436,6 +436,7 @@ struct brcmf_sdio_count {
 struct brcmf_sdio {
        struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
        struct brcmf_chip *ci;  /* Chip info struct */
+       struct brcmf_core *sdio_core; /* sdio core info struct */
 
        u32 hostintmask;        /* Copy of Host Interrupt Mask */
        atomic_t intstatus;     /* Intstatus bits (events) pending */
@@ -665,22 +666,20 @@ static bool data_ok(struct brcmf_sdio *bus)
  */
 static int r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
 {
-       struct brcmf_core *core;
+       struct brcmf_core *core = bus->sdio_core;
        int ret;
 
-       core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
-       *regvar = brcmf_sdiod_regrl(bus->sdiodev, core->base + offset, &ret);
+       *regvar = brcmf_sdiod_readl(bus->sdiodev, core->base + offset, &ret);
 
        return ret;
 }
 
 static int w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
 {
-       struct brcmf_core *core;
+       struct brcmf_core *core = bus->sdio_core;
        int ret;
 
-       core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
-       brcmf_sdiod_regwl(bus->sdiodev, core->base + reg_offset, regval, &ret);
+       brcmf_sdiod_writel(bus->sdiodev, core->base + reg_offset, regval, &ret);
 
        return ret;
 }
@@ -697,8 +696,7 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
 
        wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
        /* 1st KSO write goes to AOS wake up core if device is asleep  */
-       brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
-                         wr_val, &err);
+       brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
 
        if (on) {
                /* device WAKEUP through KSO:
@@ -724,7 +722,7 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
                 * just one write attempt may fail,
                 * read it back until it matches written value
                 */
-               rd_val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+               rd_val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
                                           &err);
                if (!err) {
                        if ((rd_val & bmask) == cmp_val)
@@ -734,9 +732,11 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
                /* bail out upon subsequent access errors */
                if (err && (err_cnt++ > BRCMF_SDIO_MAX_ACCESS_ERRORS))
                        break;
+
                udelay(KSO_WAIT_US);
-               brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
-                                 wr_val, &err);
+               brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val,
+                                  &err);
+
        } while (try_cnt++ < MAX_KSO_ATTEMPTS);
 
        if (try_cnt > 2)
@@ -772,15 +772,15 @@ static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
                clkreq =
                    bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
 
-               brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-                                 clkreq, &err);
+               brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+                                  clkreq, &err);
                if (err) {
                        brcmf_err("HT Avail request error: %d\n", err);
                        return -EBADE;
                }
 
                /* Check current status */
-               clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+               clkctl = brcmf_sdiod_readb(bus->sdiodev,
                                           SBSDIO_FUNC1_CHIPCLKCSR, &err);
                if (err) {
                        brcmf_err("HT Avail read error: %d\n", err);
@@ -790,35 +790,34 @@ static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
                /* Go to pending and await interrupt if appropriate */
                if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
                        /* Allow only clock-available interrupt */
-                       devctl = brcmf_sdiod_regrb(bus->sdiodev,
+                       devctl = brcmf_sdiod_readb(bus->sdiodev,
                                                   SBSDIO_DEVICE_CTL, &err);
                        if (err) {
-                               brcmf_err("Devctl error setting CA: %d\n",
-                                         err);
+                               brcmf_err("Devctl error setting CA: %d\n", err);
                                return -EBADE;
                        }
 
                        devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
-                       brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
-                                         devctl, &err);
+                       brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+                                          devctl, &err);
                        brcmf_dbg(SDIO, "CLKCTL: set PENDING\n");
                        bus->clkstate = CLK_PENDING;
 
                        return 0;
                } else if (bus->clkstate == CLK_PENDING) {
                        /* Cancel CA-only interrupt filter */
-                       devctl = brcmf_sdiod_regrb(bus->sdiodev,
+                       devctl = brcmf_sdiod_readb(bus->sdiodev,
                                                   SBSDIO_DEVICE_CTL, &err);
                        devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
-                       brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
-                                         devctl, &err);
+                       brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+                                          devctl, &err);
                }
 
                /* Otherwise, wait here (polling) for HT Avail */
                timeout = jiffies +
                          msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
                while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
-                       clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+                       clkctl = brcmf_sdiod_readb(bus->sdiodev,
                                                   SBSDIO_FUNC1_CHIPCLKCSR,
                                                   &err);
                        if (time_after(jiffies, timeout))
@@ -852,16 +851,16 @@ static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
 
                if (bus->clkstate == CLK_PENDING) {
                        /* Cancel CA-only interrupt filter */
-                       devctl = brcmf_sdiod_regrb(bus->sdiodev,
+                       devctl = brcmf_sdiod_readb(bus->sdiodev,
                                                   SBSDIO_DEVICE_CTL, &err);
                        devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
-                       brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
-                                         devctl, &err);
+                       brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+                                          devctl, &err);
                }
 
                bus->clkstate = CLK_SDONLY;
-               brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-                                 clkreq, &err);
+               brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+                                  clkreq, &err);
                brcmf_dbg(SDIO, "CLKCTL: turned OFF\n");
                if (err) {
                        brcmf_err("Failed access turning clock off: %d\n",
@@ -951,14 +950,14 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
 
                /* Going to sleep */
                if (sleep) {
-                       clkcsr = brcmf_sdiod_regrb(bus->sdiodev,
+                       clkcsr = brcmf_sdiod_readb(bus->sdiodev,
                                                   SBSDIO_FUNC1_CHIPCLKCSR,
                                                   &err);
                        if ((clkcsr & SBSDIO_CSR_MASK) == 0) {
                                brcmf_dbg(SDIO, "no clock, set ALP\n");
-                               brcmf_sdiod_regwb(bus->sdiodev,
-                                                 SBSDIO_FUNC1_CHIPCLKCSR,
-                                                 SBSDIO_ALP_AVAIL_REQ, &err);
+                               brcmf_sdiod_writeb(bus->sdiodev,
+                                                  SBSDIO_FUNC1_CHIPCLKCSR,
+                                                  SBSDIO_ALP_AVAIL_REQ, &err);
                        }
                        err = brcmf_sdio_kso_control(bus, false);
                } else {
@@ -1087,12 +1086,10 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
        brcmf_dbg(SDIO, "Enter\n");
 
        /* Read mailbox data and ack that we did so */
-       ret = r_sdreg32(bus, &hmb_data,
-                       offsetof(struct sdpcmd_regs, tohostmailboxdata));
+       ret = r_sdreg32(bus, &hmb_data, SD_REG(tohostmailboxdata));
 
        if (ret == 0)
-               w_sdreg32(bus, SMB_INT_ACK,
-                         offsetof(struct sdpcmd_regs, tosbmailbox));
+               w_sdreg32(bus, SMB_INT_ACK, SD_REG(tosbmailbox));
        bus->sdcnt.f1regdata += 2;
 
        /* dongle indicates the firmware has halted/crashed */
@@ -1178,16 +1175,16 @@ static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
        if (abort)
                brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
 
-       brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
-                         SFC_RF_TERM, &err);
+       brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM,
+                          &err);
        bus->sdcnt.f1regdata++;
 
        /* Wait until the packet has been flushed (device/FIFO stable) */
        for (lastrbc = retries = 0xffff; retries > 0; retries--) {
-               hi = brcmf_sdiod_regrb(bus->sdiodev,
-                                      SBSDIO_FUNC1_RFRAMEBCHI, &err);
-               lo = brcmf_sdiod_regrb(bus->sdiodev,
-                                      SBSDIO_FUNC1_RFRAMEBCLO, &err);
+               hi = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_RFRAMEBCHI,
+                                      &err);
+               lo = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_RFRAMEBCLO,
+                                      &err);
                bus->sdcnt.f1regdata += 2;
 
                if ((hi == 0) && (lo == 0))
@@ -1207,8 +1204,7 @@ static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
 
        if (rtx) {
                bus->sdcnt.rxrtx++;
-               err = w_sdreg32(bus, SMB_NAK,
-                               offsetof(struct sdpcmd_regs, tosbmailbox));
+               err = w_sdreg32(bus, SMB_NAK, SD_REG(tosbmailbox));
 
                bus->sdcnt.f1regdata++;
                if (err == 0)
@@ -1229,12 +1225,12 @@ static void brcmf_sdio_txfail(struct brcmf_sdio *bus)
        bus->sdcnt.tx_sderrs++;
 
        brcmf_sdiod_abort(sdiodev, SDIO_FUNC_2);
-       brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
+       brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
        bus->sdcnt.f1regdata++;
 
        for (i = 0; i < 3; i++) {
-               hi = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL);
-               lo = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+               hi = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+               lo = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL);
                bus->sdcnt.f1regdata += 2;
                if ((hi == 0) && (lo == 0))
                        break;
@@ -2333,9 +2329,7 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
                if (!bus->intr) {
                        /* Check device status, signal pending interrupt */
                        sdio_claim_host(bus->sdiodev->func[1]);
-                       ret = r_sdreg32(bus, &intstatus,
-                                       offsetof(struct sdpcmd_regs,
-                                                intstatus));
+                       ret = r_sdreg32(bus, &intstatus, SD_REG(intstatus));
                        sdio_release_host(bus->sdiodev->func[1]);
                        bus->sdcnt.f2txdata++;
                        if (ret != 0)
@@ -2441,16 +2435,16 @@ static void brcmf_sdio_bus_stop(struct device *dev)
                brcmf_sdio_bus_sleep(bus, false, false);
 
                /* Disable and clear interrupts at the chip level also */
-               w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
+               w_sdreg32(bus, 0, SD_REG(hostintmask));
                local_hostintmask = bus->hostintmask;
                bus->hostintmask = 0;
 
                /* Force backplane clocks to assure F2 interrupt propagates */
-               saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+               saveclk = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
                                            &err);
                if (!err)
-                       brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-                                         (saveclk | SBSDIO_FORCE_HT), &err);
+                       brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+                                          (saveclk | SBSDIO_FORCE_HT), &err);
                if (err)
                        brcmf_err("Failed to force clock for F2: err %d\n",
                                  err);
@@ -2460,8 +2454,7 @@ static void brcmf_sdio_bus_stop(struct device *dev)
                sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
 
                /* Clear any pending interrupts now that F2 is disabled */
-               w_sdreg32(bus, local_hostintmask,
-                         offsetof(struct sdpcmd_regs, intstatus));
+               w_sdreg32(bus, local_hostintmask, SD_REG(intstatus));
 
                sdio_release_host(sdiodev->func[1]);
        }
@@ -2501,15 +2494,14 @@ static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
 
 static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
 {
-       struct brcmf_core *buscore;
+       struct brcmf_core *buscore = bus->sdio_core;
        u32 addr;
        unsigned long val;
        int ret;
 
-       buscore = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
-       addr = buscore->base + offsetof(struct sdpcmd_regs, intstatus);
+       addr = buscore->base + SD_REG(intstatus);
 
-       val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
+       val = brcmf_sdiod_readl(bus->sdiodev, addr, &ret);
        bus->sdcnt.f1regdata++;
        if (ret != 0)
                return ret;
@@ -2519,7 +2511,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
 
        /* Clear interrupts */
        if (val) {
-               brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
+               brcmf_sdiod_writel(bus->sdiodev, addr, val, &ret);
                bus->sdcnt.f1regdata++;
                atomic_or(val, &bus->intstatus);
        }
@@ -2545,23 +2537,23 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 
 #ifdef DEBUG
                /* Check for inconsistent device control */
-               devctl = brcmf_sdiod_regrb(bus->sdiodev,
-                                          SBSDIO_DEVICE_CTL, &err);
+               devctl = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+                                          &err);
 #endif                         /* DEBUG */
 
                /* Read CSR, if clock on switch to AVAIL, else ignore */
-               clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+               clkctl = brcmf_sdiod_readb(bus->sdiodev,
                                           SBSDIO_FUNC1_CHIPCLKCSR, &err);
 
                brcmf_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
                          devctl, clkctl);
 
                if (SBSDIO_HTAV(clkctl)) {
-                       devctl = brcmf_sdiod_regrb(bus->sdiodev,
+                       devctl = brcmf_sdiod_readb(bus->sdiodev,
                                                   SBSDIO_DEVICE_CTL, &err);
                        devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
-                       brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
-                                         devctl, &err);
+                       brcmf_sdiod_writeb(bus->sdiodev,
+                                          SBSDIO_DEVICE_CTL, devctl, &err);
                        bus->clkstate = CLK_AVAIL;
                }
        }
@@ -2584,11 +2576,9 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
         */
        if (intstatus & I_HMB_FC_CHANGE) {
                intstatus &= ~I_HMB_FC_CHANGE;
-               err = w_sdreg32(bus, I_HMB_FC_CHANGE,
-                               offsetof(struct sdpcmd_regs, intstatus));
+               err = w_sdreg32(bus, I_HMB_FC_CHANGE, SD_REG(intstatus));
 
-               err = r_sdreg32(bus, &newstatus,
-                               offsetof(struct sdpcmd_regs, intstatus));
+               err = r_sdreg32(bus, &newstatus, SD_REG(intstatus));
                bus->sdcnt.f1regdata += 2;
                atomic_set(&bus->fcstate,
                           !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
@@ -3347,31 +3337,31 @@ static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
 
        brcmf_dbg(TRACE, "Enter\n");
 
-       val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err);
+       val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err);
        if (err) {
                brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
                return;
        }
 
        val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
-       brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err);
+       brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err);
        if (err) {
                brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
                return;
        }
 
        /* Add CMD14 Support */
-       brcmf_sdiod_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
-                         (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
-                          SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
-                         &err);
+       brcmf_sdiod_func0_wb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
+                            (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
+                             SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
+                            &err);
        if (err) {
                brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
                return;
        }
 
-       brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-                         SBSDIO_FORCE_HT, &err);
+       brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+                          SBSDIO_FORCE_HT, &err);
        if (err) {
                brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
                return;
@@ -3385,16 +3375,17 @@ static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
 /* enable KSO bit */
 static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
 {
+       struct brcmf_core *core = bus->sdio_core;
        u8 val;
        int err = 0;
 
        brcmf_dbg(TRACE, "Enter\n");
 
        /* KSO bit added in SDIO core rev 12 */
-       if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12)
+       if (core->rev < 12)
                return 0;
 
-       val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
+       val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
        if (err) {
                brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n");
                return err;
@@ -3403,8 +3394,8 @@ static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
        if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
                val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN <<
                        SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
-               brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
-                                 val, &err);
+               brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+                                  val, &err);
                if (err) {
                        brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n");
                        return err;
@@ -3420,6 +3411,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
        struct brcmf_sdio *bus = sdiodev->bus;
+       struct brcmf_core *core = bus->sdio_core;
        uint pad_size;
        u32 value;
        int err;
@@ -3428,7 +3420,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
         * a device perspective, ie. bus:txglom affects the
         * bus transfers from device to host.
         */
-       if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12) {
+       if (core->rev < 12) {
                /* for sdio core rev < 12, disable txgloming */
                value = 0;
                err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
@@ -3565,9 +3557,9 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
                                u8 devpend;
 
                                sdio_claim_host(bus->sdiodev->func[1]);
-                               devpend = brcmf_sdiod_regrb(bus->sdiodev,
-                                                           SDIO_CCCR_INTx,
-                                                           NULL);
+                               devpend = brcmf_sdiod_func0_rb(bus->sdiodev,
+                                                              SDIO_CCCR_INTx,
+                                                              NULL);
                                sdio_release_host(bus->sdiodev->func[1]);
                                intstatus = devpend & (INTR_STATUS_FUNC1 |
                                                       INTR_STATUS_FUNC2);
@@ -3705,12 +3697,12 @@ brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
                        }
                }
                addr = CORE_CC_REG(pmu->base, chipcontrol_addr);
-               brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
-               cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
+               brcmf_sdiod_writel(sdiodev, addr, 1, NULL);
+               cc_data_temp = brcmf_sdiod_readl(sdiodev, addr, NULL);
                cc_data_temp &= ~str_mask;
                drivestrength_sel <<= str_shift;
                cc_data_temp |= drivestrength_sel;
-               brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
+               brcmf_sdiod_writel(sdiodev, addr, cc_data_temp, NULL);
 
                brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
                          str_tab[i].strength, drivestrength, cc_data_temp);
@@ -3725,7 +3717,7 @@ static int brcmf_sdio_buscoreprep(void *ctx)
 
        /* Try forcing SDIO core to do ALPAvail request only */
        clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
-       brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+       brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
        if (err) {
                brcmf_err("error writing for HT off\n");
                return err;
@@ -3733,8 +3725,7 @@ static int brcmf_sdio_buscoreprep(void *ctx)
 
        /* If register supported, wait for ALPAvail and then force ALP */
        /* This may take up to 15 milliseconds */
-       clkval = brcmf_sdiod_regrb(sdiodev,
-                                  SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+       clkval = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, NULL);
 
        if ((clkval & ~SBSDIO_AVBITS) != clkset) {
                brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
@@ -3742,10 +3733,11 @@ static int brcmf_sdio_buscoreprep(void *ctx)
                return -EACCES;
        }
 
-       SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
-                                             SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
-                       !SBSDIO_ALPAV(clkval)),
-                       PMU_MAX_TRANSITION_DLY);
+       SPINWAIT(((clkval = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+                                             NULL)),
+                !SBSDIO_ALPAV(clkval)),
+                PMU_MAX_TRANSITION_DLY);
+
        if (!SBSDIO_ALPAV(clkval)) {
                brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
                          clkval);
@@ -3753,11 +3745,11 @@ static int brcmf_sdio_buscoreprep(void *ctx)
        }
 
        clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
-       brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+       brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
        udelay(65);
 
        /* Also, disable the extra SDIO pull-ups */
-       brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+       brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
 
        return 0;
 }
@@ -3766,13 +3758,12 @@ static void brcmf_sdio_buscore_activate(void *ctx, struct brcmf_chip *chip,
                                        u32 rstvec)
 {
        struct brcmf_sdio_dev *sdiodev = ctx;
-       struct brcmf_core *core;
+       struct brcmf_core *core = sdiodev->bus->sdio_core;
        u32 reg_addr;
 
        /* clear all interrupts */
-       core = brcmf_chip_get_core(chip, BCMA_CORE_SDIO_DEV);
-       reg_addr = core->base + offsetof(struct sdpcmd_regs, intstatus);
-       brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
+       reg_addr = core->base + SD_REG(intstatus);
+       brcmf_sdiod_writel(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
 
        if (rstvec)
                /* Write reset vector to address 0 */
@@ -3785,7 +3776,7 @@ static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
        struct brcmf_sdio_dev *sdiodev = ctx;
        u32 val, rev;
 
-       val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
+       val = brcmf_sdiod_readl(sdiodev, addr, NULL);
        if ((sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 ||
             sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4339) &&
            addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
@@ -3802,7 +3793,7 @@ static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
 {
        struct brcmf_sdio_dev *sdiodev = ctx;
 
-       brcmf_sdiod_regwl(sdiodev, addr, val, NULL);
+       brcmf_sdiod_writel(sdiodev, addr, val, NULL);
 }
 
 static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
@@ -3826,18 +3817,18 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
        sdio_claim_host(sdiodev->func[1]);
 
        pr_debug("F1 signature read @0x18000000=0x%4x\n",
-                brcmf_sdiod_regrl(sdiodev, SI_ENUM_BASE, NULL));
+                brcmf_sdiod_readl(sdiodev, SI_ENUM_BASE, NULL));
 
        /*
         * Force PLL off until brcmf_chip_attach()
         * programs PLL control regs
         */
 
-       brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-                         BRCMF_INIT_CLKCTL1, &err);
+       brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, BRCMF_INIT_CLKCTL1,
+                          &err);
        if (!err)
-               clkctl = brcmf_sdiod_regrb(sdiodev,
-                                          SBSDIO_FUNC1_CHIPCLKCSR, &err);
+               clkctl = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+                                          &err);
 
        if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
                brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
@@ -3851,6 +3842,12 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
                bus->ci = NULL;
                goto fail;
        }
+
+       /* Pick up the SDIO core info struct from chip.c */
+       bus->sdio_core   = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+       if (!bus->sdio_core)
+               goto fail;
+
        sdiodev->settings = brcmf_get_module_param(sdiodev->dev,
                                                   BRCMF_BUSTYPE_SDIO,
                                                   bus->ci->chip,
@@ -3897,25 +3894,25 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
        brcmf_sdio_drivestrengthinit(sdiodev, bus->ci, drivestrength);
 
        /* Set card control so an SDIO card reset does a WLAN backplane reset */
-       reg_val = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, &err);
+       reg_val = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, &err);
        if (err)
                goto fail;
 
        reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET;
 
-       brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
+       brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
        if (err)
                goto fail;
 
        /* set PMUControl so a backplane reset does PMU state reload */
        reg_addr = CORE_CC_REG(brcmf_chip_get_pmu(bus->ci)->base, pmucontrol);
-       reg_val = brcmf_sdiod_regrl(sdiodev, reg_addr, &err);
+       reg_val = brcmf_sdiod_readl(sdiodev, reg_addr, &err);
        if (err)
                goto fail;
 
        reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
 
-       brcmf_sdiod_regwl(sdiodev, reg_addr, reg_val, &err);
+       brcmf_sdiod_writel(sdiodev, reg_addr, reg_val, &err);
        if (err)
                goto fail;
 
@@ -4055,10 +4052,10 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
                goto release;
 
        /* Force clocks on backplane to be sure F2 interrupt propagates */
-       saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+       saveclk = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
        if (!err) {
-               brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-                                 (saveclk | SBSDIO_FORCE_HT), &err);
+               brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+                                  (saveclk | SBSDIO_FORCE_HT), &err);
        }
        if (err) {
                brcmf_err("Failed to force clock for F2: err %d\n", err);
@@ -4067,7 +4064,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 
        /* Enable function 2 (frame transfers) */
        w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
-                 offsetof(struct sdpcmd_regs, tosbmailboxdata));
+                 SD_REG(tosbmailboxdata));
        err = sdio_enable_func(sdiodev->func[SDIO_FUNC_2]);
 
 
@@ -4077,10 +4074,9 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
        if (!err) {
                /* Set up the interrupt mask and enable interrupts */
                bus->hostintmask = HOSTINTMASK;
-               w_sdreg32(bus, bus->hostintmask,
-                         offsetof(struct sdpcmd_regs, hostintmask));
+               w_sdreg32(bus, bus->hostintmask, SD_REG(hostintmask));
 
-               brcmf_sdiod_regwb(sdiodev, SBSDIO_WATERMARK, 8, &err);
+               brcmf_sdiod_writeb(sdiodev, SBSDIO_WATERMARK, 8, &err);
        } else {
                /* Disable F2 again */
                sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
@@ -4091,8 +4087,8 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
                brcmf_sdio_sr_init(bus);
        } else {
                /* Restore previous clock setting */
-               brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-                                 saveclk, &err);
+               brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+                                  saveclk, &err);
        }
 
        if (err == 0) {
@@ -4224,7 +4220,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
        bus->rxflow = false;
 
        /* Done with backplane-dependent accesses, can drop clock... */
-       brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+       brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
 
        sdio_release_host(bus->sdiodev->func[1]);
 
index f3da32fc6360b84e3439c51a53f3f920c282c277..01def16cd2361b268a13d9c73f3b36673c72d698 100644 (file)
 #define SBSDIO_NUM_FUNCTION            3
 
 /* function 0 vendor specific CCCR registers */
+
 #define SDIO_CCCR_BRCM_CARDCAP                 0xf0
-#define SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT   0x02
-#define SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT       0x04
-#define SDIO_CCCR_BRCM_CARDCAP_CMD_NODEC       0x08
-#define SDIO_CCCR_BRCM_CARDCTRL                0xf1
-#define SDIO_CCCR_BRCM_CARDCTRL_WLANRESET      0x02
-#define SDIO_CCCR_BRCM_SEPINT                  0xf2
+#define SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT   BIT(1)
+#define SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT       BIT(2)
+#define SDIO_CCCR_BRCM_CARDCAP_CMD_NODEC       BIT(3)
 
-#define  SDIO_SEPINT_MASK              0x01
-#define  SDIO_SEPINT_OE                        0x02
-#define  SDIO_SEPINT_ACT_HI            0x04
+#define SDIO_CCCR_BRCM_CARDCTRL                        0xf1
+#define SDIO_CCCR_BRCM_CARDCTRL_WLANRESET      BIT(1)
+
+#define SDIO_CCCR_BRCM_SEPINT                  0xf2
+#define SDIO_CCCR_BRCM_SEPINT_MASK             BIT(0)
+#define SDIO_CCCR_BRCM_SEPINT_OE               BIT(1)
+#define SDIO_CCCR_BRCM_SEPINT_ACT_HI           BIT(2)
 
 /* function 1 miscellaneous registers */
 
 /* with b15, maps to 32-bit SB access */
 #define SBSDIO_SB_ACCESS_2_4B_FLAG     0x08000
 
-/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
-
-#define SBSDIO_SBADDRLOW_MASK          0x80    /* Valid bits in SBADDRLOW */
-#define SBSDIO_SBADDRMID_MASK          0xff    /* Valid bits in SBADDRMID */
-#define SBSDIO_SBADDRHIGH_MASK         0xffU   /* Valid bits in SBADDRHIGH */
 /* Address bits from SBADDR regs */
 #define SBSDIO_SBWINDOW_MASK           0xffff8000
 
@@ -296,13 +293,24 @@ struct sdpcmd_regs {
 int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev);
 void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev);
 
-/* sdio device register access interface */
-u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data,
-                      int *ret);
-void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
-                      int *ret);
+/* SDIO device register access interface */
+/* Accessors for SDIO Function 0 */
+#define brcmf_sdiod_func0_rb(sdiodev, addr, r) \
+       sdio_readb((sdiodev)->func[0], (addr), (r))
+
+#define brcmf_sdiod_func0_wb(sdiodev, addr, v, ret) \
+       sdio_writeb((sdiodev)->func[0], (v), (addr), (ret))
+
+/* Accessors for SDIO Function 1 */
+#define brcmf_sdiod_readb(sdiodev, addr, r) \
+       sdio_readb((sdiodev)->func[1], (addr), (r))
+
+#define brcmf_sdiod_writeb(sdiodev, addr, v, ret) \
+       sdio_writeb((sdiodev)->func[1], (v), (addr), (ret))
+
+u32 brcmf_sdiod_readl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+void brcmf_sdiod_writel(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
+                       int *ret);
 
 /* Buffer transfer to/from device (client) core via cmd53.
  *   fn:       function number
@@ -342,7 +350,7 @@ int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
                      u8 *data, uint size);
 
 /* Issue an abort to the specified function */
-int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
+int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, u8 fn);
 void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev);
 void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
                              enum brcmf_sdiod_state state);
index 763e8ba6b17812f8369ee913a4a19766bff68628..7e01981bc5c8bf70475712c0942aa8cfe36c7c94 100644 (file)
@@ -16049,8 +16049,7 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
                wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_UPDATEGAINU,
                                       rfseq_updategainu_events,
                                       rfseq_updategainu_dlys,
-                                      sizeof(rfseq_updategainu_events) /
-                                      sizeof(rfseq_updategainu_events[0]));
+                                      ARRAY_SIZE(rfseq_updategainu_events));
 
                mod_phy_reg(pi, 0x153, (0xff << 8), (90 << 8));
 
index dbf50ef6cd75822c78639217b077e11d1b4e9ebb..533bd4b0277ea0a1b1efdd93157410647cc5eba4 100644 (file)
@@ -14,6 +14,7 @@
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#include <linux/kernel.h>
 #include <types.h>
 #include "phytbl_n.h"
 
@@ -4437,109 +4438,39 @@ static const u16 loft_lut_core1_rev0[] = {
 };
 
 const struct phytbl_info mimophytbl_info_rev0_volatile[] = {
-       {&bdi_tbl_rev0, sizeof(bdi_tbl_rev0) / sizeof(bdi_tbl_rev0[0]), 21, 0,
-        16}
-       ,
-       {&pltlut_tbl_rev0, sizeof(pltlut_tbl_rev0) / sizeof(pltlut_tbl_rev0[0]),
-        20, 0, 32}
-       ,
-       {&gainctrl_lut_core0_rev0,
-        sizeof(gainctrl_lut_core0_rev0) / sizeof(gainctrl_lut_core0_rev0[0]),
-        26, 192, 32}
-       ,
-       {&gainctrl_lut_core1_rev0,
-        sizeof(gainctrl_lut_core1_rev0) / sizeof(gainctrl_lut_core1_rev0[0]),
-        27, 192, 32}
-       ,
-
-       {&est_pwr_lut_core0_rev0,
-        sizeof(est_pwr_lut_core0_rev0) / sizeof(est_pwr_lut_core0_rev0[0]), 26,
-        0, 8}
-       ,
-       {&est_pwr_lut_core1_rev0,
-        sizeof(est_pwr_lut_core1_rev0) / sizeof(est_pwr_lut_core1_rev0[0]), 27,
-        0, 8}
-       ,
-       {&adj_pwr_lut_core0_rev0,
-        sizeof(adj_pwr_lut_core0_rev0) / sizeof(adj_pwr_lut_core0_rev0[0]), 26,
-        64, 8}
-       ,
-       {&adj_pwr_lut_core1_rev0,
-        sizeof(adj_pwr_lut_core1_rev0) / sizeof(adj_pwr_lut_core1_rev0[0]), 27,
-        64, 8}
-       ,
-       {&iq_lut_core0_rev0,
-        sizeof(iq_lut_core0_rev0) / sizeof(iq_lut_core0_rev0[0]), 26, 320, 32}
-       ,
-       {&iq_lut_core1_rev0,
-        sizeof(iq_lut_core1_rev0) / sizeof(iq_lut_core1_rev0[0]), 27, 320, 32}
-       ,
-       {&loft_lut_core0_rev0,
-        sizeof(loft_lut_core0_rev0) / sizeof(loft_lut_core0_rev0[0]), 26, 448,
-        16}
-       ,
-       {&loft_lut_core1_rev0,
-        sizeof(loft_lut_core1_rev0) / sizeof(loft_lut_core1_rev0[0]), 27, 448,
-        16}
-       ,
+       {&bdi_tbl_rev0, ARRAY_SIZE(bdi_tbl_rev0), 21, 0, 16},
+       {&pltlut_tbl_rev0, ARRAY_SIZE(pltlut_tbl_rev0), 20, 0, 32},
+       {&gainctrl_lut_core0_rev0, ARRAY_SIZE(gainctrl_lut_core0_rev0), 26, 192, 32},
+       {&gainctrl_lut_core1_rev0, ARRAY_SIZE(gainctrl_lut_core1_rev0), 27, 192, 32},
+       {&est_pwr_lut_core0_rev0, ARRAY_SIZE(est_pwr_lut_core0_rev0), 26, 0, 8},
+       {&est_pwr_lut_core1_rev0, ARRAY_SIZE(est_pwr_lut_core1_rev0), 27, 0, 8},
+       {&adj_pwr_lut_core0_rev0, ARRAY_SIZE(adj_pwr_lut_core0_rev0), 26, 64, 8},
+       {&adj_pwr_lut_core1_rev0, ARRAY_SIZE(adj_pwr_lut_core1_rev0), 27, 64, 8},
+       {&iq_lut_core0_rev0, ARRAY_SIZE(iq_lut_core0_rev0), 26, 320, 32},
+       {&iq_lut_core1_rev0, ARRAY_SIZE(iq_lut_core1_rev0), 27, 320, 32},
+       {&loft_lut_core0_rev0, ARRAY_SIZE(loft_lut_core0_rev0), 26, 448, 16},
+       {&loft_lut_core1_rev0, ARRAY_SIZE(loft_lut_core1_rev0), 27, 448, 16},
 };
 
 const struct phytbl_info mimophytbl_info_rev0[] = {
-       {&frame_struct_rev0,
-        sizeof(frame_struct_rev0) / sizeof(frame_struct_rev0[0]), 10, 0, 32}
-       ,
-       {&frame_lut_rev0, sizeof(frame_lut_rev0) / sizeof(frame_lut_rev0[0]),
-        24, 0, 8}
-       ,
-       {&tmap_tbl_rev0, sizeof(tmap_tbl_rev0) / sizeof(tmap_tbl_rev0[0]), 12,
-        0, 32}
-       ,
-       {&tdtrn_tbl_rev0, sizeof(tdtrn_tbl_rev0) / sizeof(tdtrn_tbl_rev0[0]),
-        14, 0, 32}
-       ,
-       {&intlv_tbl_rev0, sizeof(intlv_tbl_rev0) / sizeof(intlv_tbl_rev0[0]),
-        13, 0, 32}
-       ,
-       {&pilot_tbl_rev0, sizeof(pilot_tbl_rev0) / sizeof(pilot_tbl_rev0[0]),
-        11, 0, 16}
-       ,
-       {&tdi_tbl20_ant0_rev0,
-        sizeof(tdi_tbl20_ant0_rev0) / sizeof(tdi_tbl20_ant0_rev0[0]), 19, 128,
-        32}
-       ,
-       {&tdi_tbl20_ant1_rev0,
-        sizeof(tdi_tbl20_ant1_rev0) / sizeof(tdi_tbl20_ant1_rev0[0]), 19, 256,
-        32}
-       ,
-       {&tdi_tbl40_ant0_rev0,
-        sizeof(tdi_tbl40_ant0_rev0) / sizeof(tdi_tbl40_ant0_rev0[0]), 19, 640,
-        32}
-       ,
-       {&tdi_tbl40_ant1_rev0,
-        sizeof(tdi_tbl40_ant1_rev0) / sizeof(tdi_tbl40_ant1_rev0[0]), 19, 768,
-        32}
-       ,
-       {&chanest_tbl_rev0,
-        sizeof(chanest_tbl_rev0) / sizeof(chanest_tbl_rev0[0]), 22, 0, 32}
-       ,
-       {&mcs_tbl_rev0, sizeof(mcs_tbl_rev0) / sizeof(mcs_tbl_rev0[0]), 18, 0,
-        8}
-       ,
-       {&noise_var_tbl0_rev0,
-        sizeof(noise_var_tbl0_rev0) / sizeof(noise_var_tbl0_rev0[0]), 16, 0,
-        32}
-       ,
-       {&noise_var_tbl1_rev0,
-        sizeof(noise_var_tbl1_rev0) / sizeof(noise_var_tbl1_rev0[0]), 16, 128,
-        32}
-       ,
+       {&frame_struct_rev0, ARRAY_SIZE(frame_struct_rev0), 10, 0, 32},
+       {&frame_lut_rev0, ARRAY_SIZE(frame_lut_rev0), 24, 0, 8},
+       {&tmap_tbl_rev0, ARRAY_SIZE(tmap_tbl_rev0), 12, 0, 32},
+       {&tdtrn_tbl_rev0, ARRAY_SIZE(tdtrn_tbl_rev0), 14, 0, 32},
+       {&intlv_tbl_rev0, ARRAY_SIZE(intlv_tbl_rev0), 13, 0, 32},
+       {&pilot_tbl_rev0, ARRAY_SIZE(pilot_tbl_rev0), 11, 0, 16},
+       {&tdi_tbl20_ant0_rev0, ARRAY_SIZE(tdi_tbl20_ant0_rev0), 19, 128, 32},
+       {&tdi_tbl20_ant1_rev0, ARRAY_SIZE(tdi_tbl20_ant1_rev0), 19, 256, 32},
+       {&tdi_tbl40_ant0_rev0, ARRAY_SIZE(tdi_tbl40_ant0_rev0), 19, 640, 32},
+       {&tdi_tbl40_ant1_rev0, ARRAY_SIZE(tdi_tbl40_ant1_rev0), 19, 768, 32},
+       {&chanest_tbl_rev0, ARRAY_SIZE(chanest_tbl_rev0), 22, 0, 32},
+       {&mcs_tbl_rev0, ARRAY_SIZE(mcs_tbl_rev0), 18, 0, 8},
+       {&noise_var_tbl0_rev0, ARRAY_SIZE(noise_var_tbl0_rev0), 16, 0, 32},
+       {&noise_var_tbl1_rev0, ARRAY_SIZE(noise_var_tbl1_rev0), 16, 128, 32},
 };
 
-const u32 mimophytbl_info_sz_rev0 =
-       sizeof(mimophytbl_info_rev0) / sizeof(mimophytbl_info_rev0[0]);
-const u32 mimophytbl_info_sz_rev0_volatile =
-       sizeof(mimophytbl_info_rev0_volatile) /
-       sizeof(mimophytbl_info_rev0_volatile[0]);
+const u32 mimophytbl_info_sz_rev0 = ARRAY_SIZE(mimophytbl_info_rev0);
+const u32 mimophytbl_info_sz_rev0_volatile = ARRAY_SIZE(mimophytbl_info_rev0_volatile);
 
 static const u16 ant_swctrl_tbl_rev3[] = {
        0x0082,
@@ -9363,132 +9294,53 @@ static const u32 papd_cal_scalars_tbl_core1_rev3[] = {
 };
 
 const struct phytbl_info mimophytbl_info_rev3_volatile[] = {
-       {&ant_swctrl_tbl_rev3,
-        sizeof(ant_swctrl_tbl_rev3) / sizeof(ant_swctrl_tbl_rev3[0]), 9, 0, 16}
-       ,
+       {&ant_swctrl_tbl_rev3, ARRAY_SIZE(ant_swctrl_tbl_rev3), 9, 0, 16},
 };
 
 const struct phytbl_info mimophytbl_info_rev3_volatile1[] = {
-       {&ant_swctrl_tbl_rev3_1,
-        sizeof(ant_swctrl_tbl_rev3_1) / sizeof(ant_swctrl_tbl_rev3_1[0]), 9, 0,
-        16}
-       ,
+       {&ant_swctrl_tbl_rev3_1, ARRAY_SIZE(ant_swctrl_tbl_rev3_1), 9, 0, 16},
 };
 
 const struct phytbl_info mimophytbl_info_rev3_volatile2[] = {
-       {&ant_swctrl_tbl_rev3_2,
-        sizeof(ant_swctrl_tbl_rev3_2) / sizeof(ant_swctrl_tbl_rev3_2[0]), 9, 0,
-        16}
-       ,
+       {&ant_swctrl_tbl_rev3_2, ARRAY_SIZE(ant_swctrl_tbl_rev3_2), 9, 0, 16},
 };
 
 const struct phytbl_info mimophytbl_info_rev3_volatile3[] = {
-       {&ant_swctrl_tbl_rev3_3,
-        sizeof(ant_swctrl_tbl_rev3_3) / sizeof(ant_swctrl_tbl_rev3_3[0]), 9, 0,
-        16}
-       ,
+       {&ant_swctrl_tbl_rev3_3, ARRAY_SIZE(ant_swctrl_tbl_rev3_3), 9, 0, 16},
 };
 
 const struct phytbl_info mimophytbl_info_rev3[] = {
-       {&frame_struct_rev3,
-        sizeof(frame_struct_rev3) / sizeof(frame_struct_rev3[0]), 10, 0, 32}
-       ,
-       {&pilot_tbl_rev3, sizeof(pilot_tbl_rev3) / sizeof(pilot_tbl_rev3[0]),
-        11, 0, 16}
-       ,
-       {&tmap_tbl_rev3, sizeof(tmap_tbl_rev3) / sizeof(tmap_tbl_rev3[0]), 12,
-        0, 32}
-       ,
-       {&intlv_tbl_rev3, sizeof(intlv_tbl_rev3) / sizeof(intlv_tbl_rev3[0]),
-        13, 0, 32}
-       ,
-       {&tdtrn_tbl_rev3, sizeof(tdtrn_tbl_rev3) / sizeof(tdtrn_tbl_rev3[0]),
-        14, 0, 32}
-       ,
-       {&noise_var_tbl_rev3,
-        sizeof(noise_var_tbl_rev3) / sizeof(noise_var_tbl_rev3[0]), 16, 0, 32}
-       ,
-       {&mcs_tbl_rev3, sizeof(mcs_tbl_rev3) / sizeof(mcs_tbl_rev3[0]), 18, 0,
-        16}
-       ,
-       {&tdi_tbl20_ant0_rev3,
-        sizeof(tdi_tbl20_ant0_rev3) / sizeof(tdi_tbl20_ant0_rev3[0]), 19, 128,
-        32}
-       ,
-       {&tdi_tbl20_ant1_rev3,
-        sizeof(tdi_tbl20_ant1_rev3) / sizeof(tdi_tbl20_ant1_rev3[0]), 19, 256,
-        32}
-       ,
-       {&tdi_tbl40_ant0_rev3,
-        sizeof(tdi_tbl40_ant0_rev3) / sizeof(tdi_tbl40_ant0_rev3[0]), 19, 640,
-        32}
-       ,
-       {&tdi_tbl40_ant1_rev3,
-        sizeof(tdi_tbl40_ant1_rev3) / sizeof(tdi_tbl40_ant1_rev3[0]), 19, 768,
-        32}
-       ,
-       {&pltlut_tbl_rev3, sizeof(pltlut_tbl_rev3) / sizeof(pltlut_tbl_rev3[0]),
-        20, 0, 32}
-       ,
-       {&chanest_tbl_rev3,
-        sizeof(chanest_tbl_rev3) / sizeof(chanest_tbl_rev3[0]), 22, 0, 32}
-       ,
-       {&frame_lut_rev3, sizeof(frame_lut_rev3) / sizeof(frame_lut_rev3[0]),
-        24, 0, 8}
-       ,
-       {&est_pwr_lut_core0_rev3,
-        sizeof(est_pwr_lut_core0_rev3) / sizeof(est_pwr_lut_core0_rev3[0]), 26,
-        0, 8}
-       ,
-       {&est_pwr_lut_core1_rev3,
-        sizeof(est_pwr_lut_core1_rev3) / sizeof(est_pwr_lut_core1_rev3[0]), 27,
-        0, 8}
-       ,
-       {&adj_pwr_lut_core0_rev3,
-        sizeof(adj_pwr_lut_core0_rev3) / sizeof(adj_pwr_lut_core0_rev3[0]), 26,
-        64, 8}
-       ,
-       {&adj_pwr_lut_core1_rev3,
-        sizeof(adj_pwr_lut_core1_rev3) / sizeof(adj_pwr_lut_core1_rev3[0]), 27,
-        64, 8}
-       ,
-       {&gainctrl_lut_core0_rev3,
-        sizeof(gainctrl_lut_core0_rev3) / sizeof(gainctrl_lut_core0_rev3[0]),
-        26, 192, 32}
-       ,
-       {&gainctrl_lut_core1_rev3,
-        sizeof(gainctrl_lut_core1_rev3) / sizeof(gainctrl_lut_core1_rev3[0]),
-        27, 192, 32}
-       ,
-       {&iq_lut_core0_rev3,
-        sizeof(iq_lut_core0_rev3) / sizeof(iq_lut_core0_rev3[0]), 26, 320, 32}
-       ,
-       {&iq_lut_core1_rev3,
-        sizeof(iq_lut_core1_rev3) / sizeof(iq_lut_core1_rev3[0]), 27, 320, 32}
-       ,
-       {&loft_lut_core0_rev3,
-        sizeof(loft_lut_core0_rev3) / sizeof(loft_lut_core0_rev3[0]), 26, 448,
-        16}
-       ,
-       {&loft_lut_core1_rev3,
-        sizeof(loft_lut_core1_rev3) / sizeof(loft_lut_core1_rev3[0]), 27, 448,
-        16}
+       {&frame_struct_rev3, ARRAY_SIZE(frame_struct_rev3), 10, 0, 32},
+       {&pilot_tbl_rev3, ARRAY_SIZE(pilot_tbl_rev3), 11, 0, 16},
+       {&tmap_tbl_rev3, ARRAY_SIZE(tmap_tbl_rev3), 12, 0, 32},
+       {&intlv_tbl_rev3, ARRAY_SIZE(intlv_tbl_rev3), 13, 0, 32},
+       {&tdtrn_tbl_rev3, ARRAY_SIZE(tdtrn_tbl_rev3), 14, 0, 32},
+       {&noise_var_tbl_rev3, ARRAY_SIZE(noise_var_tbl_rev3), 16, 0, 32},
+       {&mcs_tbl_rev3, ARRAY_SIZE(mcs_tbl_rev3), 18, 0, 16},
+       {&tdi_tbl20_ant0_rev3, ARRAY_SIZE(tdi_tbl20_ant0_rev3), 19, 128, 32},
+       {&tdi_tbl20_ant1_rev3, ARRAY_SIZE(tdi_tbl20_ant1_rev3), 19, 256, 32},
+       {&tdi_tbl40_ant0_rev3, ARRAY_SIZE(tdi_tbl40_ant0_rev3), 19, 640, 32},
+       {&tdi_tbl40_ant1_rev3, ARRAY_SIZE(tdi_tbl40_ant1_rev3), 19, 768, 32},
+       {&pltlut_tbl_rev3, ARRAY_SIZE(pltlut_tbl_rev3), 20, 0, 32},
+       {&chanest_tbl_rev3, ARRAY_SIZE(chanest_tbl_rev3), 22, 0, 32},
+       {&frame_lut_rev3, ARRAY_SIZE(frame_lut_rev3), 24, 0, 8},
+       {&est_pwr_lut_core0_rev3, ARRAY_SIZE(est_pwr_lut_core0_rev3), 26, 0, 8},
+       {&est_pwr_lut_core1_rev3, ARRAY_SIZE(est_pwr_lut_core1_rev3), 27, 0, 8},
+       {&adj_pwr_lut_core0_rev3, ARRAY_SIZE(adj_pwr_lut_core0_rev3), 26, 64, 8},
+       {&adj_pwr_lut_core1_rev3, ARRAY_SIZE(adj_pwr_lut_core1_rev3), 27, 64, 8},
+       {&gainctrl_lut_core0_rev3, ARRAY_SIZE(gainctrl_lut_core0_rev3), 26, 192, 32},
+       {&gainctrl_lut_core1_rev3, ARRAY_SIZE(gainctrl_lut_core1_rev3), 27, 192, 32},
+       {&iq_lut_core0_rev3, ARRAY_SIZE(iq_lut_core0_rev3), 26, 320, 32},
+       {&iq_lut_core1_rev3, ARRAY_SIZE(iq_lut_core1_rev3), 27, 320, 32},
+       {&loft_lut_core0_rev3, ARRAY_SIZE(loft_lut_core0_rev3), 26, 448, 16},
+       {&loft_lut_core1_rev3, ARRAY_SIZE(loft_lut_core1_rev3), 27, 448, 16}
 };
 
-const u32 mimophytbl_info_sz_rev3 =
-       sizeof(mimophytbl_info_rev3) / sizeof(mimophytbl_info_rev3[0]);
-const u32 mimophytbl_info_sz_rev3_volatile =
-       sizeof(mimophytbl_info_rev3_volatile) /
-       sizeof(mimophytbl_info_rev3_volatile[0]);
-const u32 mimophytbl_info_sz_rev3_volatile1 =
-       sizeof(mimophytbl_info_rev3_volatile1) /
-       sizeof(mimophytbl_info_rev3_volatile1[0]);
-const u32 mimophytbl_info_sz_rev3_volatile2 =
-       sizeof(mimophytbl_info_rev3_volatile2) /
-       sizeof(mimophytbl_info_rev3_volatile2[0]);
-const u32 mimophytbl_info_sz_rev3_volatile3 =
-       sizeof(mimophytbl_info_rev3_volatile3) /
-       sizeof(mimophytbl_info_rev3_volatile3[0]);
+const u32 mimophytbl_info_sz_rev3 = ARRAY_SIZE(mimophytbl_info_rev3);
+const u32 mimophytbl_info_sz_rev3_volatile = ARRAY_SIZE(mimophytbl_info_rev3_volatile);
+const u32 mimophytbl_info_sz_rev3_volatile1 = ARRAY_SIZE(mimophytbl_info_rev3_volatile1);
+const u32 mimophytbl_info_sz_rev3_volatile2 = ARRAY_SIZE(mimophytbl_info_rev3_volatile2);
+const u32 mimophytbl_info_sz_rev3_volatile3 = ARRAY_SIZE(mimophytbl_info_rev3_volatile3);
 
 static const u32 tmap_tbl_rev7[] = {
        0x8a88aa80,
@@ -10469,162 +10321,58 @@ static const u32 papd_cal_scalars_tbl_core1_rev7[] = {
 };
 
 const struct phytbl_info mimophytbl_info_rev7[] = {
-       {&frame_struct_rev3,
-        sizeof(frame_struct_rev3) / sizeof(frame_struct_rev3[0]), 10, 0, 32}
-       ,
-       {&pilot_tbl_rev3, sizeof(pilot_tbl_rev3) / sizeof(pilot_tbl_rev3[0]),
-        11, 0, 16}
-       ,
-       {&tmap_tbl_rev7, sizeof(tmap_tbl_rev7) / sizeof(tmap_tbl_rev7[0]), 12,
-        0, 32}
-       ,
-       {&intlv_tbl_rev3, sizeof(intlv_tbl_rev3) / sizeof(intlv_tbl_rev3[0]),
-        13, 0, 32}
-       ,
-       {&tdtrn_tbl_rev3, sizeof(tdtrn_tbl_rev3) / sizeof(tdtrn_tbl_rev3[0]),
-        14, 0, 32}
-       ,
-       {&noise_var_tbl_rev7,
-        sizeof(noise_var_tbl_rev7) / sizeof(noise_var_tbl_rev7[0]), 16, 0, 32}
-       ,
-       {&mcs_tbl_rev3, sizeof(mcs_tbl_rev3) / sizeof(mcs_tbl_rev3[0]), 18, 0,
-        16}
-       ,
-       {&tdi_tbl20_ant0_rev3,
-        sizeof(tdi_tbl20_ant0_rev3) / sizeof(tdi_tbl20_ant0_rev3[0]), 19, 128,
-        32}
-       ,
-       {&tdi_tbl20_ant1_rev3,
-        sizeof(tdi_tbl20_ant1_rev3) / sizeof(tdi_tbl20_ant1_rev3[0]), 19, 256,
-        32}
-       ,
-       {&tdi_tbl40_ant0_rev3,
-        sizeof(tdi_tbl40_ant0_rev3) / sizeof(tdi_tbl40_ant0_rev3[0]), 19, 640,
-        32}
-       ,
-       {&tdi_tbl40_ant1_rev3,
-        sizeof(tdi_tbl40_ant1_rev3) / sizeof(tdi_tbl40_ant1_rev3[0]), 19, 768,
-        32}
-       ,
-       {&pltlut_tbl_rev3, sizeof(pltlut_tbl_rev3) / sizeof(pltlut_tbl_rev3[0]),
-        20, 0, 32}
-       ,
-       {&chanest_tbl_rev3,
-        sizeof(chanest_tbl_rev3) / sizeof(chanest_tbl_rev3[0]), 22, 0, 32}
-       ,
-       {&frame_lut_rev3, sizeof(frame_lut_rev3) / sizeof(frame_lut_rev3[0]),
-        24, 0, 8}
-       ,
-       {&est_pwr_lut_core0_rev3,
-        sizeof(est_pwr_lut_core0_rev3) / sizeof(est_pwr_lut_core0_rev3[0]), 26,
-        0, 8}
-       ,
-       {&est_pwr_lut_core1_rev3,
-        sizeof(est_pwr_lut_core1_rev3) / sizeof(est_pwr_lut_core1_rev3[0]), 27,
-        0, 8}
-       ,
-       {&adj_pwr_lut_core0_rev3,
-        sizeof(adj_pwr_lut_core0_rev3) / sizeof(adj_pwr_lut_core0_rev3[0]), 26,
-        64, 8}
-       ,
-       {&adj_pwr_lut_core1_rev3,
-        sizeof(adj_pwr_lut_core1_rev3) / sizeof(adj_pwr_lut_core1_rev3[0]), 27,
-        64, 8}
-       ,
-       {&gainctrl_lut_core0_rev3,
-        sizeof(gainctrl_lut_core0_rev3) / sizeof(gainctrl_lut_core0_rev3[0]),
-        26, 192, 32}
-       ,
-       {&gainctrl_lut_core1_rev3,
-        sizeof(gainctrl_lut_core1_rev3) / sizeof(gainctrl_lut_core1_rev3[0]),
-        27, 192, 32}
-       ,
-       {&iq_lut_core0_rev3,
-        sizeof(iq_lut_core0_rev3) / sizeof(iq_lut_core0_rev3[0]), 26, 320, 32}
-       ,
-       {&iq_lut_core1_rev3,
-        sizeof(iq_lut_core1_rev3) / sizeof(iq_lut_core1_rev3[0]), 27, 320, 32}
-       ,
-       {&loft_lut_core0_rev3,
-        sizeof(loft_lut_core0_rev3) / sizeof(loft_lut_core0_rev3[0]), 26, 448,
-        16}
-       ,
-       {&loft_lut_core1_rev3,
-        sizeof(loft_lut_core1_rev3) / sizeof(loft_lut_core1_rev3[0]), 27, 448,
-        16}
-       ,
+       {&frame_struct_rev3, ARRAY_SIZE(frame_struct_rev3), 10, 0, 32},
+       {&pilot_tbl_rev3, ARRAY_SIZE(pilot_tbl_rev3), 11, 0, 16},
+       {&tmap_tbl_rev7, ARRAY_SIZE(tmap_tbl_rev7), 12, 0, 32},
+       {&intlv_tbl_rev3, ARRAY_SIZE(intlv_tbl_rev3), 13, 0, 32},
+       {&tdtrn_tbl_rev3, ARRAY_SIZE(tdtrn_tbl_rev3), 14, 0, 32},
+       {&noise_var_tbl_rev7, ARRAY_SIZE(noise_var_tbl_rev7), 16, 0, 32},
+       {&mcs_tbl_rev3, ARRAY_SIZE(mcs_tbl_rev3), 18, 0, 16},
+       {&tdi_tbl20_ant0_rev3, ARRAY_SIZE(tdi_tbl20_ant0_rev3), 19, 128, 32},
+       {&tdi_tbl20_ant1_rev3, ARRAY_SIZE(tdi_tbl20_ant1_rev3), 19, 256, 32},
+       {&tdi_tbl40_ant0_rev3, ARRAY_SIZE(tdi_tbl40_ant0_rev3), 19, 640, 32},
+       {&tdi_tbl40_ant1_rev3, ARRAY_SIZE(tdi_tbl40_ant1_rev3), 19, 768, 32},
+       {&pltlut_tbl_rev3, ARRAY_SIZE(pltlut_tbl_rev3), 20, 0, 32},
+       {&chanest_tbl_rev3, ARRAY_SIZE(chanest_tbl_rev3), 22, 0, 32},
+       {&frame_lut_rev3, ARRAY_SIZE(frame_lut_rev3), 24, 0, 8},
+       {&est_pwr_lut_core0_rev3, ARRAY_SIZE(est_pwr_lut_core0_rev3), 26, 0, 8},
+       {&est_pwr_lut_core1_rev3, ARRAY_SIZE(est_pwr_lut_core1_rev3), 27, 0, 8},
+       {&adj_pwr_lut_core0_rev3, ARRAY_SIZE(adj_pwr_lut_core0_rev3), 26, 64, 8},
+       {&adj_pwr_lut_core1_rev3, ARRAY_SIZE(adj_pwr_lut_core1_rev3), 27, 64, 8},
+       {&gainctrl_lut_core0_rev3, ARRAY_SIZE(gainctrl_lut_core0_rev3), 26, 192, 32},
+       {&gainctrl_lut_core1_rev3, ARRAY_SIZE(gainctrl_lut_core1_rev3), 27, 192, 32},
+       {&iq_lut_core0_rev3, ARRAY_SIZE(iq_lut_core0_rev3), 26, 320, 32},
+       {&iq_lut_core1_rev3, ARRAY_SIZE(iq_lut_core1_rev3), 27, 320, 32},
+       {&loft_lut_core0_rev3, ARRAY_SIZE(loft_lut_core0_rev3), 26, 448, 16},
+       {&loft_lut_core1_rev3, ARRAY_SIZE(loft_lut_core1_rev3), 27, 448, 16},
        {&papd_comp_rfpwr_tbl_core0_rev3,
-        sizeof(papd_comp_rfpwr_tbl_core0_rev3) /
-        sizeof(papd_comp_rfpwr_tbl_core0_rev3[0]), 26, 576, 16}
-       ,
+        ARRAY_SIZE(papd_comp_rfpwr_tbl_core0_rev3), 26, 576, 16},
        {&papd_comp_rfpwr_tbl_core1_rev3,
-        sizeof(papd_comp_rfpwr_tbl_core1_rev3) /
-        sizeof(papd_comp_rfpwr_tbl_core1_rev3[0]), 27, 576, 16}
-       ,
+        ARRAY_SIZE(papd_comp_rfpwr_tbl_core1_rev3), 27, 576, 16},
        {&papd_comp_epsilon_tbl_core0_rev7,
-        sizeof(papd_comp_epsilon_tbl_core0_rev7) /
-        sizeof(papd_comp_epsilon_tbl_core0_rev7[0]), 31, 0, 32}
-       ,
+        ARRAY_SIZE(papd_comp_epsilon_tbl_core0_rev7), 31, 0, 32},
        {&papd_cal_scalars_tbl_core0_rev7,
-        sizeof(papd_cal_scalars_tbl_core0_rev7) /
-        sizeof(papd_cal_scalars_tbl_core0_rev7[0]), 32, 0, 32}
-       ,
+        ARRAY_SIZE(papd_cal_scalars_tbl_core0_rev7), 32, 0, 32},
        {&papd_comp_epsilon_tbl_core1_rev7,
-        sizeof(papd_comp_epsilon_tbl_core1_rev7) /
-        sizeof(papd_comp_epsilon_tbl_core1_rev7[0]), 33, 0, 32}
-       ,
+        ARRAY_SIZE(papd_comp_epsilon_tbl_core1_rev7), 33, 0, 32},
        {&papd_cal_scalars_tbl_core1_rev7,
-        sizeof(papd_cal_scalars_tbl_core1_rev7) /
-        sizeof(papd_cal_scalars_tbl_core1_rev7[0]), 34, 0, 32}
-       ,
+        ARRAY_SIZE(papd_cal_scalars_tbl_core1_rev7), 34, 0, 32},
 };
 
-const u32 mimophytbl_info_sz_rev7 =
-       sizeof(mimophytbl_info_rev7) / sizeof(mimophytbl_info_rev7[0]);
+const u32 mimophytbl_info_sz_rev7 = ARRAY_SIZE(mimophytbl_info_rev7);
 
 const struct phytbl_info mimophytbl_info_rev16[] = {
-       {&noise_var_tbl_rev7,
-        sizeof(noise_var_tbl_rev7) / sizeof(noise_var_tbl_rev7[0]), 16, 0, 32}
-       ,
-       {&est_pwr_lut_core0_rev3,
-        sizeof(est_pwr_lut_core0_rev3) / sizeof(est_pwr_lut_core0_rev3[0]), 26,
-        0, 8}
-       ,
-       {&est_pwr_lut_core1_rev3,
-        sizeof(est_pwr_lut_core1_rev3) / sizeof(est_pwr_lut_core1_rev3[0]), 27,
-        0, 8}
-       ,
-       {&adj_pwr_lut_core0_rev3,
-        sizeof(adj_pwr_lut_core0_rev3) / sizeof(adj_pwr_lut_core0_rev3[0]), 26,
-        64, 8}
-       ,
-       {&adj_pwr_lut_core1_rev3,
-        sizeof(adj_pwr_lut_core1_rev3) / sizeof(adj_pwr_lut_core1_rev3[0]), 27,
-        64, 8}
-       ,
-       {&gainctrl_lut_core0_rev3,
-        sizeof(gainctrl_lut_core0_rev3) / sizeof(gainctrl_lut_core0_rev3[0]),
-        26, 192, 32}
-       ,
-       {&gainctrl_lut_core1_rev3,
-        sizeof(gainctrl_lut_core1_rev3) / sizeof(gainctrl_lut_core1_rev3[0]),
-        27, 192, 32}
-       ,
-       {&iq_lut_core0_rev3,
-        sizeof(iq_lut_core0_rev3) / sizeof(iq_lut_core0_rev3[0]), 26, 320, 32}
-       ,
-       {&iq_lut_core1_rev3,
-        sizeof(iq_lut_core1_rev3) / sizeof(iq_lut_core1_rev3[0]), 27, 320, 32}
-       ,
-       {&loft_lut_core0_rev3,
-        sizeof(loft_lut_core0_rev3) / sizeof(loft_lut_core0_rev3[0]), 26, 448,
-        16}
-       ,
-       {&loft_lut_core1_rev3,
-        sizeof(loft_lut_core1_rev3) / sizeof(loft_lut_core1_rev3[0]), 27, 448,
-        16}
-       ,
+       {&noise_var_tbl_rev7, ARRAY_SIZE(noise_var_tbl_rev7), 16, 0, 32},
+       {&est_pwr_lut_core0_rev3, ARRAY_SIZE(est_pwr_lut_core0_rev3), 26, 0, 8},
+       {&est_pwr_lut_core1_rev3, ARRAY_SIZE(est_pwr_lut_core1_rev3), 27, 0, 8},
+       {&adj_pwr_lut_core0_rev3, ARRAY_SIZE(adj_pwr_lut_core0_rev3), 26, 64, 8},
+       {&adj_pwr_lut_core1_rev3, ARRAY_SIZE(adj_pwr_lut_core1_rev3), 27, 64, 8},
+       {&gainctrl_lut_core0_rev3, ARRAY_SIZE(gainctrl_lut_core0_rev3), 26, 192, 32},
+       {&gainctrl_lut_core1_rev3, ARRAY_SIZE(gainctrl_lut_core1_rev3), 27, 192, 32},
+       {&iq_lut_core0_rev3, ARRAY_SIZE(iq_lut_core0_rev3), 26, 320, 32},
+       {&iq_lut_core1_rev3, ARRAY_SIZE(iq_lut_core1_rev3), 27, 320, 32},
+       {&loft_lut_core0_rev3, ARRAY_SIZE(loft_lut_core0_rev3), 26, 448, 16},
+       {&loft_lut_core1_rev3, ARRAY_SIZE(loft_lut_core1_rev3), 27, 448, 16},
 };
 
-const u32 mimophytbl_info_sz_rev16 =
-       sizeof(mimophytbl_info_rev16) / sizeof(mimophytbl_info_rev16[0]);
+const u32 mimophytbl_info_sz_rev16 = ARRAY_SIZE(mimophytbl_info_rev16);
index ff136f299a0a88aebf2180b4ea89cf6e1a162f53..e2c151ae8649b2bc46bae4a5442b9cd8472fee8c 100644 (file)
@@ -9,7 +9,7 @@ iwlwifi-objs            += iwl-phy-db.o iwl-nvm-parse.o
 iwlwifi-objs           += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
 iwlwifi-objs           += pcie/ctxt-info.o pcie/trans-gen2.o pcie/tx-gen2.o
 iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
-iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/a000.o
+iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
 iwlwifi-objs           += iwl-trans.o
 iwlwifi-objs           += fw/notif-wait.o
 iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
new file mode 100644 (file)
index 0000000..48f6f80
--- /dev/null
@@ -0,0 +1,216 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-agn-hw.h"
+
+/* Highest firmware API version supported */
+#define IWL_22000_UCODE_API_MAX        36
+
+/* Lowest firmware API version supported */
+#define IWL_22000_UCODE_API_MIN        24
+
+/* NVM versions */
+#define IWL_22000_NVM_VERSION          0x0a1d
+#define IWL_22000_TX_POWER_VERSION     0xffff /* meaningless */
+
+/* Memory offsets and lengths */
+#define IWL_22000_DCCM_OFFSET          0x800000 /* LMAC1 */
+#define IWL_22000_DCCM_LEN             0x10000 /* LMAC1 */
+#define IWL_22000_DCCM2_OFFSET         0x880000
+#define IWL_22000_DCCM2_LEN            0x8000
+#define IWL_22000_SMEM_OFFSET          0x400000
+#define IWL_22000_SMEM_LEN             0xD0000
+
+#define IWL_22000_JF_FW_PRE    "iwlwifi-Qu-a0-jf-b0-"
+#define IWL_22000_HR_FW_PRE    "iwlwifi-Qu-a0-hr-a0-"
+#define IWL_22000_HR_CDB_FW_PRE        "iwlwifi-QuIcp-z0-hrcdb-a0-"
+#define IWL_22000_HR_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-"
+#define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-"
+#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
+
+#define IWL_22000_HR_MODULE_FIRMWARE(api) \
+       IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_JF_MODULE_FIRMWARE(api) \
+       IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
+       IWL_22000_HR_F0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
+       IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
+       IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
+
+#define NVM_HW_SECTION_NUM_FAMILY_22000                10
+
+static const struct iwl_base_params iwl_22000_base_params = {
+       .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
+       .num_of_queues = 512,
+       .shadow_ram_support = true,
+       .led_compensation = 57,
+       .wd_timeout = IWL_LONG_WD_TIMEOUT,
+       .max_event_log_size = 512,
+       .shadow_reg_enable = true,
+       .pcie_l1_allowed = true,
+};
+
+static const struct iwl_ht_params iwl_22000_ht_params = {
+       .stbc = true,
+       .ldpc = true,
+       .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+};
+
+#define IWL_DEVICE_22000                                               \
+       .ucode_api_max = IWL_22000_UCODE_API_MAX,                       \
+       .ucode_api_min = IWL_22000_UCODE_API_MIN,                       \
+       .device_family = IWL_DEVICE_FAMILY_22000,                       \
+       .max_inst_size = IWL60_RTC_INST_SIZE,                           \
+       .max_data_size = IWL60_RTC_DATA_SIZE,                           \
+       .base_params = &iwl_22000_base_params,                          \
+       .led_mode = IWL_LED_RF_STATE,                                   \
+       .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000,          \
+       .non_shared_ant = ANT_A,                                        \
+       .dccm_offset = IWL_22000_DCCM_OFFSET,                           \
+       .dccm_len = IWL_22000_DCCM_LEN,                                 \
+       .dccm2_offset = IWL_22000_DCCM2_OFFSET,                         \
+       .dccm2_len = IWL_22000_DCCM2_LEN,                               \
+       .smem_offset = IWL_22000_SMEM_OFFSET,                           \
+       .smem_len = IWL_22000_SMEM_LEN,                                 \
+       .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,           \
+       .apmg_not_supported = true,                                     \
+       .mq_rx_supported = true,                                        \
+       .vht_mu_mimo_supported = true,                                  \
+       .mac_addr_from_csr = true,                                      \
+       .use_tfh = true,                                                \
+       .rf_id = true,                                                  \
+       .gen2 = true,                                                   \
+       .nvm_type = IWL_NVM_EXT,                                        \
+       .dbgc_supported = true,                                         \
+       .tx_cmd_queue_size = 32,                                        \
+       .min_umac_error_event_table = 0x400000
+
+const struct iwl_cfg iwl22000_2ac_cfg_hr = {
+       .name = "Intel(R) Dual Band Wireless AC 22000",
+       .fw_name_pre = IWL_22000_HR_FW_PRE,
+       IWL_DEVICE_22000,
+       .ht_params = &iwl_22000_ht_params,
+       .nvm_ver = IWL_22000_NVM_VERSION,
+       .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = {
+       .name = "Intel(R) Dual Band Wireless AC 22000",
+       .fw_name_pre = IWL_22000_HR_CDB_FW_PRE,
+       IWL_DEVICE_22000,
+       .ht_params = &iwl_22000_ht_params,
+       .nvm_ver = IWL_22000_NVM_VERSION,
+       .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+       .cdb = true,
+};
+
+const struct iwl_cfg iwl22000_2ac_cfg_jf = {
+       .name = "Intel(R) Dual Band Wireless AC 22000",
+       .fw_name_pre = IWL_22000_JF_FW_PRE,
+       IWL_DEVICE_22000,
+       .ht_params = &iwl_22000_ht_params,
+       .nvm_ver = IWL_22000_NVM_VERSION,
+       .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_hr = {
+       .name = "Intel(R) Dual Band Wireless AX 22000",
+       .fw_name_pre = IWL_22000_HR_FW_PRE,
+       IWL_DEVICE_22000,
+       .ht_params = &iwl_22000_ht_params,
+       .nvm_ver = IWL_22000_NVM_VERSION,
+       .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0 = {
+       .name = "Intel(R) Dual Band Wireless AX 22000",
+       .fw_name_pre = IWL_22000_HR_F0_FW_PRE,
+       IWL_DEVICE_22000,
+       .ht_params = &iwl_22000_ht_params,
+       .nvm_ver = IWL_22000_NVM_VERSION,
+       .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = {
+       .name = "Intel(R) Dual Band Wireless AX 22000",
+       .fw_name_pre = IWL_22000_JF_B0_FW_PRE,
+       IWL_DEVICE_22000,
+       .ht_params = &iwl_22000_ht_params,
+       .nvm_ver = IWL_22000_NVM_VERSION,
+       .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
+       .name = "Intel(R) Dual Band Wireless AX 22000",
+       .fw_name_pre = IWL_22000_HR_A0_FW_PRE,
+       IWL_DEVICE_22000,
+       .ht_params = &iwl_22000_ht_params,
+       .nvm_ver = IWL_22000_NVM_VERSION,
+       .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
index 9bb7c19d48eb4cc5171c05054333774cc0c52e06..3f4d9bac9f73a03d5c8b814876c03bf998b7f148 100644 (file)
@@ -70,8 +70,8 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX  34
-#define IWL8265_UCODE_API_MAX  34
+#define IWL8000_UCODE_API_MAX  36
+#define IWL8265_UCODE_API_MAX  36
 
 /* Lowest firmware API version supported */
 #define IWL8000_UCODE_API_MIN  22
index e7e75b4580052f4591365b4eac580f809324282b..90a1d14cf7d2b4040a72dc26ce3d2c4f37b7d682 100644 (file)
@@ -55,7 +55,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX  34
+#define IWL9000_UCODE_API_MAX  36
 
 /* Lowest firmware API version supported */
 #define IWL9000_UCODE_API_MIN  30
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
deleted file mode 100644 (file)
index 705f83b..0000000
+++ /dev/null
@@ -1,216 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2015-2017 Intel Deutschland GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2015-2017 Intel Deutschland GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/module.h>
-#include <linux/stringify.h>
-#include "iwl-config.h"
-#include "iwl-agn-hw.h"
-
-/* Highest firmware API version supported */
-#define IWL_A000_UCODE_API_MAX 34
-
-/* Lowest firmware API version supported */
-#define IWL_A000_UCODE_API_MIN 24
-
-/* NVM versions */
-#define IWL_A000_NVM_VERSION           0x0a1d
-#define IWL_A000_TX_POWER_VERSION      0xffff /* meaningless */
-
-/* Memory offsets and lengths */
-#define IWL_A000_DCCM_OFFSET           0x800000 /* LMAC1 */
-#define IWL_A000_DCCM_LEN              0x10000 /* LMAC1 */
-#define IWL_A000_DCCM2_OFFSET          0x880000
-#define IWL_A000_DCCM2_LEN             0x8000
-#define IWL_A000_SMEM_OFFSET           0x400000
-#define IWL_A000_SMEM_LEN              0xD0000
-
-#define IWL_A000_JF_FW_PRE     "iwlwifi-Qu-a0-jf-b0-"
-#define IWL_A000_HR_FW_PRE     "iwlwifi-Qu-a0-hr-a0-"
-#define IWL_A000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
-#define IWL_A000_HR_F0_FW_PRE  "iwlwifi-QuQnj-f0-hr-a0-"
-#define IWL_A000_JF_B0_FW_PRE  "iwlwifi-QuQnj-a0-jf-b0-"
-#define IWL_A000_HR_A0_FW_PRE  "iwlwifi-QuQnj-a0-hr-a0-"
-
-#define IWL_A000_HR_MODULE_FIRMWARE(api) \
-       IWL_A000_HR_FW_PRE __stringify(api) ".ucode"
-#define IWL_A000_JF_MODULE_FIRMWARE(api) \
-       IWL_A000_JF_FW_PRE __stringify(api) ".ucode"
-#define IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
-       IWL_A000_HR_F0_FW_PRE __stringify(api) ".ucode"
-#define IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
-       IWL_A000_JF_B0_FW_PRE __stringify(api) ".ucode"
-#define IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
-       IWL_A000_HR_A0_FW_PRE __stringify(api) ".ucode"
-
-#define NVM_HW_SECTION_NUM_FAMILY_A000         10
-
-static const struct iwl_base_params iwl_a000_base_params = {
-       .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_A000,
-       .num_of_queues = 512,
-       .shadow_ram_support = true,
-       .led_compensation = 57,
-       .wd_timeout = IWL_LONG_WD_TIMEOUT,
-       .max_event_log_size = 512,
-       .shadow_reg_enable = true,
-       .pcie_l1_allowed = true,
-};
-
-static const struct iwl_ht_params iwl_a000_ht_params = {
-       .stbc = true,
-       .ldpc = true,
-       .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
-};
-
-#define IWL_DEVICE_A000                                                        \
-       .ucode_api_max = IWL_A000_UCODE_API_MAX,                        \
-       .ucode_api_min = IWL_A000_UCODE_API_MIN,                        \
-       .device_family = IWL_DEVICE_FAMILY_A000,                        \
-       .max_inst_size = IWL60_RTC_INST_SIZE,                           \
-       .max_data_size = IWL60_RTC_DATA_SIZE,                           \
-       .base_params = &iwl_a000_base_params,                           \
-       .led_mode = IWL_LED_RF_STATE,                                   \
-       .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_A000,           \
-       .non_shared_ant = ANT_A,                                        \
-       .dccm_offset = IWL_A000_DCCM_OFFSET,                            \
-       .dccm_len = IWL_A000_DCCM_LEN,                                  \
-       .dccm2_offset = IWL_A000_DCCM2_OFFSET,                          \
-       .dccm2_len = IWL_A000_DCCM2_LEN,                                \
-       .smem_offset = IWL_A000_SMEM_OFFSET,                            \
-       .smem_len = IWL_A000_SMEM_LEN,                                  \
-       .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,           \
-       .apmg_not_supported = true,                                     \
-       .mq_rx_supported = true,                                        \
-       .vht_mu_mimo_supported = true,                                  \
-       .mac_addr_from_csr = true,                                      \
-       .use_tfh = true,                                                \
-       .rf_id = true,                                                  \
-       .gen2 = true,                                                   \
-       .nvm_type = IWL_NVM_EXT,                                        \
-       .dbgc_supported = true,                                         \
-       .tx_cmd_queue_size = 32,                                        \
-       .min_umac_error_event_table = 0x400000
-
-const struct iwl_cfg iwla000_2ac_cfg_hr = {
-       .name = "Intel(R) Dual Band Wireless AC a000",
-       .fw_name_pre = IWL_A000_HR_FW_PRE,
-       IWL_DEVICE_A000,
-       .ht_params = &iwl_a000_ht_params,
-       .nvm_ver = IWL_A000_NVM_VERSION,
-       .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
-       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ac_cfg_hr_cdb = {
-       .name = "Intel(R) Dual Band Wireless AC a000",
-       .fw_name_pre = IWL_A000_HR_CDB_FW_PRE,
-       IWL_DEVICE_A000,
-       .ht_params = &iwl_a000_ht_params,
-       .nvm_ver = IWL_A000_NVM_VERSION,
-       .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
-       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-       .cdb = true,
-};
-
-const struct iwl_cfg iwla000_2ac_cfg_jf = {
-       .name = "Intel(R) Dual Band Wireless AC a000",
-       .fw_name_pre = IWL_A000_JF_FW_PRE,
-       IWL_DEVICE_A000,
-       .ht_params = &iwl_a000_ht_params,
-       .nvm_ver = IWL_A000_NVM_VERSION,
-       .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
-       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ax_cfg_hr = {
-       .name = "Intel(R) Dual Band Wireless AX a000",
-       .fw_name_pre = IWL_A000_HR_FW_PRE,
-       IWL_DEVICE_A000,
-       .ht_params = &iwl_a000_ht_params,
-       .nvm_ver = IWL_A000_NVM_VERSION,
-       .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
-       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_f0 = {
-       .name = "Intel(R) Dual Band Wireless AX a000",
-       .fw_name_pre = IWL_A000_HR_F0_FW_PRE,
-       IWL_DEVICE_A000,
-       .ht_params = &iwl_a000_ht_params,
-       .nvm_ver = IWL_A000_NVM_VERSION,
-       .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
-       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ax_cfg_qnj_jf_b0 = {
-       .name = "Intel(R) Dual Band Wireless AX a000",
-       .fw_name_pre = IWL_A000_JF_B0_FW_PRE,
-       IWL_DEVICE_A000,
-       .ht_params = &iwl_a000_ht_params,
-       .nvm_ver = IWL_A000_NVM_VERSION,
-       .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
-       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_a0 = {
-       .name = "Intel(R) Dual Band Wireless AX a000",
-       .fw_name_pre = IWL_A000_HR_A0_FW_PRE,
-       IWL_DEVICE_A000,
-       .ht_params = &iwl_a000_ht_params,
-       .nvm_ver = IWL_A000_NVM_VERSION,
-       .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
-       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-MODULE_FIRMWARE(IWL_A000_HR_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_A000_JF_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
index 3684a3e180e5257b176d8e4d8bc07efa38a23272..007bfe7656a4ae704722b3eaf2b29fe33b03238b 100644 (file)
@@ -95,8 +95,8 @@ enum {
 #define IWL_ALIVE_FLG_RFKILL   BIT(0)
 
 struct iwl_lmac_alive {
-       __le32 ucode_minor;
        __le32 ucode_major;
+       __le32 ucode_minor;
        u8 ver_subtype;
        u8 ver_type;
        u8 mac;
@@ -113,8 +113,8 @@ struct iwl_lmac_alive {
 } __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */
 
 struct iwl_umac_alive {
-       __le32 umac_minor;              /* UMAC version: minor */
        __le32 umac_major;              /* UMAC version: major */
+       __le32 umac_minor;              /* UMAC version: minor */
        __le32 error_info_addr;         /* SRAM address for UMAC error log */
        __le32 dbg_print_buff_addr;
 } __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */
index d09555afe2c541552f59f933f490d0e2f9721e66..87c1ddea75aef33544114018269bee97185fe9c4 100644 (file)
@@ -188,11 +188,6 @@ enum iwl_bt_mxbox_dw3 {
        BT_MBOX(3, UPDATE_REQUEST, 21, 1),
 };
 
-enum iwl_bt_mxbox_dw4 {
-       BT_MBOX(4, ATS_BT_INTERVAL, 0, 7),
-       BT_MBOX(4, ATS_BT_ACTIVE_MAX_TH, 7, 7),
-};
-
 #define BT_MBOX_MSG(_notif, _num, _field)                                   \
        ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
        >> BT_MBOX##_num##_##_field##_POS)
@@ -232,31 +227,6 @@ enum iwl_bt_ci_compliance {
  * @reserved: reserved
  */
 struct iwl_bt_coex_profile_notif {
-       __le32 mbox_msg[8];
-       __le32 msg_idx;
-       __le32 bt_ci_compliance;
-
-       __le32 primary_ch_lut;
-       __le32 secondary_ch_lut;
-       __le32 bt_activity_grading;
-       u8 ttc_status;
-       u8 rrc_status;
-       __le16 reserved;
-} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_5 */
-
-/**
- * struct iwl_bt_coex_profile_notif - notification about BT coex
- * @mbox_msg: message from BT to WiFi
- * @msg_idx: the index of the message
- * @bt_ci_compliance: enum %iwl_bt_ci_compliance
- * @primary_ch_lut: LUT used for primary channel &enum iwl_bt_coex_lut_type
- * @secondary_ch_lut: LUT used for secondary channel &enum iwl_bt_coex_lut_type
- * @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading
- * @ttc_status: is TTC enabled - one bit per PHY
- * @rrc_status: is RRC enabled - one bit per PHY
- * @reserved: reserved
- */
-struct iwl_bt_coex_profile_notif_v4 {
        __le32 mbox_msg[4];
        __le32 msg_idx;
        __le32 bt_ci_compliance;
index 7ebbf097488b066db27093c777b9ce9ff33b6d56..f285bacc8726523b8b3b38b44da3d019f8c61435 100644 (file)
@@ -215,7 +215,7 @@ enum iwl_legacy_cmds {
        /**
         * @SCD_QUEUE_CFG: &struct iwl_scd_txq_cfg_cmd for older hardware,
         *      &struct iwl_tx_queue_cfg_cmd with &struct iwl_tx_queue_cfg_rsp
-        *      for newer (A000) hardware.
+        *      for newer (22000) hardware.
         */
        SCD_QUEUE_CFG = 0x1d,
 
index aa76dcc148bd6cb53b29039903ce04569026994d..a57c7223df0f5187f30185de872285f396434793 100644 (file)
@@ -82,6 +82,21 @@ enum iwl_data_path_subcmd_ids {
         */
        TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
 
+       /**
+        * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
+        */
+       TLC_MNG_CONFIG_CMD = 0xF,
+
+       /**
+        * @TLC_MNG_NOTIF_REQ_CMD: &struct iwl_tlc_notif_req_config_cmd
+        */
+       TLC_MNG_NOTIF_REQ_CMD = 0x10,
+
+       /**
+        * @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif
+        */
+       TLC_MNG_UPDATE_NOTIF = 0xF7,
+
        /**
         * @STA_PM_NOTIF: &struct iwl_mvm_pm_state_notification
         */
index a13fd8a1be629b068ad9e80239f68f1582264488..e9a6e5627f94cf83e7095a627c36bdb16f5c16bc 100644 (file)
 
 #include "mac.h"
 
+/**
+ * enum iwl_tlc_mng_cfg_flags_enum - options for TLC config flags
+ * @IWL_TLC_MNG_CFG_FLAGS_CCK_MSK: CCK support
+ * @IWL_TLC_MNG_CFG_FLAGS_DD_MSK: enable DD
+ * @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC
+ * @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC
+ * @IWL_TLC_MNG_CFG_FLAGS_BF_MSK: enable BFER
+ * @IWL_TLC_MNG_CFG_FLAGS_DCM_MSK: enable DCM
+ */
+enum iwl_tlc_mng_cfg_flags_enum {
+       IWL_TLC_MNG_CFG_FLAGS_CCK_MSK   = BIT(0),
+       IWL_TLC_MNG_CFG_FLAGS_DD_MSK    = BIT(1),
+       IWL_TLC_MNG_CFG_FLAGS_STBC_MSK  = BIT(2),
+       IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK  = BIT(3),
+       IWL_TLC_MNG_CFG_FLAGS_BF_MSK    = BIT(4),
+       IWL_TLC_MNG_CFG_FLAGS_DCM_MSK   = BIT(5),
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_cw_enum - channel width options
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ: 20MHZ channel
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ: 40MHZ channel
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ: 80MHZ channel
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ: 160MHZ channel
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_LAST: maximum value
+ */
+enum iwl_tlc_mng_cfg_cw_enum {
+       IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ,
+       IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ,
+       IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ,
+       IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ,
+       IWL_TLC_MNG_MAX_CH_WIDTH_LAST = IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ,
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_chains_enum - possible chains
+ * @IWL_TLC_MNG_CHAIN_A_MSK: chain A
+ * @IWL_TLC_MNG_CHAIN_B_MSK: chain B
+ * @IWL_TLC_MNG_CHAIN_C_MSK: chain C
+ */
+enum iwl_tlc_mng_cfg_chains_enum {
+       IWL_TLC_MNG_CHAIN_A_MSK = BIT(0),
+       IWL_TLC_MNG_CHAIN_B_MSK = BIT(1),
+       IWL_TLC_MNG_CHAIN_C_MSK = BIT(2),
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_gi_enum - guard interval options
+ * @IWL_TLC_MNG_SGI_20MHZ_MSK: enable short GI for 20MHZ
+ * @IWL_TLC_MNG_SGI_40MHZ_MSK: enable short GI for 40MHZ
+ * @IWL_TLC_MNG_SGI_80MHZ_MSK: enable short GI for 80MHZ
+ * @IWL_TLC_MNG_SGI_160MHZ_MSK: enable short GI for 160MHZ
+ */
+enum iwl_tlc_mng_cfg_gi_enum {
+       IWL_TLC_MNG_SGI_20MHZ_MSK  = BIT(0),
+       IWL_TLC_MNG_SGI_40MHZ_MSK  = BIT(1),
+       IWL_TLC_MNG_SGI_80MHZ_MSK  = BIT(2),
+       IWL_TLC_MNG_SGI_160MHZ_MSK = BIT(3),
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_mode_enum - supported modes
+ * @IWL_TLC_MNG_MODE_CCK: enable CCK
+ * @IWL_TLC_MNG_MODE_OFDM_NON_HT: enable OFDM (non HT)
+ * @IWL_TLC_MNG_MODE_NON_HT: enable non HT
+ * @IWL_TLC_MNG_MODE_HT: enable HT
+ * @IWL_TLC_MNG_MODE_VHT: enable VHT
+ * @IWL_TLC_MNG_MODE_HE: enable HE
+ * @IWL_TLC_MNG_MODE_INVALID: invalid value
+ * @IWL_TLC_MNG_MODE_NUM: a count of possible modes
+ */
+enum iwl_tlc_mng_cfg_mode_enum {
+       IWL_TLC_MNG_MODE_CCK = 0,
+       IWL_TLC_MNG_MODE_OFDM_NON_HT = IWL_TLC_MNG_MODE_CCK,
+       IWL_TLC_MNG_MODE_NON_HT = IWL_TLC_MNG_MODE_CCK,
+       IWL_TLC_MNG_MODE_HT,
+       IWL_TLC_MNG_MODE_VHT,
+       IWL_TLC_MNG_MODE_HE,
+       IWL_TLC_MNG_MODE_INVALID,
+       IWL_TLC_MNG_MODE_NUM = IWL_TLC_MNG_MODE_INVALID,
+};
+
+/**
+ * enum iwl_tlc_mng_vht_he_types_enum - VHT HE types
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU: VHT HT single user
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU_EXT: VHT HT single user extended
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_MU: VHT HT multiple users
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED: trigger based
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_NUM: a count of possible types
+ */
+enum iwl_tlc_mng_vht_he_types_enum {
+       IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU = 0,
+       IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU_EXT,
+       IWL_TLC_MNG_VALID_VHT_HE_TYPES_MU,
+       IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED,
+       IWL_TLC_MNG_VALID_VHT_HE_TYPES_NUM =
+               IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED,
+
+};
+
+/**
+ * enum iwl_tlc_mng_ht_rates_enum - HT/VHT rates
+ * @IWL_TLC_MNG_HT_RATE_MCS0: index of MCS0
+ * @IWL_TLC_MNG_HT_RATE_MCS1: index of MCS1
+ * @IWL_TLC_MNG_HT_RATE_MCS2: index of MCS2
+ * @IWL_TLC_MNG_HT_RATE_MCS3: index of MCS3
+ * @IWL_TLC_MNG_HT_RATE_MCS4: index of MCS4
+ * @IWL_TLC_MNG_HT_RATE_MCS5: index of MCS5
+ * @IWL_TLC_MNG_HT_RATE_MCS6: index of MCS6
+ * @IWL_TLC_MNG_HT_RATE_MCS7: index of MCS7
+ * @IWL_TLC_MNG_HT_RATE_MCS8: index of MCS8
+ * @IWL_TLC_MNG_HT_RATE_MCS9: index of MCS9
+ * @IWL_TLC_MNG_HT_RATE_MAX: maximal rate for HT/VHT
+ */
+enum iwl_tlc_mng_ht_rates_enum {
+       IWL_TLC_MNG_HT_RATE_MCS0 = 0,
+       IWL_TLC_MNG_HT_RATE_MCS1,
+       IWL_TLC_MNG_HT_RATE_MCS2,
+       IWL_TLC_MNG_HT_RATE_MCS3,
+       IWL_TLC_MNG_HT_RATE_MCS4,
+       IWL_TLC_MNG_HT_RATE_MCS5,
+       IWL_TLC_MNG_HT_RATE_MCS6,
+       IWL_TLC_MNG_HT_RATE_MCS7,
+       IWL_TLC_MNG_HT_RATE_MCS8,
+       IWL_TLC_MNG_HT_RATE_MCS9,
+       IWL_TLC_MNG_HT_RATE_MAX = IWL_TLC_MNG_HT_RATE_MCS9,
+};
+
+/* Maximum supported tx antennas number */
+#define MAX_RS_ANT_NUM 3
+
+/**
+ * struct tlc_config_cmd - TLC configuration
+ * @sta_id: station id
+ * @reserved1: reserved
+ * @max_supp_ch_width: channel width
+ * @flags: bitmask of %IWL_TLC_MNG_CONFIG_FLAGS_ENABLE_\*
+ * @chains: bitmask of %IWL_TLC_MNG_CHAIN_\*
+ * @max_supp_ss: valid values are 0-3, 0 - spatial streams are not supported
+ * @valid_vht_he_types: bitmap of %IWL_TLC_MNG_VALID_VHT_HE_TYPES_\*
+ * @non_ht_supp_rates: bitmap of supported legacy rates
+ * @ht_supp_rates: bitmap of supported HT/VHT rates, valid bits are 0-9
+ * @mode: modulation type %IWL_TLC_MNG_MODE_\*
+ * @reserved2: reserved
+ * @he_supp_rates: bitmap of supported HE rates
+ * @sgi_ch_width_supp: bitmap of SGI support per channel width
+ * @he_gi_support: 11ax HE guard interval
+ * @max_ampdu_cnt: max AMPDU size (frames count)
+ */
+struct iwl_tlc_config_cmd {
+       u8 sta_id;
+       u8 reserved1[3];
+       u8 max_supp_ch_width;
+       u8 chains;
+       u8 max_supp_ss;
+       u8 valid_vht_he_types;
+       __le16 flags;
+       __le16 non_ht_supp_rates;
+       __le16 ht_supp_rates[MAX_RS_ANT_NUM];
+       u8 mode;
+       u8 reserved2;
+       __le16 he_supp_rates;
+       u8 sgi_ch_width_supp;
+       u8 he_gi_support;
+       __le32 max_ampdu_cnt;
+} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_1 */
+
+#define IWL_TLC_NOTIF_INIT_RATE_POS 0
+#define IWL_TLC_NOTIF_INIT_RATE_MSK BIT(IWL_TLC_NOTIF_INIT_RATE_POS)
+#define IWL_TLC_NOTIF_REQ_INTERVAL (500)
+
+/**
+ * struct iwl_tlc_notif_req_config_cmd - request notif on specific changes
+ * @sta_id: relevant station
+ * @reserved1: reserved
+ * @flags: bitmap of requested notifications %IWL_TLC_NOTIF_INIT_\*
+ * @interval: minimum time between notifications from TLC to the driver (msec)
+ * @reserved2: reserved
+ */
+struct iwl_tlc_notif_req_config_cmd {
+       u8 sta_id;
+       u8 reserved1;
+       __le16 flags;
+       __le16 interval;
+       __le16 reserved2;
+} __packed; /* TLC_MNG_NOTIF_REQ_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_tlc_update_notif - TLC notification from FW
+ * @sta_id: station id
+ * @reserved: reserved
+ * @flags: bitmap of notifications reported
+ * @values: field per flag in struct iwl_tlc_notif_req_config_cmd
+ */
+struct iwl_tlc_update_notif {
+       u8 sta_id;
+       u8 reserved;
+       __le16 flags;
+       __le32 values[16];
+} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_1 */
+
+/**
+ * enum iwl_tlc_debug_flags - debug options
+ * @IWL_TLC_DEBUG_FIXED_RATE: set fixed rate for rate scaling
+ * @IWL_TLC_DEBUG_STATS_TH: threshold for sending statistics to the driver, in
+ *     frames
+ * @IWL_TLC_DEBUG_STATS_TIME_TH: threshold for sending statistics to the
+ *     driver, in msec
+ * @IWL_TLC_DEBUG_AGG_TIME_LIM: time limit for a BA session
+ * @IWL_TLC_DEBUG_AGG_DIS_START_TH: frame with try-count greater than this
+ *     threshold should not start an aggregation session
+ * @IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM: set max number of frames in an aggregation
+ * @IWL_TLC_DEBUG_RENEW_ADDBA_DELAY: delay between retries of ADD BA
+ * @IWL_TLC_DEBUG_START_AC_RATE_IDX: frames per second to start a BA session
+ * @IWL_TLC_DEBUG_NO_FAR_RANGE_TWEAK: disable BW scaling
+ */
+enum iwl_tlc_debug_flags {
+       IWL_TLC_DEBUG_FIXED_RATE,
+       IWL_TLC_DEBUG_STATS_TH,
+       IWL_TLC_DEBUG_STATS_TIME_TH,
+       IWL_TLC_DEBUG_AGG_TIME_LIM,
+       IWL_TLC_DEBUG_AGG_DIS_START_TH,
+       IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM,
+       IWL_TLC_DEBUG_RENEW_ADDBA_DELAY,
+       IWL_TLC_DEBUG_START_AC_RATE_IDX,
+       IWL_TLC_DEBUG_NO_FAR_RANGE_TWEAK,
+}; /* TLC_MNG_DEBUG_FLAGS_API_E_VER_1 */
+
+/**
+ * struct iwl_dhc_tlc_dbg - fixed debug config
+ * @sta_id: bit 0 - enable/disable, bits 1 - 7 hold station id
+ * @reserved1: reserved
+ * @flags: bitmap of %IWL_TLC_DEBUG_\*
+ * @fixed_rate: rate value
+ * @stats_threshold: if number of tx-ed frames is greater, send statistics
+ * @time_threshold: statistics threshold in usec
+ * @agg_time_lim: max agg time
+ * @agg_dis_start_threshold: frames with try-count greater than this count will
+ *                          not be aggregated
+ * @agg_frame_count_lim: agg size
+ * @addba_retry_delay: delay between retries of ADD BA
+ * @start_ac_rate_idx: frames per second to start a BA session
+ * @no_far_range_tweak: disable BW scaling
+ * @reserved2: reserved
+ */
+struct iwl_dhc_tlc_cmd {
+       u8 sta_id;
+       u8 reserved1[3];
+       __le32 flags;
+       __le32 fixed_rate;
+       __le16 stats_threshold;
+       __le16 time_threshold;
+       __le16 agg_time_lim;
+       __le16 agg_dis_start_threshold;
+       __le16 agg_frame_count_lim;
+       __le16 addba_retry_delay;
+       u8 start_ac_rate_idx[IEEE80211_NUM_ACS];
+       u8 no_far_range_tweak;
+       u8 reserved2[3];
+} __packed;
+
 /*
  * These serve as indexes into
  * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT];
@@ -253,7 +514,6 @@ enum {
 #define RATE_MCS_ANT_ABC_MSK           (RATE_MCS_ANT_AB_MSK | \
                                         RATE_MCS_ANT_C_MSK)
 #define RATE_MCS_ANT_MSK               RATE_MCS_ANT_ABC_MSK
-#define RATE_MCS_ANT_NUM 3
 
 /* Bit 17: (0) SS, (1) SS*2 */
 #define RATE_MCS_STBC_POS              17
index f5d5ba7e37ecbb048f2fcef816c4cc1a2be6f010..a2a40b515a3c8301848462275cb79a5caba3c95b 100644 (file)
@@ -121,7 +121,7 @@ enum iwl_tx_flags {
 }; /* TX_FLAGS_BITS_API_S_VER_1 */
 
 /**
- * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for a000
+ * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for 22000
  * @IWL_TX_FLAGS_CMD_RATE: use rate from the TX command
  * @IWL_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs
  *     to a secured STA
@@ -301,7 +301,7 @@ struct iwl_dram_sec_info {
 } __packed; /* DRAM_SEC_INFO_API_S_VER_1 */
 
 /**
- * struct iwl_tx_cmd_gen2 - TX command struct to FW for a000 devices
+ * struct iwl_tx_cmd_gen2 - TX command struct to FW for 22000 devices
  * ( TX_CMD = 0x1c )
  * @len: in bytes of the payload, see below for details
  * @offload_assist: TX offload configuration
index 8106fd4be996bf5431206e9556cc85e33b79ccdb..67aefc8fc9acc51a61070cc828a3c27019746409 100644 (file)
@@ -964,7 +964,20 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
        if (trigger)
                delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
 
-       if (WARN(fwrt->trans->state == IWL_TRANS_NO_FW,
+       /*
+        * If the loading of the FW completed successfully, the next step is to
+        * get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is non
+        * zero, the FW was already loaded successfully. If the state is "NO_FW"
+        * in such a case - WARN and exit, since FW may be dead. Otherwise, we
+        * can try to collect the data, since FW might just not be fully
+        * loaded (no "ALIVE" yet), and the debug data is accessible.
+        *
+        * Corner case: got the FW alive but crashed before getting the SMEM
+        *      config. In such a case, due to HW access problems, we might
+        *      collect garbage.
+        */
+       if (WARN((fwrt->trans->state == IWL_TRANS_NO_FW) &&
+                fwrt->smem_cfg.num_lmacs,
                 "Can't collect dbg data when FW isn't alive\n"))
                return -EIO;
 
index 37a5c5b4eda6ca48eb24c6978c8aeba640e1886d..4687d016f6767ea219cbf5b5a37be1a312327448 100644 (file)
@@ -246,8 +246,6 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
  * @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignement.
  * @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2
  * @IWL_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used
- * @IWL_UCODE_TLV_API_ATS_COEX_EXTERNAL: the coex notification is enlared to
- *     include information about ACL time sharing.
  * @IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY: Quota command includes a field
  *     indicating low latency direction.
  *
@@ -267,7 +265,6 @@ enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_ADAPTIVE_DWELL        = (__force iwl_ucode_tlv_api_t)32,
        IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE   = (__force iwl_ucode_tlv_api_t)34,
        IWL_UCODE_TLV_API_NEW_RX_STATS          = (__force iwl_ucode_tlv_api_t)35,
-       IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL     = (__force iwl_ucode_tlv_api_t)37,
        IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY     = (__force iwl_ucode_tlv_api_t)38,
 
        NUM_IWL_UCODE_TLV_API
@@ -313,6 +310,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
  * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
  * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
  * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
+ * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
  * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
  * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
  * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
@@ -367,6 +365,7 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT          = (__force iwl_ucode_tlv_capa_t)39,
        IWL_UCODE_TLV_CAPA_CDB_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)40,
        IWL_UCODE_TLV_CAPA_D0I3_END_FIRST               = (__force iwl_ucode_tlv_capa_t)41,
+       IWL_UCODE_TLV_CAPA_TLC_OFFLOAD                  = (__force iwl_ucode_tlv_capa_t)43,
        IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE         = (__force iwl_ucode_tlv_capa_t)64,
        IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS            = (__force iwl_ucode_tlv_capa_t)65,
        IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT             = (__force iwl_ucode_tlv_capa_t)67,
index 76675736ba4f77c33f9069c1156863a4f71e8c14..fb4b6442b4d7d4b9b381194c5d5c03c93c98bfa0 100644 (file)
@@ -63,8 +63,8 @@
 #include "runtime.h"
 #include "fw/api/commands.h"
 
-static void iwl_parse_shared_mem_a000(struct iwl_fw_runtime *fwrt,
-                                     struct iwl_rx_packet *pkt)
+static void iwl_parse_shared_mem_22000(struct iwl_fw_runtime *fwrt,
+                                      struct iwl_rx_packet *pkt)
 {
        struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
        int i, lmac;
@@ -143,8 +143,8 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
                return;
 
        pkt = cmd.resp_pkt;
-       if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000)
-               iwl_parse_shared_mem_a000(fwrt, pkt);
+       if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
+               iwl_parse_shared_mem_22000(fwrt, pkt);
        else
                iwl_parse_shared_mem(fwrt, pkt);
 
index e21e46cf6f9a3ebad9926826f2168c2434ed4fd4..258d439bb0a9ce40782371fc33281270891bd5a5 100644 (file)
@@ -89,7 +89,7 @@ enum iwl_device_family {
        IWL_DEVICE_FAMILY_7000,
        IWL_DEVICE_FAMILY_8000,
        IWL_DEVICE_FAMILY_9000,
-       IWL_DEVICE_FAMILY_A000,
+       IWL_DEVICE_FAMILY_22000,
 };
 
 /*
@@ -266,7 +266,7 @@ struct iwl_tt_params {
 #define OTP_LOW_IMAGE_SIZE_FAMILY_7000 (16 * 512 * sizeof(u16)) /* 16 KB */
 #define OTP_LOW_IMAGE_SIZE_FAMILY_8000 (32 * 512 * sizeof(u16)) /* 32 KB */
 #define OTP_LOW_IMAGE_SIZE_FAMILY_9000 OTP_LOW_IMAGE_SIZE_FAMILY_8000
-#define OTP_LOW_IMAGE_SIZE_FAMILY_A000 OTP_LOW_IMAGE_SIZE_FAMILY_9000
+#define OTP_LOW_IMAGE_SIZE_FAMILY_22000        OTP_LOW_IMAGE_SIZE_FAMILY_9000
 
 struct iwl_eeprom_params {
        const u8 regulatory_bands[7];
@@ -330,7 +330,7 @@ struct iwl_pwr_tx_backoff {
  * @vht_mu_mimo_supported: VHT MU-MIMO support
  * @rf_id: need to read rf_id to determine the firmware image
  * @integrated: discrete or integrated
- * @gen2: a000 and on transport operation
+ * @gen2: 22000 and on transport operation
  * @cdb: CDB support
  * @nvm_type: see &enum iwl_nvm_type
  * @tx_cmd_queue_size: size of the cmd queue. If zero, use the same value as
@@ -477,13 +477,13 @@ extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
 extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
 extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
 extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
-extern const struct iwl_cfg iwla000_2ac_cfg_hr;
-extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb;
-extern const struct iwl_cfg iwla000_2ac_cfg_jf;
-extern const struct iwl_cfg iwla000_2ax_cfg_hr;
-extern const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_f0;
-extern const struct iwl_cfg iwla000_2ax_cfg_qnj_jf_b0;
-extern const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_a0;
+extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
+extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
+extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
+extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
 #endif /* CONFIG_IWLMVM */
 
 #endif /* __IWL_CONFIG_H__ */
index 4b224d7d967cc3192a649778b5045f29429e43af..de8f6ae2f51bdbd15c3cfe26ecf374610c386844 100644 (file)
@@ -919,9 +919,14 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        minor = le32_to_cpup(ptr++);
                        local_comp = le32_to_cpup(ptr);
 
-                       snprintf(drv->fw.fw_version,
-                                sizeof(drv->fw.fw_version), "%u.%u.%u",
-                                major, minor, local_comp);
+                       if (major >= 35)
+                               snprintf(drv->fw.fw_version,
+                                        sizeof(drv->fw.fw_version),
+                                       "%u.%08x.%u", major, minor, local_comp);
+                       else
+                               snprintf(drv->fw.fw_version,
+                                        sizeof(drv->fw.fw_version),
+                                       "%u.%u.%u", major, minor, local_comp);
                        break;
                        }
                case IWL_UCODE_TLV_FW_DBG_DEST: {
index 66e5db41e5594405ee52591d1cd21a10257f8b72..11789ffb6512037b6b4cdc4c8a997bf7e3225f45 100644 (file)
 #define FH_MEM_CBBC_16_19_UPPER_BOUND          (FH_MEM_LOWER_BOUND + 0xC00)
 #define FH_MEM_CBBC_20_31_LOWER_BOUND          (FH_MEM_LOWER_BOUND + 0xB20)
 #define FH_MEM_CBBC_20_31_UPPER_BOUND          (FH_MEM_LOWER_BOUND + 0xB80)
-/* a000 TFD table address, 64 bit */
+/* 22000 TFD table address, 64 bit */
 #define TFH_TFDQ_CBB_TABLE                     (0x1C00)
 
 /* Find TFD CB base pointer for given queue */
@@ -140,7 +140,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
        return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
 }
 
-/* a000 configuration registers */
+/* 22000 configuration registers */
 
 /*
  * TFH Configuration register.
@@ -697,8 +697,8 @@ struct iwl_tfh_tb {
  * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
  * Both driver and device share these circular buffers, each of which must be
  * contiguous 256 TFDs.
- * For pre a000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes
- * For a000 HW and on it is 256 x 256 bytes-per-TFD = 65 KBytes
+ * For pre 22000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes
+ * For 22000 HW and on it is 256 x 256 bytes-per-TFD = 65 KBytes
  *
  * Driver must indicate the physical address of the base of each
  * circular buffer via the FH_MEM_CBBC_QUEUE registers.
@@ -750,10 +750,10 @@ struct iwl_tfh_tfd {
 /**
  * struct iwlagn_schedq_bc_tbl scheduler byte count table
  *     base physical address provided by SCD_DRAM_BASE_ADDR
- * For devices up to a000:
+ * For devices up to 22000:
  * @tfd_offset  0-12 - tx command byte count
  *             12-16 - station index
- * For a000 and on:
+ * For 22000 and on:
  * @tfd_offset  0-12 - tx command byte count
  *             12-13 - number of 64 byte chunks
  *             14-16 - reserved
index 921cab9e2d737bf7fac47028339852dd60c201c6..84ae1e274d3855ebe25f307afd05372eed45423c 100644 (file)
@@ -551,7 +551,7 @@ struct iwl_trans_ops {
                           unsigned int queue_wdg_timeout);
        void (*txq_disable)(struct iwl_trans *trans, int queue,
                            bool configure_scd);
-       /* a000 functions */
+       /* 22000 functions */
        int (*txq_alloc)(struct iwl_trans *trans,
                         struct iwl_tx_queue_cfg_cmd *cmd,
                         int cmd_id,
index a47635c32c11175af17e0fba1087afcda5aab796..9ffd21918b5aa65a927fade7d0f1803a726d2d02 100644 (file)
@@ -2,7 +2,7 @@
 obj-$(CONFIG_IWLMVM)   += iwlmvm.o
 iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
 iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o
-iwlmvm-y += scan.o time-event.o rs.o
+iwlmvm-y += scan.o time-event.o rs.o rs-fw.o
 iwlmvm-y += power.o coex.o
 iwlmvm-y += tt.o offloading.o tdls.o
 iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
index 79c80f181f7d55031d5a5bc1cb3b3ae7291586f6..890dbfff3a068a835e5f3050ac708a00da588de7 100644 (file)
@@ -7,7 +7,6 @@
  *
  * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2017        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +33,6 @@
  *
  * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2017        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -514,36 +512,17 @@ void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
 
-       if (!iwl_mvm_has_new_ats_coex_api(mvm)) {
-               struct iwl_bt_coex_profile_notif_v4 *v4 = (void *)pkt->data;
-
-               mvm->last_bt_notif.mbox_msg[0] = v4->mbox_msg[0];
-               mvm->last_bt_notif.mbox_msg[1] = v4->mbox_msg[1];
-               mvm->last_bt_notif.mbox_msg[2] = v4->mbox_msg[2];
-               mvm->last_bt_notif.mbox_msg[3] = v4->mbox_msg[3];
-               mvm->last_bt_notif.msg_idx = v4->msg_idx;
-               mvm->last_bt_notif.bt_ci_compliance = v4->bt_ci_compliance;
-               mvm->last_bt_notif.primary_ch_lut = v4->primary_ch_lut;
-               mvm->last_bt_notif.secondary_ch_lut = v4->secondary_ch_lut;
-               mvm->last_bt_notif.bt_activity_grading =
-                       v4->bt_activity_grading;
-               mvm->last_bt_notif.ttc_status = v4->ttc_status;
-               mvm->last_bt_notif.rrc_status = v4->rrc_status;
-       } else {
-               /* save this notification for future use: rssi fluctuations */
-               memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
-       }
-
        IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
-       IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n",
-                      mvm->last_bt_notif.bt_ci_compliance);
+       IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
        IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
-                      le32_to_cpu(mvm->last_bt_notif.primary_ch_lut));
+                      le32_to_cpu(notif->primary_ch_lut));
        IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
-                      le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut));
+                      le32_to_cpu(notif->secondary_ch_lut));
        IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
-                      le32_to_cpu(mvm->last_bt_notif.bt_activity_grading));
+                      le32_to_cpu(notif->bt_activity_grading));
 
+       /* remember this notification for future use: rssi fluctuations */
+       memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
 
        iwl_mvm_bt_coex_notif_handle(mvm);
 }
index b1f73dcabd31e53ae664687b10856e90c5a0e707..0e6cf39285f405d8c42e65ea2d0ef7d4e86179f0 100644 (file)
@@ -429,231 +429,6 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
        return err;
 }
 
-enum iwl_mvm_tcp_packet_type {
-       MVM_TCP_TX_SYN,
-       MVM_TCP_RX_SYNACK,
-       MVM_TCP_TX_DATA,
-       MVM_TCP_RX_ACK,
-       MVM_TCP_RX_WAKE,
-       MVM_TCP_TX_FIN,
-};
-
-static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr)
-{
-       __sum16 check = tcp_v4_check(len, saddr, daddr, 0);
-       return cpu_to_le16(be16_to_cpu((__force __be16)check));
-}
-
-static void iwl_mvm_build_tcp_packet(struct ieee80211_vif *vif,
-                                    struct cfg80211_wowlan_tcp *tcp,
-                                    void *_pkt, u8 *mask,
-                                    __le16 *pseudo_hdr_csum,
-                                    enum iwl_mvm_tcp_packet_type ptype)
-{
-       struct {
-               struct ethhdr eth;
-               struct iphdr ip;
-               struct tcphdr tcp;
-               u8 data[];
-       } __packed *pkt = _pkt;
-       u16 ip_tot_len = sizeof(struct iphdr) + sizeof(struct tcphdr);
-       int i;
-
-       pkt->eth.h_proto = cpu_to_be16(ETH_P_IP),
-       pkt->ip.version = 4;
-       pkt->ip.ihl = 5;
-       pkt->ip.protocol = IPPROTO_TCP;
-
-       switch (ptype) {
-       case MVM_TCP_TX_SYN:
-       case MVM_TCP_TX_DATA:
-       case MVM_TCP_TX_FIN:
-               memcpy(pkt->eth.h_dest, tcp->dst_mac, ETH_ALEN);
-               memcpy(pkt->eth.h_source, vif->addr, ETH_ALEN);
-               pkt->ip.ttl = 128;
-               pkt->ip.saddr = tcp->src;
-               pkt->ip.daddr = tcp->dst;
-               pkt->tcp.source = cpu_to_be16(tcp->src_port);
-               pkt->tcp.dest = cpu_to_be16(tcp->dst_port);
-               /* overwritten for TX SYN later */
-               pkt->tcp.doff = sizeof(struct tcphdr) / 4;
-               pkt->tcp.window = cpu_to_be16(65000);
-               break;
-       case MVM_TCP_RX_SYNACK:
-       case MVM_TCP_RX_ACK:
-       case MVM_TCP_RX_WAKE:
-               memcpy(pkt->eth.h_dest, vif->addr, ETH_ALEN);
-               memcpy(pkt->eth.h_source, tcp->dst_mac, ETH_ALEN);
-               pkt->ip.saddr = tcp->dst;
-               pkt->ip.daddr = tcp->src;
-               pkt->tcp.source = cpu_to_be16(tcp->dst_port);
-               pkt->tcp.dest = cpu_to_be16(tcp->src_port);
-               break;
-       default:
-               WARN_ON(1);
-               return;
-       }
-
-       switch (ptype) {
-       case MVM_TCP_TX_SYN:
-               /* firmware assumes 8 option bytes - 8 NOPs for now */
-               memset(pkt->data, 0x01, 8);
-               ip_tot_len += 8;
-               pkt->tcp.doff = (sizeof(struct tcphdr) + 8) / 4;
-               pkt->tcp.syn = 1;
-               break;
-       case MVM_TCP_TX_DATA:
-               ip_tot_len += tcp->payload_len;
-               memcpy(pkt->data, tcp->payload, tcp->payload_len);
-               pkt->tcp.psh = 1;
-               pkt->tcp.ack = 1;
-               break;
-       case MVM_TCP_TX_FIN:
-               pkt->tcp.fin = 1;
-               pkt->tcp.ack = 1;
-               break;
-       case MVM_TCP_RX_SYNACK:
-               pkt->tcp.syn = 1;
-               pkt->tcp.ack = 1;
-               break;
-       case MVM_TCP_RX_ACK:
-               pkt->tcp.ack = 1;
-               break;
-       case MVM_TCP_RX_WAKE:
-               ip_tot_len += tcp->wake_len;
-               pkt->tcp.psh = 1;
-               pkt->tcp.ack = 1;
-               memcpy(pkt->data, tcp->wake_data, tcp->wake_len);
-               break;
-       }
-
-       switch (ptype) {
-       case MVM_TCP_TX_SYN:
-       case MVM_TCP_TX_DATA:
-       case MVM_TCP_TX_FIN:
-               pkt->ip.tot_len = cpu_to_be16(ip_tot_len);
-               pkt->ip.check = ip_fast_csum(&pkt->ip, pkt->ip.ihl);
-               break;
-       case MVM_TCP_RX_WAKE:
-               for (i = 0; i < DIV_ROUND_UP(tcp->wake_len, 8); i++) {
-                       u8 tmp = tcp->wake_mask[i];
-                       mask[i + 6] |= tmp << 6;
-                       if (i + 1 < DIV_ROUND_UP(tcp->wake_len, 8))
-                               mask[i + 7] = tmp >> 2;
-               }
-               /* fall through for ethernet/IP/TCP headers mask */
-       case MVM_TCP_RX_SYNACK:
-       case MVM_TCP_RX_ACK:
-               mask[0] = 0xff; /* match ethernet */
-               /*
-                * match ethernet, ip.version, ip.ihl
-                * the ip.ihl half byte is really masked out by firmware
-                */
-               mask[1] = 0x7f;
-               mask[2] = 0x80; /* match ip.protocol */
-               mask[3] = 0xfc; /* match ip.saddr, ip.daddr */
-               mask[4] = 0x3f; /* match ip.daddr, tcp.source, tcp.dest */
-               mask[5] = 0x80; /* match tcp flags */
-               /* leave rest (0 or set for MVM_TCP_RX_WAKE) */
-               break;
-       };
-
-       *pseudo_hdr_csum = pseudo_hdr_check(ip_tot_len - sizeof(struct iphdr),
-                                           pkt->ip.saddr, pkt->ip.daddr);
-}
-
-static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
-                                       struct ieee80211_vif *vif,
-                                       struct cfg80211_wowlan_tcp *tcp)
-{
-       struct iwl_wowlan_remote_wake_config *cfg;
-       struct iwl_host_cmd cmd = {
-               .id = REMOTE_WAKE_CONFIG_CMD,
-               .len = { sizeof(*cfg), },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-       };
-       int ret;
-
-       if (!tcp)
-               return 0;
-
-       cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
-       if (!cfg)
-               return -ENOMEM;
-       cmd.data[0] = cfg;
-
-       cfg->max_syn_retries = 10;
-       cfg->max_data_retries = 10;
-       cfg->tcp_syn_ack_timeout = 1; /* seconds */
-       cfg->tcp_ack_timeout = 1; /* seconds */
-
-       /* SYN (TX) */
-       iwl_mvm_build_tcp_packet(
-               vif, tcp, cfg->syn_tx.data, NULL,
-               &cfg->syn_tx.info.tcp_pseudo_header_checksum,
-               MVM_TCP_TX_SYN);
-       cfg->syn_tx.info.tcp_payload_length = 0;
-
-       /* SYN/ACK (RX) */
-       iwl_mvm_build_tcp_packet(
-               vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask,
-               &cfg->synack_rx.info.tcp_pseudo_header_checksum,
-               MVM_TCP_RX_SYNACK);
-       cfg->synack_rx.info.tcp_payload_length = 0;
-
-       /* KEEPALIVE/ACK (TX) */
-       iwl_mvm_build_tcp_packet(
-               vif, tcp, cfg->keepalive_tx.data, NULL,
-               &cfg->keepalive_tx.info.tcp_pseudo_header_checksum,
-               MVM_TCP_TX_DATA);
-       cfg->keepalive_tx.info.tcp_payload_length =
-               cpu_to_le16(tcp->payload_len);
-       cfg->sequence_number_offset = tcp->payload_seq.offset;
-       /* length must be 0..4, the field is little endian */
-       cfg->sequence_number_length = tcp->payload_seq.len;
-       cfg->initial_sequence_number = cpu_to_le32(tcp->payload_seq.start);
-       cfg->keepalive_interval = cpu_to_le16(tcp->data_interval);
-       if (tcp->payload_tok.len) {
-               cfg->token_offset = tcp->payload_tok.offset;
-               cfg->token_length = tcp->payload_tok.len;
-               cfg->num_tokens =
-                       cpu_to_le16(tcp->tokens_size % tcp->payload_tok.len);
-               memcpy(cfg->tokens, tcp->payload_tok.token_stream,
-                      tcp->tokens_size);
-       } else {
-               /* set tokens to max value to almost never run out */
-               cfg->num_tokens = cpu_to_le16(65535);
-       }
-
-       /* ACK (RX) */
-       iwl_mvm_build_tcp_packet(
-               vif, tcp, cfg->keepalive_ack_rx.data,
-               cfg->keepalive_ack_rx.rx_mask,
-               &cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum,
-               MVM_TCP_RX_ACK);
-       cfg->keepalive_ack_rx.info.tcp_payload_length = 0;
-
-       /* WAKEUP (RX) */
-       iwl_mvm_build_tcp_packet(
-               vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask,
-               &cfg->wake_rx.info.tcp_pseudo_header_checksum,
-               MVM_TCP_RX_WAKE);
-       cfg->wake_rx.info.tcp_payload_length =
-               cpu_to_le16(tcp->wake_len);
-
-       /* FIN */
-       iwl_mvm_build_tcp_packet(
-               vif, tcp, cfg->fin_tx.data, NULL,
-               &cfg->fin_tx.info.tcp_pseudo_header_checksum,
-               MVM_TCP_TX_FIN);
-       cfg->fin_tx.info.tcp_payload_length = 0;
-
-       ret = iwl_mvm_send_cmd(mvm, &cmd);
-       kfree(cfg);
-
-       return ret;
-}
-
 static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                struct ieee80211_sta *ap_sta)
 {
@@ -1082,12 +857,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
        if (ret)
                return ret;
 
-       ret = iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
-       if (ret)
-               return ret;
-
-       ret = iwl_mvm_send_remote_wake_cfg(mvm, vif, wowlan->tcp);
-       return ret;
+       return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
 }
 
 static int
index 2ff594f1125922bb5533d3eda0e4d98fc5c0b541..270781e13e893ed66f423e20d20d345c899995fb 100644 (file)
@@ -425,6 +425,50 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
+static ssize_t iwl_dbgfs_rs_data_read(struct file *file, char __user *user_buf,
+                                     size_t count, loff_t *ppos)
+{
+       struct ieee80211_sta *sta = file->private_data;
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+       struct iwl_mvm *mvm = lq_sta->pers.drv;
+       static const size_t bufsz = 2048;
+       char *buff;
+       int desc = 0;
+       ssize_t ret;
+
+       buff = kmalloc(bufsz, GFP_KERNEL);
+       if (!buff)
+               return -ENOMEM;
+
+       mutex_lock(&mvm->mutex);
+
+       desc += scnprintf(buff + desc, bufsz - desc, "sta_id %d\n",
+                         lq_sta->pers.sta_id);
+       desc += scnprintf(buff + desc, bufsz - desc,
+                         "fixed rate 0x%X\n",
+                         lq_sta->pers.dbg_fixed_rate);
+       desc += scnprintf(buff + desc, bufsz - desc,
+                         "A-MPDU size limit %d\n",
+                         lq_sta->pers.dbg_agg_frame_count_lim);
+       desc += scnprintf(buff + desc, bufsz - desc,
+                         "valid_tx_ant %s%s%s\n",
+               (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
+               (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "",
+               (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : "");
+       desc += scnprintf(buff + desc, bufsz - desc,
+                         "last tx rate=0x%X ",
+                         lq_sta->last_rate_n_flags);
+
+       desc += rs_pretty_print_rate(buff + desc, bufsz - desc,
+                                    lq_sta->last_rate_n_flags);
+       mutex_unlock(&mvm->mutex);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+       kfree(buff);
+       return ret;
+}
+
 static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file,
                                                char __user *user_buf,
                                                size_t count, loff_t *ppos)
@@ -470,8 +514,7 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
 }
 
 static
-int iwl_mvm_coex_dump_mbox(struct iwl_mvm *mvm,
-                          struct iwl_bt_coex_profile_notif *notif, char *buf,
+int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf,
                           int pos, int bufsz)
 {
        pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
@@ -525,12 +568,7 @@ int iwl_mvm_coex_dump_mbox(struct iwl_mvm *mvm,
        BT_MBOX_PRINT(3, INBAND_P, false);
        BT_MBOX_PRINT(3, MSG_TYPE_2, false);
        BT_MBOX_PRINT(3, SSN_2, false);
-       BT_MBOX_PRINT(3, UPDATE_REQUEST, !iwl_mvm_has_new_ats_coex_api(mvm));
-
-       if (iwl_mvm_has_new_ats_coex_api(mvm)) {
-               BT_MBOX_PRINT(4, ATS_BT_INTERVAL, false);
-               BT_MBOX_PRINT(4, ATS_BT_ACTIVE_MAX_TH, true);
-       }
+       BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
 
        return pos;
 }
@@ -549,7 +587,7 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
 
        mutex_lock(&mvm->mutex);
 
-       pos += iwl_mvm_coex_dump_mbox(mvm, notif, buf, pos, bufsz);
+       pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
 
        pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n",
                         notif->bt_ci_compliance);
@@ -721,6 +759,9 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
 
        mutex_lock(&mvm->mutex);
 
+       if (iwl_mvm_firmware_running(mvm))
+               iwl_mvm_request_statistics(mvm, false);
+
        pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
                         "Statistics_Rx - OFDM");
        if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
@@ -936,7 +977,8 @@ static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm,
                        continue;
                pos += scnprintf(pos, endpos - pos, "Rate[%d]: ",
                                 (int)(ARRAY_SIZE(stats->last_rates) - i));
-               pos += rs_pretty_print_rate(pos, stats->last_rates[idx]);
+               pos += rs_pretty_print_rate(pos, endpos - pos,
+                                           stats->last_rates[idx]);
        }
        spin_unlock_bh(&mvm->drv_stats_lock);
 
@@ -1603,6 +1645,19 @@ static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf,
 #define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \
        MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
 
+#define MVM_DEBUGFS_WRITE_STA_FILE_OPS(name, bufsz) \
+       _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta)
+#define MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(name, bufsz) \
+       _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta)
+
+#define MVM_DEBUGFS_ADD_STA_FILE_ALIAS(alias, name, parent, mode) do { \
+               if (!debugfs_create_file(alias, mode, parent, sta,      \
+                                        &iwl_dbgfs_##name##_ops))      \
+                       goto err;                                       \
+       } while (0)
+#define MVM_DEBUGFS_ADD_STA_FILE(name, parent, mode) \
+       MVM_DEBUGFS_ADD_STA_FILE_ALIAS(#name, name, parent, mode)
+
 static ssize_t
 iwl_dbgfs_prph_reg_read(struct file *file,
                        char __user *user_buf,
@@ -1687,6 +1742,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64);
 MVM_DEBUGFS_READ_FILE_OPS(nic_temp);
 MVM_DEBUGFS_READ_FILE_OPS(stations);
+MVM_DEBUGFS_READ_FILE_OPS(rs_data);
 MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
 MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
@@ -1851,6 +1907,21 @@ static const struct file_operations iwl_dbgfs_mem_ops = {
        .llseek = default_llseek,
 };
 
+void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif,
+                            struct ieee80211_sta *sta,
+                            struct dentry *dir)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD))
+               MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, S_IRUSR);
+
+       return;
+err:
+       IWL_ERR(mvm, "Can't create the mvm station debugfs entry\n");
+}
+
 int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
 {
        struct dentry *bcast_dir __maybe_unused;
index c0de7bb86cf7ea9fad58fb4a0c9aeff600385e7b..0920be637b575009e70f579862d4ae612f09c44d 100644 (file)
@@ -297,7 +297,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
        if (ret) {
                struct iwl_trans *trans = mvm->trans;
 
-               if (trans->cfg->device_family == IWL_DEVICE_FAMILY_A000)
+               if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
                        IWL_ERR(mvm,
                                "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
                                iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
@@ -923,11 +923,11 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
 
        ret = iwl_run_init_mvm_ucode(mvm, false);
 
-       if (iwlmvm_mod_params.init_dbg)
-               return 0;
-
        if (ret) {
                IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
+
+               if (iwlmvm_mod_params.init_dbg)
+                       return 0;
                return ret;
        }
 
@@ -998,9 +998,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
                goto error;
 
        /* Init RSS configuration */
-       /* TODO - remove a000 disablement when we have RXQ config API */
+       /* TODO - remove 22000 disablement when we have RXQ config API */
        if (iwl_mvm_has_new_rx_api(mvm) &&
-           mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_A000) {
+           mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_22000) {
                ret = iwl_send_rss_cfg_cmd(mvm);
                if (ret) {
                        IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
@@ -1111,7 +1111,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
        return 0;
  error:
-       if (!iwlmvm_mod_params.init_dbg)
+       if (!iwlmvm_mod_params.init_dbg || !ret)
                iwl_mvm_stop_device(mvm);
        return ret;
 }
index 3e92a117c0b86b63484358043843823fcbf71efc..4f5686526d4b385b69094a9713ad0d2472e464d0 100644 (file)
@@ -114,29 +114,6 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
        },
 };
 
-#ifdef CONFIG_PM_SLEEP
-static const struct nl80211_wowlan_tcp_data_token_feature
-iwl_mvm_wowlan_tcp_token_feature = {
-       .min_len = 0,
-       .max_len = 255,
-       .bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
-};
-
-static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
-       .tok = &iwl_mvm_wowlan_tcp_token_feature,
-       .data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
-                           sizeof(struct ethhdr) -
-                           sizeof(struct iphdr) -
-                           sizeof(struct tcphdr),
-       .data_interval_max = 65535, /* __le16 in API */
-       .wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
-                           sizeof(struct ethhdr) -
-                           sizeof(struct iphdr) -
-                           sizeof(struct tcphdr),
-       .seq = true,
-};
-#endif
-
 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
 /*
  * Use the reserved field to indicate magic values.
@@ -443,6 +420,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
        ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
        ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
+
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD)) {
+               ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+               ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+       }
+
        if (iwl_mvm_has_new_rx_api(mvm))
                ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
 
@@ -477,7 +460,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        /* this is the case for CCK frames, it's better (only 8) for OFDM */
        hw->radiotap_timestamp.accuracy = 22;
 
-       hw->rate_control_algorithm = "iwl-mvm-rs";
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD))
+               hw->rate_control_algorithm = RS_NAME;
+
        hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
        hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
 
@@ -702,7 +687,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
                mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
                mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
-               mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
                hw->wiphy->wowlan = &mvm->wowlan;
        }
 #endif
@@ -3216,6 +3200,10 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
        IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
                           duration, type);
 
+       /*
+        * Flush the done work, just in case it's still pending, so that
+        * the work it does can complete and we can accept new frames.
+        */
        flush_work(&mvm->roc_done_wk);
 
        mutex_lock(&mvm->mutex);
@@ -4301,7 +4289,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
                           mvm->trans->num_rx_queues);
 
        /* TODO - remove this when we have RXQ config API */
-       if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000) {
+       if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) {
                qmask = BIT(0);
                if (notif->sync)
                        atomic_set(&mvm->queue_sync_counter, 1);
@@ -4414,4 +4402,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
 #endif
        .get_survey = iwl_mvm_mac_get_survey,
        .sta_statistics = iwl_mvm_mac_sta_statistics,
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       .sta_add_debugfs = iwl_mvm_sta_add_debugfs,
+#endif
 };
index 55ab5349dd40d8b886373df6cb0715e977650309..5ecba2b9bc991beeffb6e120b1972c9ae70d4a32 100644 (file)
@@ -1248,7 +1248,7 @@ static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
 static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm)
 {
        /* TODO - better define this */
-       return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_A000;
+       return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000;
 }
 
 static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
@@ -1272,12 +1272,6 @@ static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm)
                          IWL_UCODE_TLV_API_NEW_RX_STATS);
 }
 
-static inline bool iwl_mvm_has_new_ats_coex_api(struct iwl_mvm *mvm)
-{
-       return fw_has_api(&mvm->fw->ucode_capa,
-                         IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL);
-}
-
 static inline bool iwl_mvm_has_quota_low_latency(struct iwl_mvm *mvm)
 {
        return fw_has_api(&mvm->fw->ucode_capa,
@@ -1600,9 +1594,9 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 /* rate scaling */
 int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
 void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
-int rs_pretty_print_rate(char *buf, const u32 rate);
+int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
 void rs_update_last_rssi(struct iwl_mvm *mvm,
-                        struct iwl_lq_sta *lq_sta,
+                        struct iwl_mvm_sta *mvmsta,
                         struct ieee80211_rx_status *rx_status);
 
 /* power management */
@@ -1882,5 +1876,11 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
 
 int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b);
 int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif,
+                            struct ieee80211_sta *sta,
+                            struct dentry *dir);
+#endif
 
 #endif /* __IWL_MVM_H__ */
index 45470b6b351a9a31734b3a77f04fcbd99a8526eb..aab4aeaee58c6c1ebfecfc54ad7ef13b55cebe6f 100644 (file)
@@ -127,11 +127,8 @@ static int __init iwl_mvm_init(void)
        }
 
        ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
-
-       if (ret) {
+       if (ret)
                pr_err("Unable to register MVM op_mode: %d\n", ret);
-               iwl_mvm_rate_control_unregister();
-       }
 
        return ret;
 }
@@ -750,7 +747,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        mutex_lock(&mvm->mutex);
        iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
        err = iwl_run_init_mvm_ucode(mvm, true);
-       if (!iwlmvm_mod_params.init_dbg)
+       if (!iwlmvm_mod_params.init_dbg || !err)
                iwl_mvm_stop_device(mvm);
        iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
        mutex_unlock(&mvm->mutex);
@@ -1021,6 +1018,8 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
                iwl_mvm_rx_queue_notif(mvm, rxb, 0);
        else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
                iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
+       else if (cmd == WIDE_ID(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF))
+               iwl_mvm_tlc_update_notif(mvm, pkt);
        else
                iwl_mvm_rx_common(mvm, rxb, pkt);
 }
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
new file mode 100644 (file)
index 0000000..55d1274
--- /dev/null
@@ -0,0 +1,314 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017        Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017        Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "rs.h"
+#include "fw-api.h"
+#include "sta.h"
+#include "iwl-op-mode.h"
+#include "mvm.h"
+
+static u8 rs_fw_bw_from_sta_bw(struct ieee80211_sta *sta)
+{
+       switch (sta->bandwidth) {
+       case IEEE80211_STA_RX_BW_160:
+               return IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ;
+       case IEEE80211_STA_RX_BW_80:
+               return IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ;
+       case IEEE80211_STA_RX_BW_40:
+               return IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ;
+       case IEEE80211_STA_RX_BW_20:
+       default:
+               return IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ;
+       }
+}
+
+static u8 rs_fw_set_active_chains(u8 chains)
+{
+       u8 fw_chains = 0;
+
+       if (chains & ANT_A)
+               fw_chains |= IWL_TLC_MNG_CHAIN_A_MSK;
+       if (chains & ANT_B)
+               fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK;
+       if (chains & ANT_C)
+               fw_chains |= IWL_TLC_MNG_CHAIN_C_MSK;
+
+       return fw_chains;
+}
+
+static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
+{
+       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+       struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+       u8 supp = 0;
+
+       if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+               supp |= IWL_TLC_MNG_SGI_20MHZ_MSK;
+       if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+               supp |= IWL_TLC_MNG_SGI_40MHZ_MSK;
+       if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+               supp |= IWL_TLC_MNG_SGI_80MHZ_MSK;
+       if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+               supp |= IWL_TLC_MNG_SGI_160MHZ_MSK;
+
+       return supp;
+}
+
+static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
+                                 struct ieee80211_sta *sta)
+{
+       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+       struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+       bool vht_ena = vht_cap && vht_cap->vht_supported;
+       u16 flags = IWL_TLC_MNG_CFG_FLAGS_CCK_MSK |
+                   IWL_TLC_MNG_CFG_FLAGS_DCM_MSK |
+                   IWL_TLC_MNG_CFG_FLAGS_DD_MSK;
+
+       if (mvm->cfg->ht_params->stbc &&
+           (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
+           ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) ||
+            (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))))
+               flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+
+       if (mvm->cfg->ht_params->ldpc &&
+           ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)) ||
+            (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
+               flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+           (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
+           (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE))
+               flags |= IWL_TLC_MNG_CFG_FLAGS_BF_MSK;
+
+       return flags;
+}
+
+static
+int rs_fw_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
+                                  int nss)
+{
+       u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
+               (0x3 << (2 * (nss - 1)));
+       rx_mcs >>= (2 * (nss - 1));
+
+       switch (rx_mcs) {
+       case IEEE80211_VHT_MCS_SUPPORT_0_7:
+               return IWL_TLC_MNG_HT_RATE_MCS7;
+       case IEEE80211_VHT_MCS_SUPPORT_0_8:
+               return IWL_TLC_MNG_HT_RATE_MCS8;
+       case IEEE80211_VHT_MCS_SUPPORT_0_9:
+               return IWL_TLC_MNG_HT_RATE_MCS9;
+       default:
+               WARN_ON_ONCE(1);
+               break;
+       }
+
+       return 0;
+}
+
+static void rs_fw_vht_set_enabled_rates(struct ieee80211_sta *sta,
+                                       struct ieee80211_sta_vht_cap *vht_cap,
+                                       struct iwl_tlc_config_cmd *cmd)
+{
+       u16 supp;
+       int i, highest_mcs;
+
+       for (i = 0; i < sta->rx_nss; i++) {
+               if (i == MAX_RS_ANT_NUM)
+                       break;
+
+               highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1);
+               if (!highest_mcs)
+                       continue;
+
+               supp = BIT(highest_mcs + 1) - 1;
+               if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+                       supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
+
+               cmd->ht_supp_rates[i] = cpu_to_le16(supp);
+       }
+}
+
+static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
+                                struct ieee80211_supported_band *sband,
+                                struct iwl_tlc_config_cmd *cmd)
+{
+       int i;
+       unsigned long tmp;
+       unsigned long supp; /* must be unsigned long for for_each_set_bit */
+       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+       struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+
+       /* non HT rates */
+       supp = 0;
+       tmp = sta->supp_rates[sband->band];
+       for_each_set_bit(i, &tmp, BITS_PER_LONG)
+               supp |= BIT(sband->bitrates[i].hw_value);
+
+       cmd->non_ht_supp_rates = cpu_to_le16(supp);
+       cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
+
+       /* HT/VHT rates */
+       if (vht_cap && vht_cap->vht_supported) {
+               cmd->mode = IWL_TLC_MNG_MODE_VHT;
+               rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
+       } else if (ht_cap && ht_cap->ht_supported) {
+               cmd->mode = IWL_TLC_MNG_MODE_HT;
+               cmd->ht_supp_rates[0] = cpu_to_le16(ht_cap->mcs.rx_mask[0]);
+               cmd->ht_supp_rates[1] = cpu_to_le16(ht_cap->mcs.rx_mask[1]);
+       }
+}
+
+static void rs_fw_tlc_mng_notif_req_config(struct iwl_mvm *mvm, u8 sta_id)
+{
+       u32 cmd_id = iwl_cmd_id(TLC_MNG_NOTIF_REQ_CMD, DATA_PATH_GROUP, 0);
+       struct iwl_tlc_notif_req_config_cmd cfg_cmd = {
+               .sta_id = sta_id,
+               .flags = cpu_to_le16(IWL_TLC_NOTIF_INIT_RATE_MSK),
+               .interval = cpu_to_le16(IWL_TLC_NOTIF_REQ_INTERVAL),
+       };
+       int ret;
+
+       ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd);
+       if (ret)
+               IWL_ERR(mvm, "Failed to send TLC notif request (%d)\n", ret);
+}
+
+void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
+{
+       struct iwl_tlc_update_notif *notif;
+       struct iwl_mvm_sta *mvmsta;
+       struct iwl_lq_sta_rs_fw *lq_sta;
+
+       notif = (void *)pkt->data;
+       mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, notif->sta_id);
+
+       if (!mvmsta) {
+               IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n",
+                       notif->sta_id);
+               return;
+       }
+
+       lq_sta = &mvmsta->lq_sta.rs_fw;
+
+       if (le16_to_cpu(notif->flags) & IWL_TLC_NOTIF_INIT_RATE_MSK) {
+               lq_sta->last_rate_n_flags =
+                       le32_to_cpu(notif->values[IWL_TLC_NOTIF_INIT_RATE_POS]);
+               IWL_DEBUG_RATE(mvm, "new rate_n_flags: 0x%X\n",
+                              lq_sta->last_rate_n_flags);
+       }
+}
+
+void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+                    enum nl80211_band band)
+{
+       struct ieee80211_hw *hw = mvm->hw;
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+       u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0);
+       struct ieee80211_supported_band *sband;
+       struct iwl_tlc_config_cmd cfg_cmd = {
+               .sta_id = mvmsta->sta_id,
+               .max_supp_ch_width = rs_fw_bw_from_sta_bw(sta),
+               .flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)),
+               .chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
+               .max_supp_ss = sta->rx_nss,
+               .max_ampdu_cnt = cpu_to_le32(mvmsta->max_agg_bufsize),
+               .sgi_ch_width_supp = rs_fw_sgi_cw_support(sta),
+       };
+       int ret;
+
+       memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       iwl_mvm_reset_frame_stats(mvm);
+#endif
+       sband = hw->wiphy->bands[band];
+       rs_fw_set_supp_rates(sta, sband, &cfg_cmd);
+
+       ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd);
+       if (ret)
+               IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret);
+
+       rs_fw_tlc_mng_notif_req_config(mvm, cfg_cmd.sta_id);
+}
+
+int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+                       bool enable)
+{
+       /* TODO: need to introduce a new FW cmd since LQ cmd is not relevant */
+       IWL_DEBUG_RATE(mvm, "tx protection - not implemented yet.\n");
+       return 0;
+}
+
+void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta)
+{
+       struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+
+       IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
+
+       lq_sta->pers.drv = mvm;
+       lq_sta->pers.sta_id = mvmsta->sta_id;
+       lq_sta->pers.chains = 0;
+       memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
+       lq_sta->pers.last_rssi = S8_MIN;
+       lq_sta->last_rate_n_flags = 0;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+       lq_sta->pers.dbg_fixed_rate = 0;
+#endif
+}
index c69515ed72dfee73e1b4c4918328a6cc0b77e466..56b3cf1834e581f0a781ad026e8750bcd0b27fa7 100644 (file)
@@ -42,8 +42,6 @@
 #include "mvm.h"
 #include "debugfs.h"
 
-#define RS_NAME "iwl-mvm-rs"
-
 #define IWL_RATE_MAX_WINDOW            62      /* # tx in history window */
 
 /* Calculations of success ratio are done in fixed point where 12800 is 100%.
@@ -809,7 +807,7 @@ static int rs_collect_tlc_data(struct iwl_mvm *mvm,
                return -EINVAL;
 
        if (tbl->column != RS_COLUMN_INVALID) {
-               struct lq_sta_pers *pers = &mvmsta->lq_sta.pers;
+               struct lq_sta_pers *pers = &mvmsta->lq_sta.rs_drv.pers;
 
                pers->tx_stats[tbl->column][scale_index].total += attempts;
                pers->tx_stats[tbl->column][scale_index].success += successes;
@@ -1206,7 +1204,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
        u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
+       struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
 
        /* Treat uninitialized rate scaling data same as non-existing. */
        if (!lq_sta) {
@@ -1416,13 +1414,13 @@ done:
 /*
  * mac80211 sends us Tx status
  */
-static void rs_mac80211_tx_status(void *mvm_r,
-                                 struct ieee80211_supported_band *sband,
-                                 struct ieee80211_sta *sta, void *priv_sta,
-                                 struct sk_buff *skb)
+static void rs_drv_mac80211_tx_status(void *mvm_r,
+                                     struct ieee80211_supported_band *sband,
+                                     struct ieee80211_sta *sta, void *priv_sta,
+                                     struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_r;
+       struct iwl_op_mode *op_mode = mvm_r;
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
@@ -1877,12 +1875,10 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
        struct rs_rate *rate = &search_tbl->rate;
        const struct rs_tx_column *column = &rs_tx_columns[col_id];
        const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
-       u32 sz = (sizeof(struct iwl_scale_tbl_info) -
-                 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
        unsigned long rate_mask = 0;
        u32 rate_idx = 0;
 
-       memcpy(search_tbl, tbl, sz);
+       memcpy(search_tbl, tbl, offsetof(struct iwl_scale_tbl_info, win));
 
        rate->sgi = column->sgi;
        rate->ant = column->ant;
@@ -2787,9 +2783,10 @@ out:
 
 /* Save info about RSSI of last Rx */
 void rs_update_last_rssi(struct iwl_mvm *mvm,
-                        struct iwl_lq_sta *lq_sta,
+                        struct iwl_mvm_sta *mvmsta,
                         struct ieee80211_rx_status *rx_status)
 {
+       struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
        int i;
 
        lq_sta->pers.chains = rx_status->chains;
@@ -2858,15 +2855,15 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
        iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, init);
 }
 
-static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
-                       struct ieee80211_tx_rate_control *txrc)
+static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
+                           void *mvm_sta,
+                           struct ieee80211_tx_rate_control *txrc)
 {
-       struct sk_buff *skb = txrc->skb;
-       struct iwl_op_mode *op_mode __maybe_unused =
-                       (struct iwl_op_mode *)mvm_r;
+       struct iwl_op_mode *op_mode = mvm_r;
        struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
+       struct sk_buff *skb = txrc->skb;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct iwl_lq_sta *lq_sta = mvm_sta;
+       struct iwl_lq_sta *lq_sta;
        struct rs_rate *optimal_rate;
        u32 last_ucode_rate;
 
@@ -2878,18 +2875,14 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
                mvm_sta = NULL;
        }
 
-       /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
-
-       /* Treat uninitialized rate scaling data same as non-existing. */
-       if (lq_sta && !lq_sta->pers.drv) {
-               IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
-               mvm_sta = NULL;
-       }
-
        /* Send management frames and NO_ACK data using lowest rate. */
        if (rate_control_send_low(sta, mvm_sta, txrc))
                return;
 
+       if (!mvm_sta)
+               return;
+
+       lq_sta = mvm_sta;
        iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
                                  info->band, &info->control.rates[0]);
        info->control.rates[0].count = 1;
@@ -2906,13 +2899,13 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
        }
 }
 
-static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
-                         gfp_t gfp)
+static void *rs_drv_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
+                             gfp_t gfp)
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_rate;
        struct iwl_mvm *mvm  = IWL_OP_MODE_GET_MVM(op_mode);
-       struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
+       struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
 
        IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
 
@@ -2926,7 +2919,7 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
        memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
        lq_sta->pers.last_rssi = S8_MIN;
 
-       return &mvmsta->lq_sta;
+       return lq_sta;
 }
 
 static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
@@ -3043,7 +3036,7 @@ static void rs_vht_init(struct iwl_mvm *mvm,
 }
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm)
+void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm)
 {
        spin_lock_bh(&mvm->drv_stats_lock);
        memset(&mvm->drv_rx_stats, 0, sizeof(mvm->drv_rx_stats));
@@ -3111,15 +3104,15 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
 /*
  * Called after adding a new station to initialize rate scaling
  */
-void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                         enum nl80211_band band, bool init)
+static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+                            enum nl80211_band band, bool init)
 {
        int i, j;
        struct ieee80211_hw *hw = mvm->hw;
        struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
        struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
+       struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
        struct ieee80211_supported_band *sband;
        unsigned long supp; /* must be unsigned long for for_each_set_bit */
 
@@ -3194,16 +3187,15 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        rs_initialize_lq(mvm, sta, lq_sta, band, init);
 }
 
-static void rs_rate_update(void *mvm_r,
-                          struct ieee80211_supported_band *sband,
-                          struct cfg80211_chan_def *chandef,
-                          struct ieee80211_sta *sta, void *priv_sta,
-                          u32 changed)
+static void rs_drv_rate_update(void *mvm_r,
+                              struct ieee80211_supported_band *sband,
+                              struct cfg80211_chan_def *chandef,
+                              struct ieee80211_sta *sta,
+                              void *priv_sta, u32 changed)
 {
+       struct iwl_op_mode *op_mode = mvm_r;
+       struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
        u8 tid;
-       struct iwl_op_mode *op_mode  =
-                       (struct iwl_op_mode *)mvm_r;
-       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 
        if (!iwl_mvm_sta_from_mac80211(sta)->vif)
                return;
@@ -3385,7 +3377,7 @@ static void rs_bfer_active_iter(void *_data,
 {
        struct rs_bfer_active_iter_data *data = _data;
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.lq;
+       struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.rs_drv.lq;
        u32 ss_params = le32_to_cpu(lq_cmd->ss_params);
 
        if (sta == data->exclude_sta)
@@ -3497,7 +3489,8 @@ static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
 
        /* Disallow BFER on another STA if active and we're a higher priority */
        if (rs_bfer_priority_cmp(mvmsta, bfer_mvmsta) > 0) {
-               struct iwl_lq_cmd *bfersta_lq_cmd = &bfer_mvmsta->lq_sta.lq;
+               struct iwl_lq_cmd *bfersta_lq_cmd =
+                       &bfer_mvmsta->lq_sta.rs_drv.lq;
                u32 bfersta_ss_params = le32_to_cpu(bfersta_lq_cmd->ss_params);
 
                bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED;
@@ -3569,14 +3562,14 @@ static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
 {
        return hw->priv;
 }
+
 /* rate scale requires free function to be implemented */
 static void rs_free(void *mvm_rate)
 {
        return;
 }
 
-static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
-                       void *mvm_sta)
+static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta)
 {
        struct iwl_op_mode *op_mode __maybe_unused = mvm_r;
        struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
@@ -3586,7 +3579,7 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
 }
 
 #ifdef CONFIG_MAC80211_DEBUGFS
-int rs_pretty_print_rate(char *buf, const u32 rate)
+int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
 {
 
        char *type, *bw;
@@ -3597,10 +3590,10 @@ int rs_pretty_print_rate(char *buf, const u32 rate)
            !(rate & RATE_MCS_VHT_MSK)) {
                int index = iwl_hwrate_to_plcp_idx(rate);
 
-               return sprintf(buf, "Legacy | ANT: %s Rate: %s Mbps\n",
-                              rs_pretty_ant(ant),
-                              index == IWL_RATE_INVALID ? "BAD" :
-                              iwl_rate_mcs[index].mbps);
+               return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps\n",
+                                rs_pretty_ant(ant),
+                                index == IWL_RATE_INVALID ? "BAD" :
+                                iwl_rate_mcs[index].mbps);
        }
 
        if (rate & RATE_MCS_VHT_MSK) {
@@ -3634,12 +3627,13 @@ int rs_pretty_print_rate(char *buf, const u32 rate)
                bw = "BAD BW";
        }
 
-       return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
-                      type, rs_pretty_ant(ant), bw, mcs, nss,
-                      (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
-                      (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
-                      (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
-                      (rate & RATE_MCS_BF_MSK) ? "BF " : "");
+       return scnprintf(buf, bufsz,
+                        "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
+                        type, rs_pretty_ant(ant), bw, mcs, nss,
+                        (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
+                        (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
+                        (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
+                        (rate & RATE_MCS_BF_MSK) ? "BF " : "");
 }
 
 /**
@@ -3696,65 +3690,70 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
        int desc = 0;
        int i = 0;
        ssize_t ret;
+       static const size_t bufsz = 2048;
 
        struct iwl_lq_sta *lq_sta = file->private_data;
        struct iwl_mvm_sta *mvmsta =
-               container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
+               container_of(lq_sta, struct iwl_mvm_sta, lq_sta.rs_drv);
        struct iwl_mvm *mvm;
        struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
        struct rs_rate *rate = &tbl->rate;
        u32 ss_params;
 
        mvm = lq_sta->pers.drv;
-       buff = kmalloc(2048, GFP_KERNEL);
+       buff = kmalloc(bufsz, GFP_KERNEL);
        if (!buff)
                return -ENOMEM;
 
-       desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
-       desc += sprintf(buff+desc, "failed=%d success=%d rate=0%lX\n",
-                       lq_sta->total_failed, lq_sta->total_success,
-                       lq_sta->active_legacy_rate);
-       desc += sprintf(buff+desc, "fixed rate 0x%X\n",
-                       lq_sta->pers.dbg_fixed_rate);
-       desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
+       desc += scnprintf(buff + desc, bufsz - desc,
+                         "sta_id %d\n", lq_sta->lq.sta_id);
+       desc += scnprintf(buff + desc, bufsz - desc,
+                         "failed=%d success=%d rate=0%lX\n",
+                         lq_sta->total_failed, lq_sta->total_success,
+                         lq_sta->active_legacy_rate);
+       desc += scnprintf(buff + desc, bufsz - desc, "fixed rate 0x%X\n",
+                         lq_sta->pers.dbg_fixed_rate);
+       desc += scnprintf(buff + desc, bufsz - desc, "valid_tx_ant %s%s%s\n",
            (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
            (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "",
            (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : "");
-       desc += sprintf(buff+desc, "lq type %s\n",
-                       (is_legacy(rate)) ? "legacy" :
-                       is_vht(rate) ? "VHT" : "HT");
+       desc += scnprintf(buff + desc, bufsz - desc, "lq type %s\n",
+                         (is_legacy(rate)) ? "legacy" :
+                         is_vht(rate) ? "VHT" : "HT");
        if (!is_legacy(rate)) {
-               desc += sprintf(buff + desc, " %s",
+               desc += scnprintf(buff + desc, bufsz - desc, " %s",
                   (is_siso(rate)) ? "SISO" : "MIMO2");
-               desc += sprintf(buff + desc, " %s",
+               desc += scnprintf(buff + desc, bufsz - desc, " %s",
                                (is_ht20(rate)) ? "20MHz" :
                                (is_ht40(rate)) ? "40MHz" :
                                (is_ht80(rate)) ? "80MHz" :
                                (is_ht160(rate)) ? "160MHz" : "BAD BW");
-               desc += sprintf(buff + desc, " %s %s %s %s\n",
+               desc += scnprintf(buff + desc, bufsz - desc, " %s %s %s %s\n",
                                (rate->sgi) ? "SGI" : "NGI",
                                (rate->ldpc) ? "LDPC" : "BCC",
                                (lq_sta->is_agg) ? "AGG on" : "",
                                (mvmsta->tlc_amsdu) ? "AMSDU on" : "");
        }
-       desc += sprintf(buff+desc, "last tx rate=0x%X\n",
+       desc += scnprintf(buff + desc, bufsz - desc, "last tx rate=0x%X\n",
                        lq_sta->last_rate_n_flags);
-       desc += sprintf(buff+desc,
+       desc += scnprintf(buff + desc, bufsz - desc,
                        "general: flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
                        lq_sta->lq.flags,
                        lq_sta->lq.mimo_delim,
                        lq_sta->lq.single_stream_ant_msk,
                        lq_sta->lq.dual_stream_ant_msk);
 
-       desc += sprintf(buff+desc,
+       desc += scnprintf(buff + desc, bufsz - desc,
                        "agg: time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
                        le16_to_cpu(lq_sta->lq.agg_time_limit),
                        lq_sta->lq.agg_disable_start_th,
                        lq_sta->lq.agg_frame_cnt_limit);
 
-       desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
+       desc += scnprintf(buff + desc, bufsz - desc, "reduced tpc=%d\n",
+                         lq_sta->lq.reduced_tpc);
        ss_params = le32_to_cpu(lq_sta->lq.ss_params);
-       desc += sprintf(buff+desc, "single stream params: %s%s%s%s\n",
+       desc += scnprintf(buff + desc, bufsz - desc,
+                       "single stream params: %s%s%s%s\n",
                        (ss_params & LQ_SS_PARAMS_VALID) ?
                        "VALID" : "INVALID",
                        (ss_params & LQ_SS_BFER_ALLOWED) ?
@@ -3763,7 +3762,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
                        ", STBC" : "",
                        (ss_params & LQ_SS_FORCE) ?
                        ", FORCE" : "");
-       desc += sprintf(buff+desc,
+       desc += scnprintf(buff + desc, bufsz - desc,
                        "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
                        lq_sta->lq.initial_rate_index[0],
                        lq_sta->lq.initial_rate_index[1],
@@ -3773,8 +3772,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
        for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
                u32 r = le32_to_cpu(lq_sta->lq.rs_table[i]);
 
-               desc += sprintf(buff+desc, " rate[%d] 0x%X ", i, r);
-               desc += rs_pretty_print_rate(buff+desc, r);
+               desc += scnprintf(buff + desc, bufsz - desc,
+                                 " rate[%d] 0x%X ", i, r);
+               desc += rs_pretty_print_rate(buff + desc, bufsz - desc, r);
        }
 
        ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
@@ -3987,12 +3987,13 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
 
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
 
-static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir)
+static void rs_drv_add_sta_debugfs(void *mvm, void *priv_sta,
+                                  struct dentry *dir)
 {
        struct iwl_lq_sta *lq_sta = priv_sta;
        struct iwl_mvm_sta *mvmsta;
 
-       mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
+       mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta.rs_drv);
 
        if (!mvmsta->vif)
                return;
@@ -4014,7 +4015,7 @@ err:
        IWL_ERR((struct iwl_mvm *)mvm, "Can't create debugfs entity\n");
 }
 
-static void rs_remove_debugfs(void *mvm, void *mvm_sta)
+void rs_remove_sta_debugfs(void *mvm, void *mvm_sta)
 {
 }
 #endif
@@ -4024,50 +4025,53 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
  * the station is added. Since mac80211 calls this function before a
  * station is added we ignore it.
  */
-static void rs_rate_init_stub(void *mvm_r,
-                             struct ieee80211_supported_band *sband,
-                             struct cfg80211_chan_def *chandef,
-                             struct ieee80211_sta *sta, void *mvm_sta)
+static void rs_rate_init_ops(void *mvm_r,
+                            struct ieee80211_supported_band *sband,
+                            struct cfg80211_chan_def *chandef,
+                            struct ieee80211_sta *sta, void *mvm_sta)
 {
 }
 
-static const struct rate_control_ops rs_mvm_ops = {
+/* ops for rate scaling implemented in the driver */
+static const struct rate_control_ops rs_mvm_ops_drv = {
        .name = RS_NAME,
-       .tx_status = rs_mac80211_tx_status,
-       .get_rate = rs_get_rate,
-       .rate_init = rs_rate_init_stub,
+       .tx_status = rs_drv_mac80211_tx_status,
+       .get_rate = rs_drv_get_rate,
+       .rate_init = rs_rate_init_ops,
        .alloc = rs_alloc,
        .free = rs_free,
-       .alloc_sta = rs_alloc_sta,
+       .alloc_sta = rs_drv_alloc_sta,
        .free_sta = rs_free_sta,
-       .rate_update = rs_rate_update,
+       .rate_update = rs_drv_rate_update,
 #ifdef CONFIG_MAC80211_DEBUGFS
-       .add_sta_debugfs = rs_add_debugfs,
-       .remove_sta_debugfs = rs_remove_debugfs,
+       .add_sta_debugfs = rs_drv_add_sta_debugfs,
+       .remove_sta_debugfs = rs_remove_sta_debugfs,
 #endif
 };
 
+void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+                         enum nl80211_band band, bool init)
+{
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD))
+               rs_fw_rate_init(mvm, sta, band);
+       else
+               rs_drv_rate_init(mvm, sta, band, init);
+}
+
 int iwl_mvm_rate_control_register(void)
 {
-       return ieee80211_rate_control_register(&rs_mvm_ops);
+       return ieee80211_rate_control_register(&rs_mvm_ops_drv);
 }
 
 void iwl_mvm_rate_control_unregister(void)
 {
-       ieee80211_rate_control_unregister(&rs_mvm_ops);
+       ieee80211_rate_control_unregister(&rs_mvm_ops_drv);
 }
 
-/**
- * iwl_mvm_tx_protection - Gets LQ command, change it to enable/disable
- * Tx protection, according to this request and previous requests,
- * and send the LQ command.
- * @mvmsta: The station
- * @enable: Enable Tx protection?
- */
-int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
-                         bool enable)
+static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+                               bool enable)
 {
-       struct iwl_lq_cmd *lq = &mvmsta->lq_sta.lq;
+       struct iwl_lq_cmd *lq = &mvmsta->lq_sta.rs_drv.lq;
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -4083,3 +4087,17 @@ int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
 
        return iwl_mvm_send_lq_cmd(mvm, lq, false);
 }
+
+/**
+ * iwl_mvm_tx_protection - ask FW to enable RTS/CTS protection
+ * @mvmsta: The station
+ * @enable: Enable Tx protection?
+ */
+int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+                         bool enable)
+{
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD))
+               return rs_fw_tx_protection(mvm, mvmsta, enable);
+       else
+               return rs_drv_tx_protection(mvm, mvmsta, enable);
+}
index 32b4d66debea224c0ae88c0ff8b7f1a48b120118..fb18cb8c233d12e56f846af07bcfa3eebdb6ea3e 100644 (file)
@@ -36,6 +36,8 @@
 #include "fw-api.h"
 #include "iwl-trans.h"
 
+#define RS_NAME "iwl-mvm-rs"
+
 struct iwl_rs_rate_info {
        u8 plcp;          /* uCode API:  IWL_RATE_6M_PLCP, etc. */
        u8 plcp_ht_siso;  /* uCode API:  IWL_RATE_SISO_6M_PLCP, etc. */
@@ -217,6 +219,38 @@ struct iwl_rate_mcs_info {
        char    mcs[IWL_MAX_MCS_DISPLAY_SIZE];
 };
 
+/**
+ * struct iwl_lq_sta_rs_fw - rate and related statistics for RS in FW
+ * @last_rate_n_flags: last rate reported by FW
+ * @sta_id: the id of the station
+#ifdef CONFIG_MAC80211_DEBUGFS
+ * @dbg_fixed_rate: for debug, use fixed rate if not 0
+ * @dbg_agg_frame_count_lim: for debug, max number of frames in A-MPDU
+#endif
+ * @chains: bitmask of chains reported in %chain_signal
+ * @chain_signal: per chain signal strength
+ * @last_rssi: last rssi reported
+ * @drv: pointer back to the driver data
+ */
+
+struct iwl_lq_sta_rs_fw {
+       /* last tx rate_n_flags */
+       u32 last_rate_n_flags;
+
+       /* persistent fields - initialized only once - keep last! */
+       struct lq_sta_pers_rs_fw {
+               u32 sta_id;
+#ifdef CONFIG_MAC80211_DEBUGFS
+               u32 dbg_fixed_rate;
+               u16 dbg_agg_frame_count_lim;
+#endif
+               u8 chains;
+               s8 chain_signal[IEEE80211_MAX_CHAINS];
+               s8 last_rssi;
+               struct iwl_mvm *drv;
+       } pers;
+};
+
 /**
  * struct iwl_rate_scale_data -- tx success history for one rate
  */
@@ -407,4 +441,18 @@ struct iwl_mvm_sta;
 int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
                          bool enable);
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm);
+#endif
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+void rs_remove_sta_debugfs(void *mvm, void *mvm_sta);
+#endif
+
+void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta);
+void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+                    enum nl80211_band band);
+int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+                       bool enable);
+void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt);
 #endif /* __rs__ */
index d1a40688d5e10e9f01f77953314fcec291748b3f..63a57f0a16eff4d1e00fe4293d660dd8d03b1537 100644 (file)
@@ -383,7 +383,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
                                                                 false);
                }
 
-               rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
+               rs_update_last_rssi(mvm, mvmsta, rx_status);
 
                if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
                    ieee80211_is_beacon(hdr->frame_control)) {
@@ -439,7 +439,8 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
                rx_status->bw = RATE_INFO_BW_160;
                break;
        }
-       if (rate_n_flags & RATE_MCS_SGI_MSK)
+       if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
+           rate_n_flags & RATE_MCS_SGI_MSK)
                rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
        if (rate_n_flags & RATE_HT_MCS_GF_MSK)
                rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
index 3b8d44361380de8d2a0a9635a915fe8151388ecf..4a70e62c1b879a338edbe97ba442ebd68112b6e3 100644 (file)
@@ -943,7 +943,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                                                                 false);
                }
 
-               rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
+               rs_update_last_rssi(mvm, mvmsta, rx_status);
 
                if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
                    ieee80211_is_beacon(hdr->frame_control)) {
@@ -1020,7 +1020,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                rx_status->bw = RATE_INFO_BW_160;
                break;
        }
-       if (rate_n_flags & RATE_MCS_SGI_MSK)
+
+       if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
+           rate_n_flags & RATE_MCS_SGI_MSK)
                rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
        if (rate_n_flags & RATE_HT_MCS_GF_MSK)
                rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
index e4fd476e9ccb0888d572ee296a4947c9fd3903ca..356b16f40e7849475e1715ab633ce52c36d5e898 100644 (file)
@@ -664,6 +664,22 @@ static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
        return newpos;
 }
 
+#define WFA_TPC_IE_LEN 9
+
+static void iwl_mvm_add_tpc_report_ie(u8 *pos)
+{
+       pos[0] = WLAN_EID_VENDOR_SPECIFIC;
+       pos[1] = WFA_TPC_IE_LEN - 2;
+       pos[2] = (WLAN_OUI_MICROSOFT >> 16) & 0xff;
+       pos[3] = (WLAN_OUI_MICROSOFT >> 8) & 0xff;
+       pos[4] = WLAN_OUI_MICROSOFT & 0xff;
+       pos[5] = WLAN_OUI_TYPE_MICROSOFT_TPC;
+       pos[6] = 0;
+       /* pos[7] - tx power will be inserted by the FW */
+       pos[7] = 0;
+       pos[8] = 0;
+}
+
 static void
 iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                         struct ieee80211_scan_ies *ies,
@@ -716,7 +732,16 @@ iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        memcpy(pos, ies->common_ies, ies->common_ie_len);
        params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
-       params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
+
+       if (iwl_mvm_rrm_scan_needed(mvm) &&
+           !fw_has_capa(&mvm->fw->ucode_capa,
+                        IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) {
+               iwl_mvm_add_tpc_report_ie(pos + ies->common_ie_len);
+               params->preq.common_data.len = cpu_to_le16(ies->common_ie_len +
+                                                          WFA_TPC_IE_LEN);
+       } else {
+               params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
+       }
 }
 
 static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
@@ -781,7 +806,9 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
        if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
                flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
 
-       if (iwl_mvm_rrm_scan_needed(mvm))
+       if (iwl_mvm_rrm_scan_needed(mvm) &&
+           fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
                flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED;
 
        if (params->pass_all)
@@ -1183,7 +1210,9 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
                        flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
        }
 
-       if (iwl_mvm_rrm_scan_needed(mvm))
+       if (iwl_mvm_rrm_scan_needed(mvm) &&
+           fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
                flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
 
        if (params->pass_all)
index 1add5615fc3ad9d0a9801920448e79f71e64eda5..9d33f7a0a80aa529b8345b5737c4d138dba59682 100644 (file)
@@ -1439,6 +1439,13 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
                        goto err;
        }
 
+       /*
+        * if rs is registered with mac80211, then "add station" will be handled
+        * via the corresponding ops, otherwise need to notify rate scaling here
+        */
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD))
+               iwl_mvm_rs_add_sta(mvm, mvm_sta);
+
 update_fw:
        ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
        if (ret)
@@ -1762,7 +1769,7 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
        }
 
        /*
-        * For a000 firmware and on we cannot add queue to a station unknown
+        * For 22000 firmware and on we cannot add queue to a station unknown
         * to firmware so enable queue here - after the station was added
         */
        if (iwl_mvm_has_new_tx_api(mvm))
@@ -1885,7 +1892,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                return ret;
 
        /*
-        * For a000 firmware and on we cannot add queue to a station unknown
+        * For 22000 firmware and on we cannot add queue to a station unknown
         * to firmware so enable queue here - after the station was added
         */
        if (iwl_mvm_has_new_tx_api(mvm)) {
@@ -2064,7 +2071,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        /*
         * Enable cab queue after the ADD_STA command is sent.
-        * This is needed for a000 firmware which won't accept SCD_QUEUE_CFG
+        * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
         * command with unknown station id, and for FW that doesn't support
         * station API since the cab queue is not included in the
         * tfd_queue_mask.
@@ -2530,7 +2537,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                            tid_data->next_reclaimed);
 
        /*
-        * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
+        * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
         * to align the wrap around of ssn so we compare relevant values.
         */
        normalized_ssn = tid_data->ssn;
@@ -2575,6 +2582,14 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                .aggregate = true,
        };
 
+       /*
+        * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
+        * manager, so this function should never be called in this case.
+        */
+       if (WARN_ON_ONCE(fw_has_capa(&mvm->fw->ucode_capa,
+                                    IWL_UCODE_TLV_CAPA_TLC_OFFLOAD)))
+               return -EINVAL;
+
        BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
                     != IWL_MAX_TID_COUNT);
 
@@ -2672,12 +2687,12 @@ out:
         */
        mvmsta->max_agg_bufsize =
                min(mvmsta->max_agg_bufsize, buf_size);
-       mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
+       mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
 
        IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
                     sta->addr, tid);
 
-       return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
+       return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
 }
 
 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
@@ -3615,7 +3630,7 @@ u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
        u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
 
        /*
-        * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
+        * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
         * to align the wrap around of ssn so we compare relevant values.
         */
        if (mvm->trans->cfg->gen2)
index aedabe101cf0f09681a7c61cfa6d6eb326b033a6..5ffd6adbc38399f2d2e430b7f0c3a098241ac85a 100644 (file)
@@ -383,6 +383,8 @@ struct iwl_mvm_rxq_dup_data {
  * and from Tx response flow, it needs a spinlock.
  * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data.
  * @tid_to_baid: a simple map of TID to baid
+ * @lq_sta: holds rate scaling data, either for the case when RS is done in
+ *     the driver - %rs_drv or in the FW - %rs_fw.
  * @reserved_queue: the queue reserved for this STA for DQA purposes
  *     Every STA has is given one reserved queue to allow it to operate. If no
  *     such queue can be guaranteed, the STA addition will fail.
@@ -417,7 +419,10 @@ struct iwl_mvm_sta {
        spinlock_t lock;
        struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1];
        u8 tid_to_baid[IWL_MAX_TID_COUNT];
-       struct iwl_lq_sta lq_sta;
+       union {
+               struct iwl_lq_sta_rs_fw rs_fw;
+               struct iwl_lq_sta rs_drv;
+       } lq_sta;
        struct ieee80211_vif *vif;
        struct iwl_mvm_key_pn __rcu *ptk_pn[4];
        struct iwl_mvm_rxq_dup_data *dup_data;
index e25cda9fbf6c34d951b7441b40574bfb9c0a67b7..200ab50ec86b2f22a0b2eeacf102a60c1c293f1c 100644 (file)
@@ -101,7 +101,6 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
 void iwl_mvm_roc_done_wk(struct work_struct *wk)
 {
        struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
-       u32 queues = 0;
 
        /*
         * Clear the ROC_RUNNING /ROC_AUX_RUNNING status bit.
@@ -110,14 +109,10 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
         * in the case that the time event actually completed in the firmware
         * (which is handled in iwl_mvm_te_handle_notif).
         */
-       if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
-               queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE);
+       if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
                iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
-       }
-       if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
-               queues |= BIT(mvm->aux_queue);
+       if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
                iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
-       }
 
        synchronize_net();
 
@@ -777,12 +772,6 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                return -EBUSY;
        }
 
-       /*
-        * Flush the done work, just in case it's still pending, so that
-        * the work it does can complete and we can accept new frames.
-        */
-       flush_work(&mvm->roc_done_wk);
-
        time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
        time_cmd.id_and_color =
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
index 333bcb75b8afcd17eecd76c1a581c683edef4457..84d16522d6647e69b8cda730e952e1f60baa003d 100644 (file)
@@ -1132,7 +1132,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
        }
 
        /*
-        * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
+        * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
         * to align the wrap around of ssn so we compare relevant values.
         */
        normalized_ssn = tid_data->ssn;
@@ -1624,7 +1624,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
        int freed;
 
        if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
-                     tid >= IWL_MAX_TID_COUNT,
+                     tid > IWL_MAX_TID_COUNT,
                      "sta_id %d tid %d", sta_id, tid))
                return;
 
@@ -1679,7 +1679,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
                if (ieee80211_is_data_qos(hdr->frame_control))
                        freed++;
                else
-                       WARN_ON_ONCE(1);
+                       WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);
 
                iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
 
@@ -1719,8 +1719,12 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
                ba_info->band = chanctx_conf->def.chan->band;
                iwl_mvm_hwrate_to_tx_status(rate, ba_info);
 
-               IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
-               iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
+               if (!fw_has_capa(&mvm->fw->ucode_capa,
+                                IWL_UCODE_TLV_CAPA_TLC_OFFLOAD)) {
+                       IWL_DEBUG_TX_REPLY(mvm,
+                                          "No reclaim. Update rs directly\n");
+                       iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
+               }
        }
 
 out:
@@ -1771,8 +1775,12 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
                        struct iwl_mvm_compressed_ba_tfd *ba_tfd =
                                &ba_res->tfd[i];
 
+                       tid = ba_tfd->tid;
+                       if (tid == IWL_MGMT_TID)
+                               tid = IWL_MAX_TID_COUNT;
+
                        mvmsta->tid_data[i].lq_color = lq_color;
-                       iwl_mvm_tx_reclaim(mvm, sta_id, ba_tfd->tid,
+                       iwl_mvm_tx_reclaim(mvm, sta_id, tid,
                                           (int)(le16_to_cpu(ba_tfd->q_num)),
                                           le16_to_cpu(ba_tfd->tfd_index),
                                           &ba_info,
index 03ffd84786ca4f1b832fe78177ac0ef090d4dddd..0b7e29be8e50841bc1e9942d7904bf0aebf0f5dc 100644 (file)
@@ -278,8 +278,8 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
        u8 ind = last_idx;
        int i;
 
-       for (i = 0; i < RATE_MCS_ANT_NUM; i++) {
-               ind = (ind + 1) % RATE_MCS_ANT_NUM;
+       for (i = 0; i < MAX_RS_ANT_NUM; i++) {
+               ind = (ind + 1) % MAX_RS_ANT_NUM;
                if (valid & BIT(ind))
                        return ind;
        }
@@ -595,6 +595,12 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
 
 void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
 {
+       if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
+               IWL_ERR(mvm,
+                       "DEVICE_ENABLED bit is not set. Aborting dump.\n");
+               return;
+       }
+
        iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]);
 
        if (mvm->error_event_table[1])
@@ -906,7 +912,9 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
                .data = { lq, },
        };
 
-       if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA))
+       if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA ||
+                   fw_has_capa(&mvm->fw->ucode_capa,
+                               IWL_UCODE_TLV_CAPA_TLC_OFFLOAD)))
                return -EINVAL;
 
        return iwl_mvm_send_cmd(mvm, &cmd);
index ccd7c33c4c2823e3374934a7e25da8cac82996a1..56fc28750a41277364202c0fa109df905e084202 100644 (file)
@@ -652,20 +652,20 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
 
-/* a000 Series */
-       {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)},
-       {IWL_PCI_DEVICE(0x34F0, 0x0310, iwla000_2ac_cfg_jf)},
-       {IWL_PCI_DEVICE(0x2720, 0x0000, iwla000_2ax_cfg_hr)},
-       {IWL_PCI_DEVICE(0x34F0, 0x0070, iwla000_2ax_cfg_hr)},
-       {IWL_PCI_DEVICE(0x2720, 0x0078, iwla000_2ax_cfg_hr)},
-       {IWL_PCI_DEVICE(0x2720, 0x0070, iwla000_2ac_cfg_hr_cdb)},
-       {IWL_PCI_DEVICE(0x2720, 0x0030, iwla000_2ac_cfg_hr_cdb)},
-       {IWL_PCI_DEVICE(0x2720, 0x1080, iwla000_2ax_cfg_hr)},
-       {IWL_PCI_DEVICE(0x2720, 0x0090, iwla000_2ac_cfg_hr_cdb)},
-       {IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)},
-       {IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)},
-       {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwla000_2ax_cfg_hr)},
+/* 22000 Series */
+       {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)},
+       {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
+       {IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)},
+       {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
+       {IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)},
+       {IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)},
+       {IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)},
+       {IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)},
+       {IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
+       {IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)},
+       {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22000_2ax_cfg_hr)},
+       {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22000_2ax_cfg_hr)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)},
 
 #endif /* CONFIG_IWLMVM */
 
@@ -707,7 +707,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                iwl_trans->cfg = cfg_7265d;
        }
 
-       if (iwl_trans->cfg->rf_id && cfg == &iwla000_2ac_cfg_hr_cdb &&
+       if (iwl_trans->cfg->rf_id && cfg == &iwl22000_2ac_cfg_hr_cdb &&
            iwl_trans->hw_rev != CSR_HW_REV_TYPE_HR_CDB) {
                u32 rf_id_chp = CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id);
                u32 jf_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF);
@@ -715,14 +715,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
                if (rf_id_chp == jf_chp_id) {
                        if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ)
-                               cfg = &iwla000_2ax_cfg_qnj_jf_b0;
+                               cfg = &iwl22000_2ax_cfg_qnj_jf_b0;
                        else
-                               cfg = &iwla000_2ac_cfg_jf;
+                               cfg = &iwl22000_2ac_cfg_jf;
                } else if (rf_id_chp == hr_chp_id) {
                        if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ)
-                               cfg = &iwla000_2ax_cfg_qnj_hr_a0;
+                               cfg = &iwl22000_2ax_cfg_qnj_hr_a0;
                        else
-                               cfg = &iwla000_2ac_cfg_hr;
+                               cfg = &iwl22000_2ac_cfg_hr;
                }
                iwl_trans->cfg = cfg;
        }
index 4541c86881d604e16093eef51cb7ba48afb3685b..fbc45361f0bb516e3c74c378eb60aaa6f9c10518 100644 (file)
@@ -3250,9 +3250,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 
                hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
                if (hw_status & UMAG_GEN_HW_IS_FPGA)
-                       trans->cfg = &iwla000_2ax_cfg_qnj_hr_f0;
+                       trans->cfg = &iwl22000_2ax_cfg_qnj_hr_f0;
                else
-                       trans->cfg = &iwla000_2ac_cfg_hr;
+                       trans->cfg = &iwl22000_2ac_cfg_hr;
        }
 #endif
 
index 10b075a46b266218c53d1e5674c1789e1e0f3d80..e8189c07b41f6b450f135ef703ec4e01568e311d 100644 (file)
@@ -684,6 +684,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
        hdr = skb_put(skb, sizeof(*hdr) - ETH_ALEN);
        hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
                                         IEEE80211_STYPE_NULLFUNC |
+                                        IEEE80211_FCTL_TODS |
                                         (ps ? IEEE80211_FCTL_PM : 0));
        hdr->duration_id = cpu_to_le16(0);
        memcpy(hdr->addr1, vp->bssid, ETH_ALEN);
@@ -3215,7 +3216,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
                if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info)))
                        continue;
 
-               skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+               skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
                if (!skb) {
                        res = -ENOMEM;
                        goto out_err;
index 6e0d9a9c5cfb03023c9fc91d158ea714ebaace19..ce4432c535f0543757345d352f32a5b08972c4ba 100644 (file)
@@ -1116,6 +1116,12 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
        enum nl80211_iftype curr_iftype = dev->ieee80211_ptr->iftype;
 
+       if (priv->scan_request) {
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "change virtual interface: scan in process\n");
+               return -EBUSY;
+       }
+
        switch (curr_iftype) {
        case NL80211_IFTYPE_ADHOC:
                switch (type) {
@@ -1180,7 +1186,6 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
        case NL80211_IFTYPE_AP:
                switch (type) {
                case NL80211_IFTYPE_ADHOC:
-               case NL80211_IFTYPE_STATION:
                        return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype,
                                                               type, params);
                        break;
index e813b2ca740c26ae6460ebe258c3280cb9cb2472..8e4e9b6919e0274cbdbc3a2cfde72b2e40c8fb6f 100644 (file)
@@ -199,7 +199,7 @@ struct mwl8k_priv {
        struct ieee80211_channel channels_24[14];
        struct ieee80211_rate rates_24[13];
        struct ieee80211_supported_band band_50;
-       struct ieee80211_channel channels_50[4];
+       struct ieee80211_channel channels_50[9];
        struct ieee80211_rate rates_50[8];
        u32 ap_macids_supported;
        u32 sta_macids_supported;
@@ -383,6 +383,11 @@ static const struct ieee80211_channel mwl8k_channels_50[] = {
        { .band = NL80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, },
        { .band = NL80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, },
        { .band = NL80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, },
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5745, .hw_value = 149, },
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5765, .hw_value = 153, },
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5785, .hw_value = 157, },
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5805, .hw_value = 161, },
+       { .band = NL80211_BAND_5GHZ, .center_freq = 5825, .hw_value = 165, },
 };
 
 static const struct ieee80211_rate mwl8k_rates_50[] = {
index 28843fed750a2f6a8bb03368092ca10b698e0dfe..92ce4062f307768e740b5124741bde16aa9f6d29 100644 (file)
@@ -11,4 +11,5 @@ config WLAN_VENDOR_MEDIATEK
 
 if WLAN_VENDOR_MEDIATEK
 source "drivers/net/wireless/mediatek/mt7601u/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/Kconfig"
 endif # WLAN_VENDOR_MEDIATEK
index 9d5f182fd7fdc52696fa388969db64357d209911..00f945f59b38354b02bec6ad0d85c5974c079a53 100644 (file)
@@ -1 +1,2 @@
 obj-$(CONFIG_MT7601U)  += mt7601u/
+obj-$(CONFIG_MT76_CORE)        += mt76/
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
new file mode 100644 (file)
index 0000000..fc05d79
--- /dev/null
@@ -0,0 +1,10 @@
+config MT76_CORE
+       tristate
+
+config MT76x2E
+       tristate "MediaTek MT76x2E (PCIe) support"
+       select MT76_CORE
+       depends on MAC80211
+       depends on PCI
+       ---help---
+         This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices.
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
new file mode 100644 (file)
index 0000000..2bb9198
--- /dev/null
@@ -0,0 +1,15 @@
+obj-$(CONFIG_MT76_CORE) += mt76.o
+obj-$(CONFIG_MT76x2E) += mt76x2e.o
+
+mt76-y := \
+       mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o
+
+CFLAGS_trace.o := -I$(src)
+
+mt76x2e-y := \
+       mt76x2_pci.o mt76x2_dma.o \
+       mt76x2_main.o mt76x2_init.o mt76x2_debugfs.o mt76x2_tx.o \
+       mt76x2_core.o mt76x2_mac.o mt76x2_eeprom.o mt76x2_mcu.o mt76x2_phy.o \
+       mt76x2_dfs.o mt76x2_trace.o
+
+CFLAGS_mt76x2_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c
new file mode 100644 (file)
index 0000000..7c3612a
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include "mt76.h"
+
+static int
+mt76_reg_set(void *data, u64 val)
+{
+       struct mt76_dev *dev = data;
+
+       dev->bus->wr(dev, dev->debugfs_reg, val);
+       return 0;
+}
+
+static int
+mt76_reg_get(void *data, u64 *val)
+{
+       struct mt76_dev *dev = data;
+
+       *val = dev->bus->rr(dev, dev->debugfs_reg);
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
+
+static int
+mt76_queues_read(struct seq_file *s, void *data)
+{
+       struct mt76_dev *dev = dev_get_drvdata(s->private);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
+               struct mt76_queue *q = &dev->q_tx[i];
+
+               if (!q->ndesc)
+                       continue;
+
+               seq_printf(s,
+                          "%d: queued=%d head=%d tail=%d swq_queued=%d\n",
+                          i, q->queued, q->head, q->tail, q->swq_queued);
+       }
+
+       return 0;
+}
+
+struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
+{
+       struct dentry *dir;
+
+       dir = debugfs_create_dir("mt76", dev->hw->wiphy->debugfsdir);
+       if (!dir)
+               return NULL;
+
+       debugfs_create_u8("led_pin", S_IRUSR | S_IWUSR, dir, &dev->led_pin);
+       debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg);
+       debugfs_create_file("regval", S_IRUSR | S_IWUSR, dir, dev,
+                           &fops_regval);
+       debugfs_create_blob("eeprom", S_IRUSR, dir, &dev->eeprom);
+       if (dev->otp.data)
+               debugfs_create_blob("otp", S_IRUSR, dir, &dev->otp);
+       debugfs_create_devm_seqfile(dev->dev, "queues", dir, mt76_queues_read);
+
+       return dir;
+}
+EXPORT_SYMBOL_GPL(mt76_register_debugfs);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
new file mode 100644 (file)
index 0000000..ecd409a
--- /dev/null
@@ -0,0 +1,451 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/dma-mapping.h>
+#include "mt76.h"
+#include "dma.h"
+
+#define DMA_DUMMY_TXWI ((void *) ~0)
+
+static int
+mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
+       int size;
+       int i;
+
+       spin_lock_init(&q->lock);
+       INIT_LIST_HEAD(&q->swq);
+
+       size = q->ndesc * sizeof(struct mt76_desc);
+       q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
+       if (!q->desc)
+               return -ENOMEM;
+
+       size = q->ndesc * sizeof(*q->entry);
+       q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
+       if (!q->entry)
+               return -ENOMEM;
+
+       /* clear descriptors */
+       for (i = 0; i < q->ndesc; i++)
+               q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+
+       iowrite32(q->desc_dma, &q->regs->desc_base);
+       iowrite32(0, &q->regs->cpu_idx);
+       iowrite32(0, &q->regs->dma_idx);
+       iowrite32(q->ndesc, &q->regs->ring_size);
+
+       return 0;
+}
+
+static int
+mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
+                struct mt76_queue_buf *buf, int nbufs, u32 info,
+                struct sk_buff *skb, void *txwi)
+{
+       struct mt76_desc *desc;
+       u32 ctrl;
+       int i, idx = -1;
+
+       if (txwi)
+               q->entry[q->head].txwi = DMA_DUMMY_TXWI;
+
+       for (i = 0; i < nbufs; i += 2, buf += 2) {
+               u32 buf0 = buf[0].addr, buf1 = 0;
+
+               ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
+               if (i < nbufs - 1) {
+                       buf1 = buf[1].addr;
+                       ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
+               }
+
+               if (i == nbufs - 1)
+                       ctrl |= MT_DMA_CTL_LAST_SEC0;
+               else if (i == nbufs - 2)
+                       ctrl |= MT_DMA_CTL_LAST_SEC1;
+
+               idx = q->head;
+               q->head = (q->head + 1) % q->ndesc;
+
+               desc = &q->desc[idx];
+
+               WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
+               WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
+               WRITE_ONCE(desc->info, cpu_to_le32(info));
+               WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+
+               q->queued++;
+       }
+
+       q->entry[idx].txwi = txwi;
+       q->entry[idx].skb = skb;
+
+       return idx;
+}
+
+static void
+mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+                       struct mt76_queue_entry *prev_e)
+{
+       struct mt76_queue_entry *e = &q->entry[idx];
+       __le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
+       u32 ctrl = le32_to_cpu(__ctrl);
+
+       if (!e->txwi || !e->skb) {
+               __le32 addr = READ_ONCE(q->desc[idx].buf0);
+               u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
+
+               dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+                                DMA_TO_DEVICE);
+       }
+
+       if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
+               __le32 addr = READ_ONCE(q->desc[idx].buf1);
+               u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);
+
+               dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+                                DMA_TO_DEVICE);
+       }
+
+       if (e->txwi == DMA_DUMMY_TXWI)
+               e->txwi = NULL;
+
+       *prev_e = *e;
+       memset(e, 0, sizeof(*e));
+}
+
+static void
+mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
+{
+       q->head = ioread32(&q->regs->dma_idx);
+       q->tail = q->head;
+       iowrite32(q->head, &q->regs->cpu_idx);
+}
+
+static void
+mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
+{
+       struct mt76_queue *q = &dev->q_tx[qid];
+       struct mt76_queue_entry entry;
+       bool wake = false;
+       int last;
+
+       if (!q->ndesc)
+               return;
+
+       spin_lock_bh(&q->lock);
+       if (flush)
+               last = -1;
+       else
+               last = ioread32(&q->regs->dma_idx);
+
+       while (q->queued && q->tail != last) {
+               mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
+               if (entry.schedule)
+                       q->swq_queued--;
+
+               if (entry.skb)
+                       dev->drv->tx_complete_skb(dev, q, &entry, flush);
+
+               if (entry.txwi) {
+                       mt76_put_txwi(dev, entry.txwi);
+                       wake = true;
+               }
+
+               q->tail = (q->tail + 1) % q->ndesc;
+               q->queued--;
+
+               if (!flush && q->tail == last)
+                       last = ioread32(&q->regs->dma_idx);
+       }
+
+       if (!flush)
+               mt76_txq_schedule(dev, q);
+       else
+               mt76_dma_sync_idx(dev, q);
+
+       wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+       spin_unlock_bh(&q->lock);
+
+       if (wake)
+               ieee80211_wake_queue(dev->hw, qid);
+}
+
+static void *
+mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+                int *len, u32 *info, bool *more)
+{
+       struct mt76_queue_entry *e = &q->entry[idx];
+       struct mt76_desc *desc = &q->desc[idx];
+       dma_addr_t buf_addr;
+       void *buf = e->buf;
+       int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
+
+       buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
+       if (len) {
+               u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
+               *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
+               *more = !(ctl & MT_DMA_CTL_LAST_SEC0);
+       }
+
+       if (info)
+               *info = le32_to_cpu(desc->info);
+
+       dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
+       e->buf = NULL;
+
+       return buf;
+}
+
+static void *
+mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
+                int *len, u32 *info, bool *more)
+{
+       int idx = q->tail;
+
+       *more = false;
+       if (!q->queued)
+               return NULL;
+
+       if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+               return NULL;
+
+       q->tail = (q->tail + 1) % q->ndesc;
+       q->queued--;
+
+       return mt76_dma_get_buf(dev, q, idx, len, info, more);
+}
+
+static void
+mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
+       iowrite32(q->head, &q->regs->cpu_idx);
+}
+
+static int
+mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
+{
+       dma_addr_t addr;
+       void *buf;
+       int frames = 0;
+       int len = SKB_WITH_OVERHEAD(q->buf_size);
+       int offset = q->buf_offset;
+       int idx;
+       void *(*alloc)(unsigned int fragsz);
+
+       if (napi)
+               alloc = napi_alloc_frag;
+       else
+               alloc = netdev_alloc_frag;
+
+       spin_lock_bh(&q->lock);
+
+       while (q->queued < q->ndesc - 1) {
+               struct mt76_queue_buf qbuf;
+
+               buf = alloc(q->buf_size);
+               if (!buf)
+                       break;
+
+               addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
+               if (dma_mapping_error(dev->dev, addr)) {
+                       skb_free_frag(buf);
+                       break;
+               }
+
+               qbuf.addr = addr + offset;
+               qbuf.len = len - offset;
+               idx = mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
+               frames++;
+       }
+
+       if (frames)
+               mt76_dma_kick_queue(dev, q);
+
+       spin_unlock_bh(&q->lock);
+
+       return frames;
+}
+
+static void
+mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
+{
+       void *buf;
+       bool more;
+
+       spin_lock_bh(&q->lock);
+       do {
+               buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
+               if (!buf)
+                       break;
+
+               skb_free_frag(buf);
+       } while (1);
+       spin_unlock_bh(&q->lock);
+}
+
+static void
+mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
+{
+       struct mt76_queue *q = &dev->q_rx[qid];
+       int i;
+
+       for (i = 0; i < q->ndesc; i++)
+               q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+
+       mt76_dma_rx_cleanup(dev, q);
+       mt76_dma_sync_idx(dev, q);
+       mt76_dma_rx_fill(dev, q, false);
+}
+
+static void
+mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
+                 int len, bool more)
+{
+       struct page *page = virt_to_head_page(data);
+       int offset = data - page_address(page);
+       struct sk_buff *skb = q->rx_head;
+
+       offset += q->buf_offset;
+       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
+                       q->buf_size);
+
+       if (more)
+               return;
+
+       q->rx_head = NULL;
+       dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+}
+
+static int
+mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
+{
+       struct sk_buff *skb;
+       unsigned char *data;
+       int len;
+       int done = 0;
+       bool more;
+
+       while (done < budget) {
+               u32 info;
+
+               data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
+               if (!data)
+                       break;
+
+               if (q->rx_head) {
+                       mt76_add_fragment(dev, q, data, len, more);
+                       continue;
+               }
+
+               skb = build_skb(data, q->buf_size);
+               if (!skb) {
+                       skb_free_frag(data);
+                       continue;
+               }
+
+               skb_reserve(skb, q->buf_offset);
+               if (skb->tail + len > skb->end) {
+                       dev_kfree_skb(skb);
+                       continue;
+               }
+
+               if (q == &dev->q_rx[MT_RXQ_MCU]) {
+                       u32 *rxfce = (u32 *) skb->cb;
+                       *rxfce = info;
+               }
+
+               __skb_put(skb, len);
+               done++;
+
+               if (more) {
+                       q->rx_head = skb;
+                       continue;
+               }
+
+               dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+       }
+
+       mt76_dma_rx_fill(dev, q, true);
+       return done;
+}
+
+static int
+mt76_dma_rx_poll(struct napi_struct *napi, int budget)
+{
+       struct mt76_dev *dev;
+       int qid, done;
+
+       dev = container_of(napi->dev, struct mt76_dev, napi_dev);
+       qid = napi - dev->napi;
+
+       done = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget);
+       if (done < budget) {
+               napi_complete(napi);
+               dev->drv->rx_poll_complete(dev, qid);
+       }
+       mt76_rx_complete(dev, qid);
+
+       return done;
+}
+
+static int
+mt76_dma_init(struct mt76_dev *dev)
+{
+       int i;
+
+       init_dummy_netdev(&dev->napi_dev);
+
+       for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
+               netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
+                              64);
+               mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
+               skb_queue_head_init(&dev->rx_skb[i]);
+               napi_enable(&dev->napi[i]);
+       }
+
+       return 0;
+}
+
+static const struct mt76_queue_ops mt76_dma_ops = {
+       .init = mt76_dma_init,
+       .alloc = mt76_dma_alloc_queue,
+       .add_buf = mt76_dma_add_buf,
+       .tx_cleanup = mt76_dma_tx_cleanup,
+       .rx_reset = mt76_dma_rx_reset,
+       .kick = mt76_dma_kick_queue,
+};
+
+int mt76_dma_attach(struct mt76_dev *dev)
+{
+       dev->queue_ops = &mt76_dma_ops;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_dma_attach);
+
+void mt76_dma_cleanup(struct mt76_dev *dev)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
+               mt76_dma_tx_cleanup(dev, i, true);
+
+       for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
+               netif_napi_del(&dev->napi[i]);
+               mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
+       }
+}
+EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
new file mode 100644 (file)
index 0000000..1dad396
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef __MT76_DMA_H
+#define __MT76_DMA_H
+
+#define MT_RING_SIZE                   0x10
+
+#define MT_DMA_CTL_SD_LEN1             GENMASK(13, 0)
+#define MT_DMA_CTL_LAST_SEC1           BIT(14)
+#define MT_DMA_CTL_BURST               BIT(15)
+#define MT_DMA_CTL_SD_LEN0             GENMASK(29, 16)
+#define MT_DMA_CTL_LAST_SEC0           BIT(30)
+#define MT_DMA_CTL_DMA_DONE            BIT(31)
+
+struct mt76_desc {
+       __le32 buf0;
+       __le32 ctrl;
+       __le32 buf1;
+       __le32 info;
+} __packed __aligned(4);
+
+int mt76_dma_attach(struct mt76_dev *dev);
+void mt76_dma_cleanup(struct mt76_dev *dev);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
new file mode 100644 (file)
index 0000000..530e559
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/etherdevice.h>
+#include "mt76.h"
+
+static int
+mt76_get_of_eeprom(struct mt76_dev *dev, int len)
+{
+#if defined(CONFIG_OF) && defined(CONFIG_MTD)
+       struct device_node *np = dev->dev->of_node;
+       struct mtd_info *mtd;
+       const __be32 *list;
+       const char *part;
+       phandle phandle;
+       int offset = 0;
+       int size;
+       size_t retlen;
+       int ret;
+
+       if (!np)
+               return -ENOENT;
+
+       list = of_get_property(np, "mediatek,mtd-eeprom", &size);
+       if (!list)
+               return -ENOENT;
+
+       phandle = be32_to_cpup(list++);
+       if (!phandle)
+               return -ENOENT;
+
+       np = of_find_node_by_phandle(phandle);
+       if (!np)
+               return -EINVAL;
+
+       part = of_get_property(np, "label", NULL);
+       if (!part)
+               part = np->name;
+
+       mtd = get_mtd_device_nm(part);
+       if (IS_ERR(mtd))
+               return PTR_ERR(mtd);
+
+       if (size <= sizeof(*list))
+               return -EINVAL;
+
+       offset = be32_to_cpup(list);
+       ret = mtd_read(mtd, offset, len, &retlen, dev->eeprom.data);
+       put_mtd_device(mtd);
+       if (ret)
+               return ret;
+
+       if (retlen < len)
+               return -EINVAL;
+
+       return 0;
+#else
+       return -ENOENT;
+#endif
+}
+
+void
+mt76_eeprom_override(struct mt76_dev *dev)
+{
+#ifdef CONFIG_OF
+       struct device_node *np = dev->dev->of_node;
+       const u8 *mac;
+
+       if (!np)
+               return;
+
+       mac = of_get_mac_address(np);
+       if (mac)
+               memcpy(dev->macaddr, mac, ETH_ALEN);
+#endif
+
+       if (!is_valid_ether_addr(dev->macaddr)) {
+               eth_random_addr(dev->macaddr);
+               dev_info(dev->dev,
+                        "Invalid MAC address, using random address %pM\n",
+                        dev->macaddr);
+       }
+}
+EXPORT_SYMBOL_GPL(mt76_eeprom_override);
+
+int
+mt76_eeprom_init(struct mt76_dev *dev, int len)
+{
+       dev->eeprom.size = len;
+       dev->eeprom.data = devm_kzalloc(dev->dev, len, GFP_KERNEL);
+       if (!dev->eeprom.data)
+               return -ENOMEM;
+
+       return !mt76_get_of_eeprom(dev, len);
+}
+EXPORT_SYMBOL_GPL(mt76_eeprom_init);
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
new file mode 100644 (file)
index 0000000..3acf0e1
--- /dev/null
@@ -0,0 +1,393 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/of.h>
+#include "mt76.h"
+
+#define CHAN2G(_idx, _freq) {                  \
+       .band = NL80211_BAND_2GHZ,              \
+       .center_freq = (_freq),                 \
+       .hw_value = (_idx),                     \
+       .max_power = 30,                        \
+}
+
+#define CHAN5G(_idx, _freq) {                  \
+       .band = NL80211_BAND_5GHZ,              \
+       .center_freq = (_freq),                 \
+       .hw_value = (_idx),                     \
+       .max_power = 30,                        \
+}
+
+/* 2.4 GHz channels 1-14; hw_value is the channel number */
+static const struct ieee80211_channel mt76_channels_2ghz[] = {
+       CHAN2G(1, 2412),
+       CHAN2G(2, 2417),
+       CHAN2G(3, 2422),
+       CHAN2G(4, 2427),
+       CHAN2G(5, 2432),
+       CHAN2G(6, 2437),
+       CHAN2G(7, 2442),
+       CHAN2G(8, 2447),
+       CHAN2G(9, 2452),
+       CHAN2G(10, 2457),
+       CHAN2G(11, 2462),
+       CHAN2G(12, 2467),
+       CHAN2G(13, 2472),
+       CHAN2G(14, 2484),
+};
+
+/* 5 GHz channels 36-165, grouped by sub-band; hw_value is the channel
+ * number
+ */
+static const struct ieee80211_channel mt76_channels_5ghz[] = {
+       CHAN5G(36, 5180),
+       CHAN5G(40, 5200),
+       CHAN5G(44, 5220),
+       CHAN5G(48, 5240),
+
+       CHAN5G(52, 5260),
+       CHAN5G(56, 5280),
+       CHAN5G(60, 5300),
+       CHAN5G(64, 5320),
+
+       CHAN5G(100, 5500),
+       CHAN5G(104, 5520),
+       CHAN5G(108, 5540),
+       CHAN5G(112, 5560),
+       CHAN5G(116, 5580),
+       CHAN5G(120, 5600),
+       CHAN5G(124, 5620),
+       CHAN5G(128, 5640),
+       CHAN5G(132, 5660),
+       CHAN5G(136, 5680),
+       CHAN5G(140, 5700),
+
+       CHAN5G(149, 5745),
+       CHAN5G(153, 5765),
+       CHAN5G(157, 5785),
+       CHAN5G(161, 5805),
+       CHAN5G(165, 5825),
+};
+
+/* Throughput-based LED blink table: higher throughput -> faster blink */
+static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
+       { .throughput =   0 * 1024, .blink_time = 334 },
+       { .throughput =   1 * 1024, .blink_time = 260 },
+       { .throughput =   5 * 1024, .blink_time = 220 },
+       { .throughput =  10 * 1024, .blink_time = 190 },
+       { .throughput =  20 * 1024, .blink_time = 170 },
+       { .throughput =  50 * 1024, .blink_time = 150 },
+       { .throughput =  70 * 1024, .blink_time = 130 },
+       { .throughput = 100 * 1024, .blink_time = 110 },
+       { .throughput = 200 * 1024, .blink_time =  80 },
+       { .throughput = 300 * 1024, .blink_time =  50 },
+};
+
+/*
+ * Register a LED class device for the radio LED if the driver supplied
+ * brightness/blink callbacks.  Reads the optional "led" DT child node for
+ * the LED pin ("led-sources") and polarity ("led-active-low").
+ *
+ * Returns 0 when no LED callbacks are set or on successful registration,
+ * otherwise a negative error code.
+ */
+static int mt76_led_init(struct mt76_dev *dev)
+{
+       struct device_node *np = dev->dev->of_node;
+       struct ieee80211_hw *hw = dev->hw;
+       int led_pin;
+
+       if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
+               return 0;
+
+       snprintf(dev->led_name, sizeof(dev->led_name),
+                "mt76-%s", wiphy_name(hw->wiphy));
+
+       dev->led_cdev.name = dev->led_name;
+       dev->led_cdev.default_trigger =
+               ieee80211_create_tpt_led_trigger(hw,
+                                       IEEE80211_TPT_LEDTRIG_FL_RADIO,
+                                       mt76_tpt_blink,
+                                       ARRAY_SIZE(mt76_tpt_blink));
+
+       np = of_get_child_by_name(np, "led");
+       if (np) {
+               if (!of_property_read_u32(np, "led-sources", &led_pin))
+                       dev->led_pin = led_pin;
+               dev->led_al = of_property_read_bool(np, "led-active-low");
+               /* of_get_child_by_name() took a reference; drop it */
+               of_node_put(np);
+       }
+
+       return devm_led_classdev_register(dev->dev, &dev->led_cdev);
+}
+
+/*
+ * Shared supported-band setup for the 2.4 and 5 GHz paths: duplicate the
+ * const channel template into writable memory, allocate per-channel
+ * survey state, and advertise HT (and optionally VHT) capabilities.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int
+mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
+               const struct ieee80211_channel *chan, int n_chan,
+               struct ieee80211_rate *rates, int n_rates, bool vht)
+{
+       struct ieee80211_supported_band *sband = &msband->sband;
+       struct ieee80211_sta_ht_cap *ht_cap;
+       struct ieee80211_sta_vht_cap *vht_cap;
+       void *chanlist;
+       u16 mcs_map;
+       int size;
+
+       /* writable copy of the channel list (per-channel flags may change) */
+       size = n_chan * sizeof(*chan);
+       chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
+       if (!chanlist)
+               return -ENOMEM;
+
+       /* one mt76_channel_state (survey counters) per channel */
+       msband->chan = devm_kzalloc(dev->dev, n_chan * sizeof(*msband->chan),
+                                   GFP_KERNEL);
+       if (!msband->chan)
+               return -ENOMEM;
+
+       sband->channels = chanlist;
+       sband->n_channels = n_chan;
+       sband->bitrates = rates;
+       sband->n_bitrates = n_rates;
+       /* default channel until mt76_set_channel() runs */
+       dev->chandef.chan = &sband->channels[0];
+
+       ht_cap = &sband->ht_cap;
+       ht_cap->ht_supported = true;
+       ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+                      IEEE80211_HT_CAP_GRN_FLD |
+                      IEEE80211_HT_CAP_SGI_20 |
+                      IEEE80211_HT_CAP_SGI_40 |
+                      IEEE80211_HT_CAP_TX_STBC |
+                      (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+       /* rx_mask[0..1] = 0xff: MCS 0-15, i.e. two spatial streams */
+       ht_cap->mcs.rx_mask[0] = 0xff;
+       ht_cap->mcs.rx_mask[1] = 0xff;
+       ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+       ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+       ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
+
+       if (!vht)
+               return 0;
+
+       vht_cap = &sband->vht_cap;
+       vht_cap->vht_supported = true;
+
+       /* VHT MCS 0-9 on streams 1-2, streams 3-8 not supported */
+       mcs_map = (IEEE80211_VHT_MCS_SUPPORT_0_9 << (0 * 2)) |
+                 (IEEE80211_VHT_MCS_SUPPORT_0_9 << (1 * 2)) |
+                 (IEEE80211_VHT_MCS_NOT_SUPPORTED << (2 * 2)) |
+                 (IEEE80211_VHT_MCS_NOT_SUPPORTED << (3 * 2)) |
+                 (IEEE80211_VHT_MCS_NOT_SUPPORTED << (4 * 2)) |
+                 (IEEE80211_VHT_MCS_NOT_SUPPORTED << (5 * 2)) |
+                 (IEEE80211_VHT_MCS_NOT_SUPPORTED << (6 * 2)) |
+                 (IEEE80211_VHT_MCS_NOT_SUPPORTED << (7 * 2));
+
+       vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
+       vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+       vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
+                       IEEE80211_VHT_CAP_TXSTBC |
+                       IEEE80211_VHT_CAP_RXSTBC_1 |
+                       IEEE80211_VHT_CAP_SHORT_GI_80;
+
+       return 0;
+}
+
+/* Attach the 2.4 GHz band to the wiphy and initialize its channel and
+ * rate tables.  Returns 0 on success or -ENOMEM.
+ */
+static int
+mt76_init_sband_2g(struct mt76_dev *dev, struct ieee80211_rate *rates,
+                  int n_rates)
+{
+       struct mt76_sband *msband = &dev->sband_2g;
+
+       dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = &msband->sband;
+
+       return mt76_init_sband(dev, msband, mt76_channels_2ghz,
+                              ARRAY_SIZE(mt76_channels_2ghz),
+                              rates, n_rates, false);
+}
+
+/* Attach the 5 GHz band to the wiphy and initialize its channel and rate
+ * tables; @vht additionally enables VHT capabilities.
+ * Returns 0 on success or -ENOMEM.
+ */
+static int
+mt76_init_sband_5g(struct mt76_dev *dev, struct ieee80211_rate *rates,
+                  int n_rates, bool vht)
+{
+       struct mt76_sband *msband = &dev->sband_5g;
+
+       dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = &msband->sband;
+
+       return mt76_init_sband(dev, msband, mt76_channels_5ghz,
+                              ARRAY_SIZE(mt76_channels_5ghz),
+                              rates, n_rates, vht);
+}
+
+/* Drop a band from the wiphy when regulatory limits have disabled every
+ * one of its channels.
+ */
+static void
+mt76_check_sband(struct mt76_dev *dev, int band)
+{
+       struct ieee80211_supported_band *sband = dev->hw->wiphy->bands[band];
+       int i;
+
+       if (!sband)
+               return;
+
+       for (i = 0; i < sband->n_channels; i++) {
+               /* a single usable channel is enough to keep the band */
+               if (!(sband->channels[i].flags & IEEE80211_CHAN_DISABLED))
+                       return;
+       }
+
+       /* no usable channels left: hide the band from mac80211 */
+       sband->n_channels = 0;
+       dev->hw->wiphy->bands[band] = NULL;
+}
+
+/*
+ * Final device bring-up: set wiphy/hw capability flags, initialize the
+ * supported bands, register the LED and finally register with mac80211.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int mt76_register_device(struct mt76_dev *dev, bool vht,
+                        struct ieee80211_rate *rates, int n_rates)
+{
+       struct ieee80211_hw *hw = dev->hw;
+       struct wiphy *wiphy = hw->wiphy;
+       int ret;
+
+       dev_set_drvdata(dev->dev, dev);
+
+       spin_lock_init(&dev->lock);
+       spin_lock_init(&dev->cc_lock);
+       INIT_LIST_HEAD(&dev->txwi_cache);
+
+       SET_IEEE80211_DEV(hw, dev->dev);
+       SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+
+       wiphy->interface_modes =
+               BIT(NL80211_IFTYPE_STATION) |
+               BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+               BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+               BIT(NL80211_IFTYPE_ADHOC);
+
+       wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+
+       /* per-txq driver private area holds a struct mt76_txq */
+       hw->txq_data_size = sizeof(struct mt76_txq);
+       hw->max_tx_fragments = 16;
+
+       ieee80211_hw_set(hw, SIGNAL_DBM);
+       ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+       ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+       ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+       ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+       ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+       ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
+       ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+       ieee80211_hw_set(hw, TX_AMSDU);
+       ieee80211_hw_set(hw, TX_FRAG_LIST);
+       ieee80211_hw_set(hw, MFP_CAPABLE);
+
+       wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
+       if (dev->cap.has_2ghz) {
+               ret = mt76_init_sband_2g(dev, rates, n_rates);
+               if (ret)
+                       return ret;
+       }
+
+       if (dev->cap.has_5ghz) {
+               /* NOTE(review): skips the first 4 entries of @rates for
+                * 5 GHz -- presumably the CCK rates; assumes n_rates > 4.
+                * Confirm against callers.
+                */
+               ret = mt76_init_sband_5g(dev, rates + 4, n_rates - 4, vht);
+               if (ret)
+                       return ret;
+       }
+
+       /* apply DT frequency limits, then drop bands left without any
+        * usable channel
+        */
+       wiphy_read_of_freq_limits(dev->hw->wiphy);
+       mt76_check_sband(dev, NL80211_BAND_2GHZ);
+       mt76_check_sband(dev, NL80211_BAND_5GHZ);
+
+       ret = mt76_led_init(dev);
+       if (ret)
+               return ret;
+
+       return ieee80211_register_hw(hw);
+}
+EXPORT_SYMBOL_GPL(mt76_register_device);
+
+/* Tear down the mac80211 registration and release cached tx descriptors */
+void mt76_unregister_device(struct mt76_dev *dev)
+{
+       ieee80211_unregister_hw(dev->hw);
+       mt76_tx_free(dev);
+}
+EXPORT_SYMBOL_GPL(mt76_unregister_device);
+
+/* Queue a received frame for delivery to mac80211 (see mt76_rx_complete());
+ * frames arriving while the device is not running are dropped.
+ */
+void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
+{
+       if (test_bit(MT76_STATE_RUNNING, &dev->state))
+               __skb_queue_tail(&dev->rx_skb[q], skb);
+       else
+               dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(mt76_rx);
+
+/*
+ * Record the new operating channel from mac80211's current chandef.
+ * The driver's survey callback is invoked first so airtime counters for
+ * the outgoing channel get folded in before the switch.
+ */
+void mt76_set_channel(struct mt76_dev *dev)
+{
+       struct ieee80211_hw *hw = dev->hw;
+       struct cfg80211_chan_def *chandef = &hw->conf.chandef;
+       struct mt76_channel_state *state;
+       bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
+
+       if (dev->drv->update_survey)
+               dev->drv->update_survey(dev);
+
+       dev->chandef = *chandef;
+
+       /* off-channel visits (e.g. scanning) do not change the main channel */
+       if (!offchannel)
+               dev->main_chan = chandef->chan;
+
+       /* reset counters when tuning away from the main channel, so
+        * transient visits do not accumulate stale survey data
+        */
+       if (chandef->chan != dev->main_chan) {
+               state = mt76_channel_state(dev, chandef->chan);
+               memset(state, 0, sizeof(*state));
+       }
+}
+EXPORT_SYMBOL_GPL(mt76_set_channel);
+
+/*
+ * mac80211 survey hook: report per-channel airtime statistics.
+ * @idx indexes the concatenation of the 2.4 GHz and 5 GHz channel lists;
+ * returns -ENOENT past the last channel to terminate iteration.
+ */
+int mt76_get_survey(struct ieee80211_hw *hw, int idx,
+                   struct survey_info *survey)
+{
+       struct mt76_dev *dev = hw->priv;
+       struct mt76_sband *sband;
+       struct ieee80211_channel *chan;
+       struct mt76_channel_state *state;
+       int ret = 0;
+
+       /* refresh counters once per iteration pass */
+       if (idx == 0 && dev->drv->update_survey)
+               dev->drv->update_survey(dev);
+
+       sband = &dev->sband_2g;
+       if (idx >= sband->sband.n_channels) {
+               idx -= sband->sband.n_channels;
+               sband = &dev->sband_5g;
+       }
+
+       if (idx >= sband->sband.n_channels)
+               return -ENOENT;
+
+       chan = &sband->sband.channels[idx];
+       state = mt76_channel_state(dev, chan);
+
+       memset(survey, 0, sizeof(*survey));
+       survey->channel = chan;
+       survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
+       if (chan == dev->main_chan)
+               survey->filled |= SURVEY_INFO_IN_USE;
+
+       /* counters are divided by 1000 for mac80211 -- presumably stored
+        * in microseconds; cc_lock guards against concurrent updates
+        */
+       spin_lock_bh(&dev->cc_lock);
+       survey->time = div_u64(state->cc_active, 1000);
+       survey->time_busy = div_u64(state->cc_busy, 1000);
+       spin_unlock_bh(&dev->cc_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_get_survey);
+
+/* Flush all frames queued by mt76_rx() up to mac80211 in NAPI context */
+void mt76_rx_complete(struct mt76_dev *dev, enum mt76_rxq_id q)
+{
+       struct sk_buff *skb;
+
+       for (;;) {
+               skb = __skb_dequeue(&dev->rx_skb[q]);
+               if (!skb)
+                       break;
+
+               ieee80211_rx_napi(dev->hw, NULL, skb, &dev->napi[q]);
+       }
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
new file mode 100644 (file)
index 0000000..09a14de
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76.h"
+#include "trace.h"
+
+/* MMIO register read with tracepoint logging of the value read */
+static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
+{
+       u32 val;
+
+       val = ioread32(dev->regs + offset);
+       trace_reg_rr(dev, offset, val);
+
+       return val;
+}
+
+/* MMIO register write; traced before the value is posted to hardware */
+static void mt76_mmio_wr(struct mt76_dev *dev, u32 offset, u32 val)
+{
+       trace_reg_wr(dev, offset, val);
+       iowrite32(val, dev->regs + offset);
+}
+
+/* Read-modify-write: clear the @mask bits, OR in @val, write back.
+ * Returns the value written.
+ */
+static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
+{
+       u32 cur = mt76_mmio_rr(dev, offset) & ~mask;
+
+       cur |= val;
+       mt76_mmio_wr(dev, offset, cur);
+
+       return cur;
+}
+
+/* Burst-copy @len bytes from @data into device registers at @offset.
+ * NOTE(review): copies whole 32-bit words (len >> 2); assumes @len is a
+ * multiple of 4 -- confirm against callers.
+ */
+static void mt76_mmio_copy(struct mt76_dev *dev, u32 offset, const void *data,
+                          int len)
+{
+       __iowrite32_copy(dev->regs + offset, data, len >> 2);
+}
+
+/* Install the MMIO bus accessors; @regs is the mapped register base */
+void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
+{
+       static const struct mt76_bus_ops mt76_mmio_ops = {
+               .rr = mt76_mmio_rr,
+               .rmw = mt76_mmio_rmw,
+               .wr = mt76_mmio_wr,
+               .copy = mt76_mmio_copy,
+       };
+
+       dev->bus = &mt76_mmio_ops;
+       dev->regs = regs;
+}
+EXPORT_SYMBOL_GPL(mt76_mmio_init);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
new file mode 100644 (file)
index 0000000..aa0880b
--- /dev/null
@@ -0,0 +1,360 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76_H
+#define __MT76_H
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/leds.h>
+#include <net/mac80211.h>
+#include "util.h"
+
+#define MT_TX_RING_SIZE     256
+#define MT_MCU_RING_SIZE    32
+#define MT_RX_BUF_SIZE      2048
+
+struct mt76_dev;
+
+struct mt76_bus_ops {
+       u32 (*rr)(struct mt76_dev *dev, u32 offset);
+       void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
+       u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
+       void (*copy)(struct mt76_dev *dev, u32 offset, const void *data,
+                    int len);
+};
+
+enum mt76_txq_id {
+       MT_TXQ_VO = IEEE80211_AC_VO,
+       MT_TXQ_VI = IEEE80211_AC_VI,
+       MT_TXQ_BE = IEEE80211_AC_BE,
+       MT_TXQ_BK = IEEE80211_AC_BK,
+       MT_TXQ_PSD,
+       MT_TXQ_MCU,
+       MT_TXQ_BEACON,
+       MT_TXQ_CAB,
+       __MT_TXQ_MAX
+};
+
+enum mt76_rxq_id {
+       MT_RXQ_MAIN,
+       MT_RXQ_MCU,
+       __MT_RXQ_MAX
+};
+
+struct mt76_queue_buf {
+       dma_addr_t addr;
+       int len;
+};
+
+struct mt76_queue_entry {
+       union {
+               void *buf;
+               struct sk_buff *skb;
+       };
+       struct mt76_txwi_cache *txwi;
+       bool schedule;
+};
+
+struct mt76_queue_regs {
+       u32 desc_base;
+       u32 ring_size;
+       u32 cpu_idx;
+       u32 dma_idx;
+} __packed __aligned(4);
+
+struct mt76_queue {
+       struct mt76_queue_regs __iomem *regs;
+
+       spinlock_t lock;
+       struct mt76_queue_entry *entry;
+       struct mt76_desc *desc;
+
+       struct list_head swq;
+       int swq_queued;
+
+       u16 head;
+       u16 tail;
+       int ndesc;
+       int queued;
+       int buf_size;
+
+       u8 buf_offset;
+       u8 hw_idx;
+
+       dma_addr_t desc_dma;
+       struct sk_buff *rx_head;
+};
+
+struct mt76_queue_ops {
+       int (*init)(struct mt76_dev *dev);
+
+       int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);
+
+       int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
+                      struct mt76_queue_buf *buf, int nbufs, u32 info,
+                      struct sk_buff *skb, void *txwi);
+
+       void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
+                        int *len, u32 *info, bool *more);
+
+       void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);
+
+       void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
+                          bool flush);
+
+       void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
+};
+
+struct mt76_wcid {
+       u8 idx;
+       u8 hw_key_idx;
+
+       __le16 tx_rate;
+       bool tx_rate_set;
+       u8 tx_rate_nss;
+       s8 max_txpwr_adj;
+};
+
+struct mt76_txq {
+       struct list_head list;
+       struct mt76_queue *hwq;
+       struct mt76_wcid *wcid;
+
+       struct sk_buff_head retry_q;
+
+       u16 agg_ssn;
+       bool send_bar;
+       bool aggr;
+};
+
+struct mt76_txwi_cache {
+       u32 txwi[8];
+       dma_addr_t dma_addr;
+       struct list_head list;
+};
+
+enum {
+       MT76_STATE_INITIALIZED,
+       MT76_STATE_RUNNING,
+       MT76_SCANNING,
+       MT76_RESET,
+};
+
+/* Band capabilities detected from the hardware/EEPROM */
+struct mt76_hw_cap {
+       bool has_2ghz;
+       bool has_5ghz;
+};
+
+struct mt76_driver_ops {
+       u16 txwi_size;
+
+       void (*update_survey)(struct mt76_dev *dev);
+
+       int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
+                             struct sk_buff *skb, struct mt76_queue *q,
+                             struct mt76_wcid *wcid,
+                             struct ieee80211_sta *sta, u32 *tx_info);
+
+       void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
+                               struct mt76_queue_entry *e, bool flush);
+
+       void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
+                      struct sk_buff *skb);
+
+       void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
+};
+
+/* Per-channel airtime counters reported via mt76_get_survey();
+ * presumably in microseconds -- the survey path divides by 1000
+ */
+struct mt76_channel_state {
+       u64 cc_active;
+       u64 cc_busy;
+};
+
+/* Supported band plus its per-channel survey state array */
+struct mt76_sband {
+       struct ieee80211_supported_band sband;
+       struct mt76_channel_state *chan;
+};
+
+struct mt76_dev {
+       struct ieee80211_hw *hw;
+       struct cfg80211_chan_def chandef;
+       struct ieee80211_channel *main_chan;
+
+       spinlock_t lock;
+       spinlock_t cc_lock;
+       const struct mt76_bus_ops *bus;
+       const struct mt76_driver_ops *drv;
+       void __iomem *regs;
+       struct device *dev;
+
+       struct net_device napi_dev;
+       struct napi_struct napi[__MT_RXQ_MAX];
+       struct sk_buff_head rx_skb[__MT_RXQ_MAX];
+
+       struct list_head txwi_cache;
+       struct mt76_queue q_tx[__MT_TXQ_MAX];
+       struct mt76_queue q_rx[__MT_RXQ_MAX];
+       const struct mt76_queue_ops *queue_ops;
+
+       u8 macaddr[ETH_ALEN];
+       u32 rev;
+       unsigned long state;
+
+       struct mt76_sband sband_2g;
+       struct mt76_sband sband_5g;
+       struct debugfs_blob_wrapper eeprom;
+       struct debugfs_blob_wrapper otp;
+       struct mt76_hw_cap cap;
+
+       u32 debugfs_reg;
+
+       struct led_classdev led_cdev;
+       char led_name[32];
+       bool led_al;
+       u8 led_pin;
+};
+
+enum mt76_phy_type {
+       MT_PHY_TYPE_CCK,
+       MT_PHY_TYPE_OFDM,
+       MT_PHY_TYPE_HT,
+       MT_PHY_TYPE_HT_GF,
+       MT_PHY_TYPE_VHT,
+};
+
+struct mt76_rate_power {
+       union {
+               struct {
+                       s8 cck[4];
+                       s8 ofdm[8];
+                       s8 ht[16];
+                       s8 vht[10];
+               };
+               s8 all[38];
+       };
+};
+
+#define mt76_rr(dev, ...)      (dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
+#define mt76_wr(dev, ...)      (dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
+#define mt76_rmw(dev, ...)     (dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
+#define mt76_wr_copy(dev, ...) (dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__)
+
+#define mt76_set(dev, offset, val)     mt76_rmw(dev, offset, 0, val)
+#define mt76_clear(dev, offset, val)   mt76_rmw(dev, offset, val, 0)
+
+#define mt76_get_field(_dev, _reg, _field)             \
+       FIELD_GET(_field, mt76_rr(dev, _reg))
+
+#define mt76_rmw_field(_dev, _reg, _field, _val)       \
+       mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
+
+#define mt76_hw(dev) (dev)->mt76.hw
+
+bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+                int timeout);
+
+#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)
+
+bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+                     int timeout);
+
+#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
+
+void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
+
+/* Chip identifier: upper 16 bits of the stored ASIC revision word */
+static inline u16 mt76_chip(struct mt76_dev *dev)
+{
+       return dev->rev >> 16;
+}
+
+/* Chip revision: lower 16 bits of the stored ASIC revision word */
+static inline u16 mt76_rev(struct mt76_dev *dev)
+{
+       return dev->rev & 0xffff;
+}
+
+#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
+#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
+
+#define mt76_init_queues(dev)          (dev)->mt76.queue_ops->init(&((dev)->mt76))
+#define mt76_queue_alloc(dev, ...)     (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_add_buf(dev, ...)   (dev)->mt76.queue_ops->add_buf(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_rx_reset(dev, ...)  (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_tx_cleanup(dev, ...)        (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_kick(dev, ...)      (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
+
+/* Map a cfg80211 channel to its survey counters.  The channel must be an
+ * element of the band's own channel array, since its array index is
+ * recovered by pointer arithmetic.
+ */
+static inline struct mt76_channel_state *
+mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
+{
+       struct mt76_sband *msband = (c->band == NL80211_BAND_2GHZ) ?
+                                   &dev->sband_2g : &dev->sband_5g;
+
+       return &msband->chan[c - &msband->sband.channels[0]];
+}
+
+int mt76_register_device(struct mt76_dev *dev, bool vht,
+                        struct ieee80211_rate *rates, int n_rates);
+void mt76_unregister_device(struct mt76_dev *dev);
+
+struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
+
+int mt76_eeprom_init(struct mt76_dev *dev, int len);
+void mt76_eeprom_override(struct mt76_dev *dev);
+
+/* Recover the ieee80211_txq whose drv_priv area embeds this mt76_txq.
+ * The void * bounce sidesteps container_of()'s type check -- presumably
+ * because drv_priv is not declared as struct mt76_txq; confirm against
+ * the mac80211 header.
+ */
+static inline struct ieee80211_txq *
+mtxq_to_txq(struct mt76_txq *mtxq)
+{
+       void *ptr = mtxq;
+
+       return container_of(ptr, struct ieee80211_txq, drv_priv);
+}
+
+int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+                     struct sk_buff *skb, struct mt76_wcid *wcid,
+                     struct ieee80211_sta *sta);
+
+void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
+void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
+            struct mt76_wcid *wcid, struct sk_buff *skb);
+void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
+void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
+void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
+void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
+                        bool send_bar);
+void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
+void mt76_txq_schedule_all(struct mt76_dev *dev);
+void mt76_release_buffered_frames(struct ieee80211_hw *hw,
+                                 struct ieee80211_sta *sta,
+                                 u16 tids, int nframes,
+                                 enum ieee80211_frame_release_type reason,
+                                 bool more_data);
+void mt76_set_channel(struct mt76_dev *dev);
+int mt76_get_survey(struct ieee80211_hw *hw, int idx,
+                   struct survey_info *survey);
+
+/* internal */
+void mt76_tx_free(struct mt76_dev *dev);
+void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
+void mt76_rx_complete(struct mt76_dev *dev, enum mt76_rxq_id q);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h
new file mode 100644 (file)
index 0000000..a12dfce
--- /dev/null
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_H
+#define __MT76x2_H
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/kfifo.h>
+
+#define MT7662_FIRMWARE                "mt7662.bin"
+#define MT7662_ROM_PATCH       "mt7662_rom_patch.bin"
+#define MT7662_EEPROM_SIZE     512
+
+#define MT76x2_RX_RING_SIZE    256
+#define MT_RX_HEADROOM         32
+
+#define MT_MAX_CHAINS          2
+
+#define MT_CALIBRATE_INTERVAL  HZ
+
+#include "mt76.h"
+#include "mt76x2_regs.h"
+#include "mt76x2_mac.h"
+#include "mt76x2_dfs.h"
+
+struct mt76x2_mcu {
+       struct mutex mutex;
+
+       wait_queue_head_t wait;
+       struct sk_buff_head res_q;
+
+       u32 msg_seq;
+};
+
+struct mt76x2_rx_freq_cal {
+       s8 high_gain[MT_MAX_CHAINS];
+       s8 rssi_offset[MT_MAX_CHAINS];
+       s8 lna_gain;
+       u32 mcu_gain;
+};
+
+struct mt76x2_calibration {
+       struct mt76x2_rx_freq_cal rx;
+
+       u8 agc_gain_init[MT_MAX_CHAINS];
+       u8 agc_gain_cur[MT_MAX_CHAINS];
+
+       int avg_rssi[MT_MAX_CHAINS];
+       int avg_rssi_all;
+
+       s8 agc_gain_adjust;
+       s8 low_gain;
+
+       u8 temp;
+
+       bool init_cal_done;
+       bool tssi_cal_done;
+       bool tssi_comp_pending;
+       bool dpd_cal_done;
+       bool channel_cal_done;
+};
+
+struct mt76x2_dev {
+       struct mt76_dev mt76; /* must be first */
+
+       struct mac_address macaddr_list[8];
+
+       struct mutex mutex;
+
+       const u16 *beacon_offsets;
+       unsigned long wcid_mask[128 / BITS_PER_LONG];
+
+       int txpower_conf;
+       int txpower_cur;
+
+       u8 txdone_seq;
+       DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x2_tx_status);
+
+       struct mt76x2_mcu mcu;
+       struct sk_buff *rx_head;
+
+       struct tasklet_struct tx_tasklet;
+       struct tasklet_struct pre_tbtt_tasklet;
+       struct delayed_work cal_work;
+       struct delayed_work mac_work;
+
+       u32 aggr_stats[32];
+
+       struct mt76_wcid global_wcid;
+       struct mt76_wcid __rcu *wcid[128];
+
+       spinlock_t irq_lock;
+       u32 irqmask;
+
+       struct sk_buff *beacons[8];
+       u8 beacon_mask;
+       u8 beacon_data_mask;
+
+       u32 rev;
+       u32 rxfilter;
+
+       u16 chainmask;
+
+       struct mt76x2_calibration cal;
+
+       s8 target_power;
+       s8 target_power_delta[2];
+       struct mt76_rate_power rate_power;
+       bool enable_tpc;
+
+       u8 coverage_class;
+       u8 slottime;
+
+       struct mt76x2_dfs_pattern_detector dfs_pd;
+};
+
+struct mt76x2_vif {
+       u8 idx;
+
+       struct mt76_wcid group_wcid;
+};
+
+struct mt76x2_sta {
+       struct mt76_wcid wcid; /* must be first */
+
+       struct mt76x2_tx_status status;
+       int n_frames;
+};
+
+/* True when the chip ID (upper 16 bits of the revision word) is 0x7612 */
+static inline bool is_mt7612(struct mt76x2_dev *dev)
+{
+       return (dev->rev >> 16) == 0x7612;
+}
+
+void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set);
+
+/* Enable the interrupts in @mask (sets bits in the cached irq mask) */
+static inline void mt76x2_irq_enable(struct mt76x2_dev *dev, u32 mask)
+{
+       mt76x2_set_irq_mask(dev, 0, mask);
+}
+
+/* Disable the interrupts in @mask (clears bits in the cached irq mask) */
+static inline void mt76x2_irq_disable(struct mt76x2_dev *dev, u32 mask)
+{
+       mt76x2_set_irq_mask(dev, mask, 0);
+}
+
+extern const struct ieee80211_ops mt76x2_ops;
+
+struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev);
+int mt76x2_register_device(struct mt76x2_dev *dev);
+void mt76x2_init_debugfs(struct mt76x2_dev *dev);
+
+irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance);
+void mt76x2_phy_power_on(struct mt76x2_dev *dev);
+int mt76x2_init_hardware(struct mt76x2_dev *dev);
+void mt76x2_stop_hardware(struct mt76x2_dev *dev);
+int mt76x2_eeprom_init(struct mt76x2_dev *dev);
+int mt76x2_apply_calibration_data(struct mt76x2_dev *dev, int channel);
+void mt76x2_set_tx_ackto(struct mt76x2_dev *dev);
+
+int mt76x2_phy_start(struct mt76x2_dev *dev);
+int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
+                        struct cfg80211_chan_def *chandef);
+int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain);
+void mt76x2_phy_calibrate(struct work_struct *work);
+void mt76x2_phy_set_txpower(struct mt76x2_dev *dev);
+
+int mt76x2_mcu_init(struct mt76x2_dev *dev);
+int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+                          u8 bw_index, bool scan);
+int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on);
+int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
+                      u8 channel);
+int mt76x2_mcu_cleanup(struct mt76x2_dev *dev);
+
+int mt76x2_dma_init(struct mt76x2_dev *dev);
+void mt76x2_dma_cleanup(struct mt76x2_dev *dev);
+
+void mt76x2_cleanup(struct mt76x2_dev *dev);
+
+int mt76x2_tx_queue_mcu(struct mt76x2_dev *dev, enum mt76_txq_id qid,
+                       struct sk_buff *skb, int cmd, int seq);
+void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+              struct sk_buff *skb);
+void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb);
+int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
+                         struct sk_buff *skb, struct mt76_queue *q,
+                         struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+                         u32 *tx_info);
+void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+                           struct mt76_queue_entry *e, bool flush);
+
+void mt76x2_pre_tbtt_tasklet(unsigned long arg);
+
+void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
+void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+                        struct sk_buff *skb);
+
+void mt76x2_update_channel(struct mt76_dev *mdev);
+
+s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
+                              const struct ieee80211_tx_rate *rate);
+s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj);
+void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2_core.c
new file mode 100644 (file)
index 0000000..2629779
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mt76x2_trace.h"
+
+/* Update the cached interrupt mask under irq_lock and push it to the
+ * MT_INT_MASK_CSR register: bits in @clear are disabled, bits in @set
+ * are enabled.  Safe to call from any context (irqsave lock).
+ */
+void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->irq_lock, flags);
+       dev->irqmask &= ~clear;
+       dev->irqmask |= set;
+       mt76_wr(dev, MT_INT_MASK_CSR, dev->irqmask);
+       spin_unlock_irqrestore(&dev->irq_lock, flags);
+}
+
+/* NAPI poll-complete callback: re-enable the RX-done interrupt for
+ * queue @q (it was masked in the IRQ handler before napi_schedule).
+ */
+void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+       struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+
+       mt76x2_irq_enable(dev, MT_INT_RX_DONE(q));
+}
+
+/* Top-half interrupt handler: acknowledge all pending interrupt
+ * sources, then dispatch each enabled source to its bottom half
+ * (tasklet or NAPI).  Returns IRQ_NONE if the device has not been
+ * initialized yet.
+ */
+irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance)
+{
+       struct mt76x2_dev *dev = dev_instance;
+       u32 intr;
+
+       /* read and acknowledge all pending sources */
+       intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+       mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+       if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+               return IRQ_NONE;
+
+       trace_dev_irq(dev, intr, dev->irqmask);
+
+       /* only act on sources that are currently enabled */
+       intr &= dev->irqmask;
+
+       if (intr & MT_INT_TX_DONE_ALL) {
+               /* masked here; the tx tasklet re-enables it when done */
+               mt76x2_irq_disable(dev, MT_INT_TX_DONE_ALL);
+               tasklet_schedule(&dev->tx_tasklet);
+       }
+
+       if (intr & MT_INT_RX_DONE(0)) {
+               /* masked here; re-enabled by mt76x2_rx_poll_complete() */
+               mt76x2_irq_disable(dev, MT_INT_RX_DONE(0));
+               napi_schedule(&dev->mt76.napi[0]);
+       }
+
+       if (intr & MT_INT_RX_DONE(1)) {
+               mt76x2_irq_disable(dev, MT_INT_RX_DONE(1));
+               napi_schedule(&dev->mt76.napi[1]);
+       }
+
+       if (intr & MT_INT_PRE_TBTT)
+               tasklet_schedule(&dev->pre_tbtt_tasklet);
+
+       /* send buffered multicast frames now */
+       if (intr & MT_INT_TBTT)
+               mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
+
+       if (intr & MT_INT_TX_STAT) {
+               mt76x2_mac_poll_tx_status(dev, true);
+               tasklet_schedule(&dev->tx_tasklet);
+       }
+
+       if (intr & MT_INT_GPTIMER) {
+               /* masked here; the DFS tasklet re-enables it when done */
+               mt76x2_irq_disable(dev, MT_INT_GPTIMER);
+               tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
+       }
+
+       return IRQ_HANDLED;
+}
+
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
new file mode 100644 (file)
index 0000000..612feb5
--- /dev/null
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include "mt76x2.h"
+
+/* debugfs seq_file show: dump the A-MPDU aggregation-size histogram
+ * (dev->aggr_stats, 32 buckets) as four rows of eight, each row
+ * printing the bucket's aggregate length followed by its hit count.
+ */
+static int
+mt76x2_ampdu_stat_read(struct seq_file *file, void *data)
+{
+       struct mt76x2_dev *dev = file->private;
+       int i, j;
+
+       for (i = 0; i < 4; i++) {
+               seq_puts(file, "Length: ");
+               for (j = 0; j < 8; j++)
+                       seq_printf(file, "%8d | ", i * 8 + j + 1);
+               seq_puts(file, "\n");
+               seq_puts(file, "Count:  ");
+               for (j = 0; j < 8; j++)
+                       seq_printf(file, "%8d | ", dev->aggr_stats[i * 8 + j]);
+               seq_puts(file, "\n");
+               seq_puts(file, "--------");
+               for (j = 0; j < 8; j++)
+                       seq_puts(file, "-----------");
+               seq_puts(file, "\n");
+       }
+
+       return 0;
+}
+
+/* open handler for the "ampdu_stat" debugfs file */
+static int
+mt76x2_ampdu_stat_open(struct inode *inode, struct file *f)
+{
+       return single_open(f, mt76x2_ampdu_stat_read, inode->i_private);
+}
+
+/* Print a labeled array of s8 values as a single seq_file line:
+ * "     label: v0 v1 ..." */
+static void
+seq_puts_array(struct seq_file *file, const char *str, s8 *val, int len)
+{
+       int i;
+
+       seq_printf(file, "%10s:", str);
+       for (i = 0; i < len; i++)
+               seq_printf(file, " %2d", val[i]);
+       seq_puts(file, "\n");
+}
+
+/* debugfs seq_file show for "txpower": dump the target power, the
+ * per-entry power deltas and the per-rate power tables (CCK/OFDM/
+ * HT/VHT).
+ */
+static int read_txpower(struct seq_file *file, void *data)
+{
+       struct mt76x2_dev *dev = dev_get_drvdata(file->private);
+
+       seq_printf(file, "Target power: %d\n", dev->target_power);
+
+       seq_puts_array(file, "Delta", dev->target_power_delta,
+                      ARRAY_SIZE(dev->target_power_delta));
+       seq_puts_array(file, "CCK", dev->rate_power.cck,
+                      ARRAY_SIZE(dev->rate_power.cck));
+       seq_puts_array(file, "OFDM", dev->rate_power.ofdm,
+                      ARRAY_SIZE(dev->rate_power.ofdm));
+       seq_puts_array(file, "HT", dev->rate_power.ht,
+                      ARRAY_SIZE(dev->rate_power.ht));
+       seq_puts_array(file, "VHT", dev->rate_power.vht,
+                      ARRAY_SIZE(dev->rate_power.vht));
+       return 0;
+}
+
+/* file operations for the "ampdu_stat" debugfs entry (seq_file based) */
+static const struct file_operations fops_ampdu_stat = {
+       .open = mt76x2_ampdu_stat_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+/* debugfs seq_file show for "dfs_stats": per-engine counts of hw radar
+ * patterns detected and hw pulses discarded by software validation.
+ */
+static int
+mt76x2_dfs_stat_read(struct seq_file *file, void *data)
+{
+       int i;
+       struct mt76x2_dev *dev = file->private;
+       struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+       for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+               seq_printf(file, "engine: %d\n", i);
+               seq_printf(file, "  hw pattern detected:\t%d\n",
+                          dfs_pd->stats[i].hw_pattern);
+               seq_printf(file, "  hw pulse discarded:\t%d\n",
+                          dfs_pd->stats[i].hw_pulse_discarded);
+       }
+
+       return 0;
+}
+
+/* open handler for the "dfs_stats" debugfs file */
+static int
+mt76x2_dfs_stat_open(struct inode *inode, struct file *f)
+{
+       return single_open(f, mt76x2_dfs_stat_read, inode->i_private);
+}
+
+/* file operations for the "dfs_stats" debugfs entry (seq_file based) */
+static const struct file_operations fops_dfs_stat = {
+       .open = mt76x2_dfs_stat_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+/* Register the mt76x2 debugfs entries (temperature, tpc, ampdu_stat,
+ * dfs_stats, txpower) under the common mt76 debugfs directory.
+ * Silently does nothing if the directory could not be created.
+ */
+void mt76x2_init_debugfs(struct mt76x2_dev *dev)
+{
+       struct dentry *dir;
+
+       dir = mt76_register_debugfs(&dev->mt76);
+       if (!dir)
+               return;
+
+       debugfs_create_u8("temperature", S_IRUSR, dir, &dev->cal.temp);
+       debugfs_create_bool("tpc", S_IRUSR | S_IWUSR, dir, &dev->enable_tpc);
+
+       debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat);
+       debugfs_create_file("dfs_stats", S_IRUSR, dir, dev, &fops_dfs_stat);
+       debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
+                                   read_txpower);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
new file mode 100644 (file)
index 0000000..5b452a5
--- /dev/null
@@ -0,0 +1,493 @@
+/*
+ * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+/* Designated-initializer shorthand for struct mt76x2_radar_specs:
+ * detection mode, averaging length, energy/width/period/burst ranges
+ * with margins, event expiration and power-jump threshold.
+ */
+#define RADAR_SPEC(m, len, el, eh, wl, wh,             \
+                  w_tolerance, tl, th, t_tolerance,    \
+                  bl, bh, event_exp, power_jmp)        \
+{                                                      \
+       .mode = m,                                      \
+       .avg_len = len,                                 \
+       .e_low = el,                                    \
+       .e_high = eh,                                   \
+       .w_low = wl,                                    \
+       .w_high = wh,                                   \
+       .w_margin = w_tolerance,                        \
+       .t_low = tl,                                    \
+       .t_high = th,                                   \
+       .t_margin = t_tolerance,                        \
+       .b_low = bl,                                    \
+       .b_high = bh,                                   \
+       .event_expiration = event_exp,                  \
+       .pwr_jmp = power_jmp                            \
+}
+
+/* ETSI hw radar-detector parameters: one entry per detection engine
+ * (MT_DFS_NUM_ENGINES), repeated for each channel width (20/40/80 MHz).
+ */
+static const struct mt76x2_radar_specs etsi_radar_specs[] = {
+       /* 20MHz */
+       RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+                  0x7fffffff, 0x155cc0, 0x19cc),
+       RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+                  0x7fffffff, 0x155cc0, 0x19cc),
+       RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+                  0x7fffffff, 0x155cc0, 0x19dd),
+       RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+                  0x7fffffff, 0x2191c0, 0x15cc),
+       /* 40MHz */
+       RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+                  0x7fffffff, 0x155cc0, 0x19cc),
+       RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+                  0x7fffffff, 0x155cc0, 0x19cc),
+       RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+                  0x7fffffff, 0x155cc0, 0x19dd),
+       RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+                  0x7fffffff, 0x2191c0, 0x15cc),
+       /* 80MHz */
+       RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+                  0x7fffffff, 0x155cc0, 0x19cc),
+       RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+                  0x7fffffff, 0x155cc0, 0x19cc),
+       RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+                  0x7fffffff, 0x155cc0, 0x19dd),
+       RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+                  0x7fffffff, 0x2191c0, 0x15cc)
+};
+
+/* FCC hw radar-detector parameters: one entry per detection engine,
+ * repeated for each channel width (20/40/80 MHz).
+ */
+static const struct mt76x2_radar_specs fcc_radar_specs[] = {
+       /* 20MHz */
+       RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
+                  0x7fffffff, 0xfe808, 0x13dc),
+       RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+                  0x7fffffff, 0xfe808, 0x19dd),
+       RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+                  0x7fffffff, 0xfe808, 0x12cc),
+       RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+                  0x3938700, 0x57bcf00, 0x1289),
+       /* 40MHz */
+       RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
+                  0x7fffffff, 0xfe808, 0x13dc),
+       RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+                  0x7fffffff, 0xfe808, 0x19dd),
+       RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+                  0x7fffffff, 0xfe808, 0x12cc),
+       RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+                  0x3938700, 0x57bcf00, 0x1289),
+       /* 80MHz */
+       RADAR_SPEC(0, 8, 2, 14, 106, 150, 15, 2900, 80100, 15, 0,
+                  0x7fffffff, 0xfe808, 0x16cc),
+       RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+                  0x7fffffff, 0xfe808, 0x19dd),
+       RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+                  0x7fffffff, 0xfe808, 0x12cc),
+       RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+                  0x3938700, 0x57bcf00, 0x1289)
+};
+
+/* Japan W56 band hw radar-detector parameters: one entry per detection
+ * engine, repeated for each channel width (20/40/80 MHz).
+ * Note: hex literals normalized to lowercase 0x (was 0X57bcf00) to
+ * match every other constant in this file; values are unchanged.
+ */
+static const struct mt76x2_radar_specs jp_w56_radar_specs[] = {
+       /* 20MHz */
+       RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
+                  0x7fffffff, 0x14c080, 0x13dc),
+       RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+                  0x7fffffff, 0x14c080, 0x19dd),
+       RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+                  0x7fffffff, 0x14c080, 0x12cc),
+       RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+                  0x3938700, 0x57bcf00, 0x1289),
+       /* 40MHz */
+       RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
+                  0x7fffffff, 0x14c080, 0x13dc),
+       RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+                  0x7fffffff, 0x14c080, 0x19dd),
+       RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+                  0x7fffffff, 0x14c080, 0x12cc),
+       RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+                  0x3938700, 0x57bcf00, 0x1289),
+       /* 80MHz */
+       RADAR_SPEC(0, 8, 2, 9, 106, 150, 15, 2900, 80100, 15, 0,
+                  0x7fffffff, 0x14c080, 0x16cc),
+       RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+                  0x7fffffff, 0x14c080, 0x19dd),
+       RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+                  0x7fffffff, 0x14c080, 0x12cc),
+       RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+                  0x3938700, 0x57bcf00, 0x1289)
+};
+
+/* Japan W53 band hw radar-detector parameters: one entry per detection
+ * engine, repeated for each channel width (20/40/80 MHz).  Zeroed
+ * entries ({ 0 }) correspond to engines unused in this band.
+ */
+static const struct mt76x2_radar_specs jp_w53_radar_specs[] = {
+       /* 20MHz */
+       RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+                  0x7fffffff, 0x14c080, 0x16cc),
+       { 0 },
+       RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+                  0x7fffffff, 0x14c080, 0x16cc),
+       { 0 },
+       /* 40MHz */
+       RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+                  0x7fffffff, 0x14c080, 0x16cc),
+       { 0 },
+       RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+                  0x7fffffff, 0x14c080, 0x16cc),
+       { 0 },
+       /* 80MHz */
+       RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+                  0x7fffffff, 0x14c080, 0x16cc),
+       { 0 },
+       RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+                  0x7fffffff, 0x14c080, 0x16cc),
+       { 0 }
+};
+
+/* Write the BBP DFS capture-mode control word (register DFS 36):
+ * bit 1 is always set, bit 0 carries @enable.
+ */
+static void mt76x2_dfs_set_capture_mode_ctrl(struct mt76x2_dev *dev,
+                                            u8 enable)
+{
+       u32 data;
+
+       data = (1 << 1) | enable;
+       mt76_wr(dev, MT_BBP(DFS, 36), data);
+}
+
+/* Count consecutive chirp (long-pulse) events: returns true once more
+ * than eight chirp pulses have been observed, each within 12 s of the
+ * previous one; the counter restarts after a longer gap.  Timestamps
+ * come from MT_PBF_LIFE_TIMER (12 * 2^20 ticks assumed ~= 12 s —
+ * TODO confirm timer tick rate against hw docs).
+ */
+static bool mt76x2_dfs_check_chirp(struct mt76x2_dev *dev)
+{
+       bool ret = false;
+       u32 current_ts, delta_ts;
+       struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+       current_ts = mt76_rr(dev, MT_PBF_LIFE_TIMER);
+       delta_ts = current_ts - dfs_pd->chirp_pulse_ts;
+       dfs_pd->chirp_pulse_ts = current_ts;
+
+       /* 12 sec */
+       if (delta_ts <= (12 * (1 << 20))) {
+               if (++dfs_pd->chirp_pulse_cnt > 8)
+                       ret = true;
+       } else {
+               dfs_pd->chirp_pulse_cnt = 1;
+       }
+
+       return ret;
+}
+
+/* Read the latched pulse report (period, widths, burst count) for the
+ * engine selected in @pulse->engine from the BBP DFS registers.
+ */
+static void mt76x2_dfs_get_hw_pulse(struct mt76x2_dev *dev,
+                                   struct mt76x2_dfs_hw_pulse *pulse)
+{
+       u32 data;
+
+       /* select channel */
+       data = (MT_DFS_CH_EN << 16) | pulse->engine;
+       mt76_wr(dev, MT_BBP(DFS, 0), data);
+
+       /* reported period */
+       pulse->period = mt76_rr(dev, MT_BBP(DFS, 19));
+
+       /* reported width */
+       pulse->w1 = mt76_rr(dev, MT_BBP(DFS, 20));
+       pulse->w2 = mt76_rr(dev, MT_BBP(DFS, 23));
+
+       /* reported burst number */
+       pulse->burst = mt76_rr(dev, MT_BBP(DFS, 22));
+}
+
+/* Software validation of a hw-reported pulse: check the reported
+ * period/width against the envelopes allowed by the configured DFS
+ * region.  Engine 3 carries chirp events on FCC/JP and is delegated
+ * to mt76x2_dfs_check_chirp().  Returns false for a zero period or
+ * width, for out-of-range engines, or when the region is unset.
+ */
+static bool mt76x2_dfs_check_hw_pulse(struct mt76x2_dev *dev,
+                                     struct mt76x2_dfs_hw_pulse *pulse)
+{
+       bool ret = false;
+
+       if (!pulse->period || !pulse->w1)
+               return false;
+
+       switch (dev->dfs_pd.region) {
+       case NL80211_DFS_FCC:
+               if (pulse->engine > 3)
+                       break;
+
+               /* engine 3 reports chirp (long pulse) events */
+               if (pulse->engine == 3) {
+                       ret = mt76x2_dfs_check_chirp(dev);
+                       break;
+               }
+
+               /* check short pulse*/
+               if (pulse->w1 < 120)
+                       ret = (pulse->period >= 2900 &&
+                              (pulse->period <= 4700 ||
+                               pulse->period >= 6400) &&
+                              (pulse->period <= 6800 ||
+                               pulse->period >= 10200) &&
+                              pulse->period <= 61600);
+               else if (pulse->w1 < 130) /* 120 - 130 */
+                       ret = (pulse->period >= 2900 &&
+                              pulse->period <= 61600);
+               else
+                       ret = (pulse->period >= 3500 &&
+                              pulse->period <= 10100);
+               break;
+       case NL80211_DFS_ETSI:
+               if (pulse->engine >= 3)
+                       break;
+
+               ret = (pulse->period >= 4900 &&
+                      (pulse->period <= 10200 ||
+                       pulse->period >= 12400) &&
+                      pulse->period <= 100100);
+               break;
+       case NL80211_DFS_JP:
+               /* W53 sub-band: channels with center freq 5250-5350 MHz */
+               if (dev->mt76.chandef.chan->center_freq >= 5250 &&
+                   dev->mt76.chandef.chan->center_freq <= 5350) {
+                       /* JPW53 */
+                       if (pulse->w1 <= 130)
+                               ret = (pulse->period >= 28360 &&
+                                      (pulse->period <= 28700 ||
+                                       pulse->period >= 76900) &&
+                                      pulse->period <= 76940);
+                       break;
+               }
+
+               if (pulse->engine > 3)
+                       break;
+
+               /* engine 3 reports chirp (long pulse) events */
+               if (pulse->engine == 3) {
+                       ret = mt76x2_dfs_check_chirp(dev);
+                       break;
+               }
+
+               /* check short pulse*/
+               if (pulse->w1 < 120)
+                       ret = (pulse->period >= 2900 &&
+                              (pulse->period <= 4700 ||
+                               pulse->period >= 6400) &&
+                              (pulse->period <= 6800 ||
+                               pulse->period >= 27560) &&
+                              (pulse->period <= 27960 ||
+                               pulse->period >= 28360) &&
+                              (pulse->period <= 28700 ||
+                               pulse->period >= 79900) &&
+                              pulse->period <= 80100);
+               else if (pulse->w1 < 130) /* 120 - 130 */
+                       ret = (pulse->period >= 2900 &&
+                              (pulse->period <= 10100 ||
+                               pulse->period >= 27560) &&
+                              (pulse->period <= 27960 ||
+                               pulse->period >= 28360) &&
+                              (pulse->period <= 28700 ||
+                               pulse->period >= 79900) &&
+                              pulse->period <= 80100);
+               else
+                       ret = (pulse->period >= 3900 &&
+                              pulse->period <= 10100);
+               break;
+       case NL80211_DFS_UNSET:
+       default:
+               return false;
+       }
+
+       return ret;
+}
+
+/* GP-timer tasklet: poll the hw detector engines (BBP DFS 1 bitmask),
+ * validate any reported pulse and notify mac80211 when a radar pattern
+ * is confirmed.  Always re-enables MT_INT_GPTIMER, which the IRQ
+ * handler masked before scheduling us.
+ */
+static void mt76x2_dfs_tasklet(unsigned long arg)
+{
+       struct mt76x2_dev *dev = (struct mt76x2_dev *)arg;
+       struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+       u32 engine_mask;
+       int i;
+
+       /* skip detection while scanning off-channel */
+       if (test_bit(MT76_SCANNING, &dev->mt76.state))
+               goto out;
+
+       engine_mask = mt76_rr(dev, MT_BBP(DFS, 1));
+       if (!(engine_mask & 0xf))
+               goto out;
+
+       for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+               struct mt76x2_dfs_hw_pulse pulse;
+
+               if (!(engine_mask & (1 << i)))
+                       continue;
+
+               pulse.engine = i;
+               mt76x2_dfs_get_hw_pulse(dev, &pulse);
+
+               if (!mt76x2_dfs_check_hw_pulse(dev, &pulse)) {
+                       dfs_pd->stats[i].hw_pulse_discarded++;
+                       continue;
+               }
+
+               /* hw detector rx radar pattern */
+               dfs_pd->stats[i].hw_pattern++;
+               ieee80211_radar_detected(dev->mt76.hw);
+
+               /* reset hw detector */
+               mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+
+               return;
+       }
+
+       /* reset hw detector */
+       mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+
+out:
+       mt76x2_irq_enable(dev, MT_INT_GPTIMER);
+}
+
+/* Program the BBP radar-detector registers: pick the parameter table
+ * for the configured DFS region and current channel width (the tables
+ * hold MT_DFS_NUM_ENGINES entries per width, hence the shift), write
+ * the per-engine thresholds, then reset status and enable detection.
+ */
+static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev)
+{
+       u32 data;
+       u8 i, shift;
+       const struct mt76x2_radar_specs *radar_specs;
+
+       /* offset into the region table for the active channel width */
+       switch (dev->mt76.chandef.width) {
+       case NL80211_CHAN_WIDTH_40:
+               shift = MT_DFS_NUM_ENGINES;
+               break;
+       case NL80211_CHAN_WIDTH_80:
+               shift = 2 * MT_DFS_NUM_ENGINES;
+               break;
+       default:
+               shift = 0;
+               break;
+       }
+
+       switch (dev->dfs_pd.region) {
+       case NL80211_DFS_FCC:
+               radar_specs = &fcc_radar_specs[shift];
+               break;
+       case NL80211_DFS_ETSI:
+               radar_specs = &etsi_radar_specs[shift];
+               break;
+       case NL80211_DFS_JP:
+               /* W53 sub-band uses its own table */
+               if (dev->mt76.chandef.chan->center_freq >= 5250 &&
+                   dev->mt76.chandef.chan->center_freq <= 5350)
+                       radar_specs = &jp_w53_radar_specs[shift];
+               else
+                       radar_specs = &jp_w56_radar_specs[shift];
+               break;
+       case NL80211_DFS_UNSET:
+       default:
+               return;
+       }
+
+       data = (MT_DFS_VGA_MASK << 16) |
+              (MT_DFS_PWR_GAIN_OFFSET << 12) |
+              (MT_DFS_PWR_DOWN_TIME << 8) |
+              (MT_DFS_SYM_ROUND << 4) |
+              (MT_DFS_DELTA_DELAY & 0xf);
+       mt76_wr(dev, MT_BBP(DFS, 2), data);
+
+       data = (MT_DFS_RX_PE_MASK << 16) | MT_DFS_PKT_END_MASK;
+       mt76_wr(dev, MT_BBP(DFS, 3), data);
+
+       for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+               /* configure engine */
+               mt76_wr(dev, MT_BBP(DFS, 0), i);
+
+               /* detection mode + avg_len */
+               data = ((radar_specs[i].avg_len & 0x1ff) << 16) |
+                      (radar_specs[i].mode & 0xf);
+               mt76_wr(dev, MT_BBP(DFS, 4), data);
+
+               /* dfs energy */
+               data = ((radar_specs[i].e_high & 0x0fff) << 16) |
+                      (radar_specs[i].e_low & 0x0fff);
+               mt76_wr(dev, MT_BBP(DFS, 5), data);
+
+               /* dfs period */
+               mt76_wr(dev, MT_BBP(DFS, 7), radar_specs[i].t_low);
+               mt76_wr(dev, MT_BBP(DFS, 9), radar_specs[i].t_high);
+
+               /* dfs burst */
+               mt76_wr(dev, MT_BBP(DFS, 11), radar_specs[i].b_low);
+               mt76_wr(dev, MT_BBP(DFS, 13), radar_specs[i].b_high);
+
+               /* dfs width */
+               data = ((radar_specs[i].w_high & 0x0fff) << 16) |
+                      (radar_specs[i].w_low & 0x0fff);
+               mt76_wr(dev, MT_BBP(DFS, 14), data);
+
+               /* dfs margins */
+               data = (radar_specs[i].w_margin << 16) |
+                      radar_specs[i].t_margin;
+               mt76_wr(dev, MT_BBP(DFS, 15), data);
+
+               /* dfs event expiration */
+               mt76_wr(dev, MT_BBP(DFS, 17), radar_specs[i].event_expiration);
+
+               /* dfs pwr adj */
+               mt76_wr(dev, MT_BBP(DFS, 30), radar_specs[i].pwr_jmp);
+       }
+
+       /* reset status */
+       mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+       mt76_wr(dev, MT_BBP(DFS, 36), 0x3);
+
+       /* enable detection*/
+       mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16);
+       mt76_wr(dev, 0x212c, 0x0c350001);
+}
+
+/* Derive the DFS detector gain settings (BBP DFS 31/32) from the
+ * current AGC configuration (BBP AGC 4/8) and update AGC 4.
+ * NOTE(review): the bit-field arithmetic follows the vendor reference;
+ * individual field meanings are not documented here — confirm against
+ * the MT76x2 register documentation before modifying.
+ */
+void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev)
+{
+       u32 agc_r8, agc_r4, val_r8, val_r4, dfs_r31;
+
+       agc_r8 = mt76_rr(dev, MT_BBP(AGC, 8));
+       agc_r4 = mt76_rr(dev, MT_BBP(AGC, 4));
+
+       val_r8 = (agc_r8 & 0x00007e00) >> 9;
+       val_r4 = agc_r4 & ~0x1f000000;
+       val_r4 += (((val_r8 + 1) >> 1) << 24);
+       mt76_wr(dev, MT_BBP(AGC, 4), val_r4);
+
+       dfs_r31 = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, val_r4);
+       dfs_r31 += val_r8;
+       dfs_r31 -= (agc_r8 & 0x00000038) >> 3;
+       dfs_r31 = (dfs_r31 << 16) | 0x00000307;
+       mt76_wr(dev, MT_BBP(DFS, 31), dfs_r31);
+
+       mt76_wr(dev, MT_BBP(DFS, 32), 0x00040071);
+}
+
+/* (Re)configure DFS for the current channel: on radar channels program
+ * the hw detector and enable the GP timer interrupt; otherwise disable
+ * the detector, clear its status and mask the interrupt.  The DFS
+ * tasklet is killed first so it cannot race with reconfiguration.
+ */
+void mt76x2_dfs_init_params(struct mt76x2_dev *dev)
+{
+       struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+
+       tasklet_kill(&dev->dfs_pd.dfs_tasklet);
+       if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
+               mt76x2_dfs_set_bbp_params(dev);
+               /* enable debug mode */
+               mt76x2_dfs_set_capture_mode_ctrl(dev, true);
+
+               mt76x2_irq_enable(dev, MT_INT_GPTIMER);
+               mt76_rmw_field(dev, MT_INT_TIMER_EN,
+                              MT_INT_TIMER_EN_GP_TIMER_EN, 1);
+       } else {
+               /* disable hw detector */
+               mt76_wr(dev, MT_BBP(DFS, 0), 0);
+               /* clear detector status */
+               mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+               mt76_wr(dev, 0x212c, 0);
+
+               mt76x2_irq_disable(dev, MT_INT_GPTIMER);
+               mt76_rmw_field(dev, MT_INT_TIMER_EN,
+                              MT_INT_TIMER_EN_GP_TIMER_EN, 0);
+       }
+}
+
+/* One-time setup of the DFS pattern detector: default to no DFS region
+ * and initialize the detection tasklet.
+ */
+void mt76x2_dfs_init_detector(struct mt76x2_dev *dev)
+{
+       struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+       dfs_pd->region = NL80211_DFS_UNSET;
+       tasklet_init(&dfs_pd->dfs_tasklet, mt76x2_dfs_tasklet,
+                    (unsigned long)dev);
+}
+
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h
new file mode 100644 (file)
index 0000000..9ac69b6
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_DFS_H
+#define __MT76x2_DFS_H
+
+#include <linux/types.h>
+#include <linux/nl80211.h>
+
+#define MT_DFS_GP_INTERVAL             (10 << 4) /* 64 us unit */
+#define MT_DFS_NUM_ENGINES             4
+
+/* bbp params */
+#define MT_DFS_SYM_ROUND               0
+#define MT_DFS_DELTA_DELAY             2
+#define MT_DFS_VGA_MASK                        0
+#define MT_DFS_PWR_GAIN_OFFSET         3
+#define MT_DFS_PWR_DOWN_TIME           0xf
+#define MT_DFS_RX_PE_MASK              0xff
+#define MT_DFS_PKT_END_MASK            0
+#define MT_DFS_CH_EN                   0xf
+
+/* per-engine hw radar-detection parameter set; initialized via the
+ * RADAR_SPEC() tables in mt76x2_dfs.c and written to the BBP DFS
+ * registers by mt76x2_dfs_set_bbp_params()
+ */
+struct mt76x2_radar_specs {
+       u8 mode;
+       u16 avg_len;
+       u16 e_low;
+       u16 e_high;
+       u16 w_low;
+       u16 w_high;
+       u16 w_margin;
+       u32 t_low;
+       u32 t_high;
+       u16 t_margin;
+       u32 b_low;
+       u32 b_high;
+       u32 event_expiration;
+       u16 pwr_jmp;
+};
+
+/* pulse report read back from one hw detection engine */
+struct mt76x2_dfs_hw_pulse {
+       u8 engine;
+       u32 period;
+       u32 w1;
+       u32 w2;
+       u32 burst;
+};
+
+/* per-engine counters exposed via the "dfs_stats" debugfs entry */
+struct mt76x2_dfs_engine_stats {
+       u32 hw_pattern;
+       u32 hw_pulse_discarded;
+};
+
+/* software state of the DFS pattern detector */
+struct mt76x2_dfs_pattern_detector {
+       enum nl80211_dfs_regions region;
+
+       /* chirp (long pulse) tracking, see mt76x2_dfs_check_chirp() */
+       u8 chirp_pulse_cnt;
+       u32 chirp_pulse_ts;
+
+       struct mt76x2_dfs_engine_stats stats[MT_DFS_NUM_ENGINES];
+       struct tasklet_struct dfs_tasklet;
+};
+
+void mt76x2_dfs_init_params(struct mt76x2_dev *dev);
+void mt76x2_dfs_init_detector(struct mt76x2_dev *dev);
+void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev);
+
+#endif /* __MT76x2_DFS_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
new file mode 100644 (file)
index 0000000..0a3f729
--- /dev/null
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_dma.h"
+
+/* Submit an MCU command frame to TX queue @qid.
+ *
+ * Builds the FCE TX info word (command marker, command type, sequence
+ * number, CPU TX port and payload length), DMA-maps the skb and queues
+ * it as a single buffer.
+ *
+ * Returns 0 on success, -ENOMEM if the DMA mapping fails; the skb is
+ * not freed here on failure.
+ */
+int
+mt76x2_tx_queue_mcu(struct mt76x2_dev *dev, enum mt76_txq_id qid,
+                   struct sk_buff *skb, int cmd, int seq)
+{
+       struct mt76_queue *q = &dev->mt76.q_tx[qid];
+       struct mt76_queue_buf buf;
+       dma_addr_t addr;
+       u32 tx_info;
+
+       tx_info = MT_MCU_MSG_TYPE_CMD |
+                 FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
+                 FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
+                 FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
+                 FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
+
+       addr = dma_map_single(dev->mt76.dev, skb->data, skb->len,
+                             DMA_TO_DEVICE);
+       if (dma_mapping_error(dev->mt76.dev, addr))
+               return -ENOMEM;
+
+       buf.addr = addr;
+       buf.len = skb->len;
+       /* lock spans add + kick so concurrent submitters stay ordered */
+       spin_lock_bh(&q->lock);
+       mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
+       mt76_queue_kick(dev, q);
+       spin_unlock_bh(&q->lock);
+
+       return 0;
+}
+
+/* Configure one hardware TX ring and enable its completion interrupt.
+ * Returns 0 on success or the mt76_queue_alloc() error code.
+ */
+static int
+mt76x2_init_tx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
+                    int idx, int n_desc)
+{
+       int err;
+
+       q->ndesc = n_desc;
+       q->regs = dev->mt76.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
+
+       err = mt76_queue_alloc(dev, q);
+       if (err)
+               return err;
+
+       mt76x2_irq_enable(dev, MT_INT_TX_DONE(idx));
+       return 0;
+}
+
+/* DMA RX dispatch: route a received buffer either to the MCU response
+ * queue or into the mac80211 RX path.
+ */
+void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+                        struct sk_buff *skb)
+{
+       struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+       void *rxwi = skb->data;
+
+       /* MCU responses are parked on mcu.res_q for the waiting thread */
+       if (q == MT_RXQ_MCU) {
+               skb_queue_tail(&dev->mcu.res_q, skb);
+               wake_up(&dev->mcu.wait);
+               return;
+       }
+
+       /* rxwi was captured above, so it still points at the RXWI
+        * header after the pull below
+        */
+       skb_pull(skb, sizeof(struct mt76x2_rxwi));
+       if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
+               /* frame rejected by MAC-level processing */
+               dev_kfree_skb(skb);
+               return;
+       }
+
+       mt76_rx(&dev->mt76, q, skb);
+}
+
+/* Configure one hardware RX ring and enable its completion interrupt.
+ * Returns 0 on success or the mt76_queue_alloc() error code.
+ */
+static int
+mt76x2_init_rx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
+                    int idx, int n_desc, int bufsize)
+{
+       int err;
+
+       q->buf_size = bufsize;
+       q->ndesc = n_desc;
+       q->regs = dev->mt76.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
+
+       err = mt76_queue_alloc(dev, q);
+       if (err)
+               return err;
+
+       mt76x2_irq_enable(dev, MT_INT_RX_DONE(idx));
+       return 0;
+}
+
+/* TX completion bottom half: drain the TX status FIFO, reclaim
+ * completed frames from every TX queue (MCU queue first, then ACs from
+ * high index down to 0), poll remaining status, and re-enable the TX
+ * done interrupts — presumably masked by the IRQ handler when this
+ * tasklet was scheduled; TODO confirm against the interrupt code.
+ */
+static void
+mt76x2_tx_tasklet(unsigned long data)
+{
+       struct mt76x2_dev *dev = (struct mt76x2_dev *) data;
+       int i;
+
+       mt76x2_mac_process_tx_status_fifo(dev);
+
+       for (i = MT_TXQ_MCU; i >= 0; i--)
+               mt76_queue_tx_cleanup(dev, i, false);
+
+       mt76x2_mac_poll_tx_status(dev, false);
+       mt76x2_irq_enable(dev, MT_INT_TX_DONE_ALL);
+}
+
+/* Allocate and initialize all TX/RX DMA queues.
+ *
+ * wmm_queue_map translates mac80211 AC indices into hardware TX queue
+ * numbers. The BUILD_BUG_ONs pin two compile-time layout assumptions:
+ * the cached TXWI buffer must hold a struct mt76x2_txwi, and a struct
+ * mt76x2_rxwi must fit inside the reserved RX headroom.
+ *
+ * Returns 0 on success or the first queue setup error.
+ */
+int mt76x2_dma_init(struct mt76x2_dev *dev)
+{
+       static const u8 wmm_queue_map[] = {
+               [IEEE80211_AC_BE] = 0,
+               [IEEE80211_AC_BK] = 1,
+               [IEEE80211_AC_VI] = 2,
+               [IEEE80211_AC_VO] = 3,
+       };
+       int ret;
+       int i;
+       struct mt76_txwi_cache __maybe_unused *t;
+       struct mt76_queue *q;
+
+       BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x2_txwi));
+       BUILD_BUG_ON(sizeof(struct mt76x2_rxwi) > MT_RX_HEADROOM);
+
+       mt76_dma_attach(&dev->mt76);
+
+       init_waitqueue_head(&dev->mcu.wait);
+       skb_queue_head_init(&dev->mcu.res_q);
+
+       tasklet_init(&dev->tx_tasklet, mt76x2_tx_tasklet, (unsigned long) dev);
+
+       /* reset the hardware descriptor index registers */
+       mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+
+       /* one TX ring per WMM access category */
+       for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
+               ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[i],
+                                          wmm_queue_map[i], MT_TX_RING_SIZE);
+               if (ret)
+                       return ret;
+       }
+
+       /* management and MCU command TX rings */
+       ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
+                                  MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
+       if (ret)
+               return ret;
+
+       ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+                                  MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
+       if (ret)
+               return ret;
+
+       ret = mt76x2_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
+                                  MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
+       if (ret)
+               return ret;
+
+       /* main RX ring: offset buffers so the RXWI header lands inside
+        * the reserved headroom
+        */
+       q = &dev->mt76.q_rx[MT_RXQ_MAIN];
+       q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x2_rxwi);
+       ret = mt76x2_init_rx_queue(dev, q, 0, MT76x2_RX_RING_SIZE, MT_RX_BUF_SIZE);
+       if (ret)
+               return ret;
+
+       return mt76_init_queues(dev);
+}
+
+/* Stop the TX bottom half, then release all DMA queue resources. */
+void mt76x2_dma_cleanup(struct mt76x2_dev *dev)
+{
+       tasklet_kill(&dev->tx_tasklet);
+       mt76_dma_cleanup(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
new file mode 100644 (file)
index 0000000..47f79d8
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_DMA_H
+#define __MT76x2_DMA_H
+
+#include "dma.h"
+
+/* TX descriptor info word fields */
+#define MT_TXD_INFO_LEN                        GENMASK(13, 0)
+#define MT_TXD_INFO_NEXT_VLD           BIT(16)
+#define MT_TXD_INFO_TX_BURST           BIT(17)
+#define MT_TXD_INFO_80211              BIT(19)
+#define MT_TXD_INFO_TSO                        BIT(20)
+#define MT_TXD_INFO_CSO                        BIT(21)
+#define MT_TXD_INFO_WIV                        BIT(24)
+#define MT_TXD_INFO_QSEL               GENMASK(26, 25)
+#define MT_TXD_INFO_TCO                        BIT(29)
+#define MT_TXD_INFO_UCO                        BIT(30)
+#define MT_TXD_INFO_ICO                        BIT(31)
+
+/* RX FCE info word fields */
+#define MT_RX_FCE_INFO_LEN             GENMASK(13, 0)
+#define MT_RX_FCE_INFO_SELF_GEN                BIT(15)
+#define MT_RX_FCE_INFO_CMD_SEQ         GENMASK(19, 16)
+#define MT_RX_FCE_INFO_EVT_TYPE                GENMASK(23, 20)
+#define MT_RX_FCE_INFO_PCIE_INTR       BIT(24)
+#define MT_RX_FCE_INFO_QSEL            GENMASK(26, 25)
+#define MT_RX_FCE_INFO_D_PORT          GENMASK(29, 27)
+#define MT_RX_FCE_INFO_TYPE            GENMASK(31, 30)
+
+/* MCU request message header  */
+#define MT_MCU_MSG_LEN                 GENMASK(15, 0)
+#define MT_MCU_MSG_CMD_SEQ             GENMASK(19, 16)
+#define MT_MCU_MSG_CMD_TYPE            GENMASK(26, 20)
+#define MT_MCU_MSG_PORT                        GENMASK(29, 27)
+#define MT_MCU_MSG_TYPE                        GENMASK(31, 30)
+#define MT_MCU_MSG_TYPE_CMD            BIT(30)
+
+/* Values for the QSEL descriptor fields (MT_TXD_INFO_QSEL /
+ * MT_RX_FCE_INFO_QSEL).
+ */
+enum mt76x2_qsel {
+       MT_QSEL_MGMT,
+       MT_QSEL_HCCA,
+       MT_QSEL_EDCA,
+       MT_QSEL_EDCA_2,
+};
+
+/* Message port values for MT_MCU_MSG_PORT / MT_RX_FCE_INFO_D_PORT;
+ * host-originated MCU commands are tagged CPU_TX_PORT (see
+ * mt76x2_tx_queue_mcu()).
+ */
+enum dma_msg_port {
+       WLAN_PORT,
+       CPU_RX_PORT,
+       CPU_TX_PORT,
+       HOST_PORT,
+       VIRTUAL_CPU_RX_PORT,
+       VIRTUAL_CPU_TX_PORT,
+       DISCARD,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
new file mode 100644 (file)
index 0000000..440b7e7
--- /dev/null
@@ -0,0 +1,647 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <asm/unaligned.h>
+#include "mt76x2.h"
+#include "mt76x2_eeprom.h"
+
+#define EE_FIELD(_name, _value) [MT_EE_##_name] = (_value) | 1
+
+/* Copy @len bytes starting at EEPROM byte offset @field into @dest.
+ * Returns 0 on success, -1 when the range exceeds the stored image.
+ * NOTE(review): both callers in this file ignore the return value, so
+ * on failure @dest is left uninitialized — consider checking it.
+ */
+static int
+mt76x2_eeprom_copy(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field,
+                  void *dest, int len)
+{
+       if (field + len > dev->mt76.eeprom.size)
+               return -1;
+
+       memcpy(dest, dev->mt76.eeprom.data + field, len);
+       return 0;
+}
+
+/* Copy the MAC address out of the EEPROM image. Always returns 0. */
+static int
+mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev)
+{
+       void *src = dev->mt76.eeprom.data + MT_EE_MAC_ADDR;
+
+       memcpy(dev->mt76.macaddr, src, ETH_ALEN);
+       return 0;
+}
+
+/* Read the board type from NIC_CONF_0 and record which bands the
+ * hardware supports; unrecognized board types advertise both bands.
+ */
+static void
+mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
+{
+       u16 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);
+       u32 board = FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val);
+
+       if (board == BOARD_TYPE_5GHZ) {
+               dev->mt76.cap.has_5ghz = true;
+       } else if (board == BOARD_TYPE_2GHZ) {
+               dev->mt76.cap.has_2ghz = true;
+       } else {
+               dev->mt76.cap.has_2ghz = true;
+               dev->mt76.cap.has_5ghz = true;
+       }
+}
+
+/* Read one 16-byte block of efuse data into @data.
+ *
+ * Programs the block address (@addr rounded down to a 16-byte boundary)
+ * into EFUSE_CTRL, kicks the read, and polls for the kick bit to clear.
+ * An all-ones AOUT field means the block is blank, which is reported as
+ * 16 bytes of 0xff. Returns 0 on success or -ETIMEDOUT if the read
+ * never completes.
+ */
+static int
+mt76x2_efuse_read(struct mt76x2_dev *dev, u16 addr, u8 *data)
+{
+       u32 val;
+       int i;
+
+       val = mt76_rr(dev, MT_EFUSE_CTRL);
+       val &= ~(MT_EFUSE_CTRL_AIN |
+                MT_EFUSE_CTRL_MODE);
+       val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
+       val |= MT_EFUSE_CTRL_KICK;
+       mt76_wr(dev, MT_EFUSE_CTRL, val);
+
+       if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+               return -ETIMEDOUT;
+
+       /* settle time before reading back the data registers */
+       udelay(2);
+
+       val = mt76_rr(dev, MT_EFUSE_CTRL);
+       if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
+               /* blank block */
+               memset(data, 0xff, 16);
+               return 0;
+       }
+
+       for (i = 0; i < 4; i++) {
+               val = mt76_rr(dev, MT_EFUSE_DATA(i));
+               put_unaligned_le32(val, data + 4 * i);
+       }
+
+       return 0;
+}
+
+/* Fill @buf with efuse contents, one 16-byte block at a time. A
+ * trailing fragment shorter than 16 bytes is not read.
+ */
+static int
+mt76x2_get_efuse_data(struct mt76x2_dev *dev, void *buf, int len)
+{
+       int offset = 0;
+
+       while (offset + 16 <= len) {
+               int err;
+
+               err = mt76x2_efuse_read(dev, offset, buf + offset);
+               if (err)
+                       return err;
+
+               offset += 16;
+       }
+
+       return 0;
+}
+
+/* Heuristic: decide whether the efuse carries valid "cal-free" data by
+ * probing fields that must be zero (or must not be blank 0xffff) on
+ * cal-free boards.
+ *
+ * NOTE(review): the MT_EE_* enum values are byte offsets, yet they are
+ * used here as indices into a u16 array, i.e. the probed words live at
+ * twice the named offset — verify this is intentional.
+ */
+static bool
+mt76x2_has_cal_free_data(struct mt76x2_dev *dev, u8 *efuse)
+{
+       u16 *efuse_w = (u16 *) efuse;
+
+       if (efuse_w[MT_EE_NIC_CONF_0] != 0)
+               return false;
+
+       if (efuse_w[MT_EE_XTAL_TRIM_1] == 0xffff)
+               return false;
+
+       if (efuse_w[MT_EE_TX_POWER_DELTA_BW40] != 0)
+               return false;
+
+       if (efuse_w[MT_EE_TX_POWER_0_START_2G] == 0xffff)
+               return false;
+
+       if (efuse_w[MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA] != 0)
+               return false;
+
+       if (efuse_w[MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE] == 0xffff)
+               return false;
+
+       return true;
+}
+
+/* Overlay per-device "cal-free" calibration bytes from the efuse onto
+ * the EEPROM image.
+ *
+ * GROUP_5G(_id) expands to the four per-chain TX power byte offsets of
+ * 5G calibration group _id. The group-0 TX power bytes are saved up
+ * front and restored if the efuse copy left them zero (unprogrammed),
+ * so a blank efuse cannot wipe valid EEPROM values.
+ */
+static void
+mt76x2_apply_cal_free_data(struct mt76x2_dev *dev, u8 *efuse)
+{
+#define GROUP_5G(_id)                                                     \
+       MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id),     \
+       MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id) + 1, \
+       MT_EE_TX_POWER_1_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id),     \
+       MT_EE_TX_POWER_1_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id) + 1
+
+       static const u8 cal_free_bytes[] = {
+               MT_EE_XTAL_TRIM_1,
+               MT_EE_TX_POWER_EXT_PA_5G + 1,
+               MT_EE_TX_POWER_0_START_2G,
+               MT_EE_TX_POWER_0_START_2G + 1,
+               MT_EE_TX_POWER_1_START_2G,
+               MT_EE_TX_POWER_1_START_2G + 1,
+               GROUP_5G(0),
+               GROUP_5G(1),
+               GROUP_5G(2),
+               GROUP_5G(3),
+               GROUP_5G(4),
+               GROUP_5G(5),
+               MT_EE_RF_2G_TSSI_OFF_TXPOWER,
+               MT_EE_RF_2G_RX_HIGH_GAIN + 1,
+               MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN,
+               MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN + 1,
+               MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN,
+               MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN + 1,
+               MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN,
+               MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN + 1,
+       };
+       u8 *eeprom = dev->mt76.eeprom.data;
+       /* group-0 5G TX power bytes before the overlay */
+       u8 prev_grp0[4] = {
+               eeprom[MT_EE_TX_POWER_0_START_5G],
+               eeprom[MT_EE_TX_POWER_0_START_5G + 1],
+               eeprom[MT_EE_TX_POWER_1_START_5G],
+               eeprom[MT_EE_TX_POWER_1_START_5G + 1]
+       };
+       u16 val;
+       int i;
+
+       if (!mt76x2_has_cal_free_data(dev, efuse))
+               return;
+
+       /* byte-wise overlay of every cal-free field */
+       for (i = 0; i < ARRAY_SIZE(cal_free_bytes); i++) {
+               int offset = cal_free_bytes[i];
+
+               eeprom[offset] = efuse[offset];
+       }
+
+       /* restore the saved group-0 values where the efuse copy was blank */
+       if (!(efuse[MT_EE_TX_POWER_0_START_5G] |
+             efuse[MT_EE_TX_POWER_0_START_5G + 1]))
+               memcpy(eeprom + MT_EE_TX_POWER_0_START_5G, prev_grp0, 2);
+       if (!(efuse[MT_EE_TX_POWER_1_START_5G] |
+             efuse[MT_EE_TX_POWER_1_START_5G + 1]))
+               memcpy(eeprom + MT_EE_TX_POWER_1_START_5G, prev_grp0 + 2, 2);
+
+       /* 16-bit fields: 0xffff means "not programmed" */
+       val = get_unaligned_le16(efuse + MT_EE_BT_RCAL_RESULT);
+       if (val != 0xffff)
+               eeprom[MT_EE_BT_RCAL_RESULT] = val & 0xff;
+
+       val = get_unaligned_le16(efuse + MT_EE_BT_VCDL_CALIBRATION);
+       if (val != 0xffff)
+               eeprom[MT_EE_BT_VCDL_CALIBRATION + 1] = val >> 8;
+
+       val = get_unaligned_le16(efuse + MT_EE_BT_PMUCFG);
+       if (val != 0xffff)
+               eeprom[MT_EE_BT_PMUCFG] = val & 0xff;
+}
+
+/* Sanity-check the EEPROM image: accept the MT7662/MT7612 chip ID, or
+ * the PCI ID when the chip ID word is empty. Returns 0 when valid,
+ * -EINVAL otherwise.
+ */
+static int mt76x2_check_eeprom(struct mt76x2_dev *dev)
+{
+       u16 val = get_unaligned_le16(dev->mt76.eeprom.data);
+
+       /* fall back to the PCI ID field when the chip ID word is blank */
+       if (!val)
+               val = get_unaligned_le16(dev->mt76.eeprom.data + MT_EE_PCI_ID);
+
+       if (val == 0x7662 || val == 0x7612)
+               return 0;
+
+       dev_err(dev->mt76.dev, "EEPROM data check failed: %04x\n", val);
+       return -EINVAL;
+}
+
+/* Load EEPROM contents and overlay cal-free data from the efuse.
+ *
+ * mt76_eeprom_init() returns <0 on error, 0 when no external EEPROM
+ * data was found, >0 when it was; "found" is further gated on the data
+ * passing the sanity check. The efuse is always read into the otp
+ * buffer; when no valid EEPROM is present it is used wholesale instead.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, or -ENOENT when
+ * neither the EEPROM nor the efuse provided usable data.
+ */
+static int
+mt76x2_eeprom_load(struct mt76x2_dev *dev)
+{
+       void *efuse;
+       int len = MT7662_EEPROM_SIZE;
+       bool found;
+       int ret;
+
+       ret = mt76_eeprom_init(&dev->mt76, len);
+       if (ret < 0)
+               return ret;
+
+       found = ret;
+       if (found)
+               found = !mt76x2_check_eeprom(dev);
+
+       /* check the allocation before publishing the size, so otp.size
+        * can never be non-zero while otp.data is NULL
+        */
+       dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL);
+       if (!dev->mt76.otp.data)
+               return -ENOMEM;
+       dev->mt76.otp.size = len;
+
+       efuse = dev->mt76.otp.data;
+
+       if (mt76x2_get_efuse_data(dev, efuse, len))
+               goto out;
+
+       if (found) {
+               mt76x2_apply_cal_free_data(dev, efuse);
+       } else {
+               /* FIXME: check if efuse data is complete */
+               found = true;
+               memcpy(dev->mt76.eeprom.data, efuse, len);
+       }
+
+out:
+       if (!found)
+               return -ENOENT;
+
+       return 0;
+}
+
+/* Decode a @size-bit EEPROM field into a signed int.
+ * NOTE(review): bit (size-1) set yields a positive magnitude and bit
+ * clear a negative one — the inverse of two's complement. Every caller
+ * in this file relies on this hardware encoding; confirm against the
+ * datasheet before "fixing".
+ */
+static inline int
+mt76x2_sign_extend(u32 val, unsigned int size)
+{
+       bool sign = val & BIT(size - 1);
+
+       val &= BIT(size - 1) - 1;
+
+       return sign ? val : -val;
+}
+
+/* Like mt76x2_sign_extend(), but bit @size acts as an enable flag; a
+ * cleared flag decodes to 0.
+ */
+static inline int
+mt76x2_sign_extend_optional(u32 val, unsigned int size)
+{
+       return (val & BIT(size)) ? mt76x2_sign_extend(val, size) : 0;
+}
+
+/* An 8-bit EEPROM field is valid unless it is blank (0x00 or 0xff). */
+static bool
+field_valid(u8 val)
+{
+       return !(val == 0 || val == 0xff);
+}
+
+/* Split @val into two 4-bit signed per-group RX high-gain values; a
+ * blank field resets both gains to 0.
+ */
+static void
+mt76x2_set_rx_gain_group(struct mt76x2_dev *dev, u8 val)
+{
+       s8 *gain = dev->cal.rx.high_gain;
+
+       if (field_valid(val)) {
+               gain[0] = mt76x2_sign_extend(val, 4);
+               gain[1] = mt76x2_sign_extend(val >> 4, 4);
+       } else {
+               gain[0] = 0;
+               gain[1] = 0;
+       }
+}
+
+/* Store the RSSI offset for @chain; a blank field decodes to 0. */
+static void
+mt76x2_set_rssi_offset(struct mt76x2_dev *dev, int chain, u8 val)
+{
+       s8 *offsets = dev->cal.rx.rssi_offset;
+
+       offsets[chain] = field_valid(val) ?
+                        mt76x2_sign_extend_optional(val, 7) : 0;
+}
+
+/* Map a 5 GHz channel number to its calibration group. The Japan band
+ * (184-196) is matched first because those channel numbers would
+ * otherwise fall through to UNII-3.
+ */
+static enum mt76x2_cal_channel_group
+mt76x2_get_cal_channel_group(int channel)
+{
+       enum mt76x2_cal_channel_group group = MT_CH_5G_UNII_3;
+
+       if (channel >= 184 && channel <= 196)
+               group = MT_CH_5G_JAPAN;
+       else if (channel <= 48)
+               group = MT_CH_5G_UNII_1;
+       else if (channel <= 64)
+               group = MT_CH_5G_UNII_2;
+       else if (channel <= 114)
+               group = MT_CH_5G_UNII_2E_1;
+       else if (channel <= 144)
+               group = MT_CH_5G_UNII_2E_2;
+
+       return group;
+}
+
+/* Return the RX high-gain calibration byte for a 5 GHz channel. Each
+ * EEPROM word stores two channel groups: the first in the low byte,
+ * the second in the high byte.
+ */
+static u8
+mt76x2_get_5g_rx_gain(struct mt76x2_dev *dev, u8 channel)
+{
+       enum mt76x2_cal_channel_group group;
+
+       group = mt76x2_get_cal_channel_group(channel);
+       switch (group) {
+       case MT_CH_5G_JAPAN:
+               return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN);
+       case MT_CH_5G_UNII_1:
+               return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN) >> 8;
+       case MT_CH_5G_UNII_2:
+               return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN);
+       case MT_CH_5G_UNII_2E_1:
+               return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN) >> 8;
+       case MT_CH_5G_UNII_2E_2:
+               return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN);
+       default:
+               return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN) >> 8;
+       }
+}
+
+/* Refresh RX gain calibration state for the current channel: per-group
+ * high gain, per-chain RSSI offsets, and the LNA gain reported to the
+ * MCU and used for the BBP.
+ */
+void mt76x2_read_rx_gain(struct mt76x2_dev *dev)
+{
+       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+       int channel = chan->hw_value;
+       s8 lna_5g[3], lna_2g;
+       u8 lna;
+       u16 val;
+
+       if (chan->band == NL80211_BAND_2GHZ)
+               val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN) >> 8;
+       else
+               val = mt76x2_get_5g_rx_gain(dev, channel);
+
+       mt76x2_set_rx_gain_group(dev, val);
+
+       /* per-chain RSSI offsets: chain 0 in the low byte, chain 1 in
+        * the high byte
+        */
+       if (chan->band == NL80211_BAND_2GHZ) {
+               val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_0);
+               mt76x2_set_rssi_offset(dev, 0, val);
+               mt76x2_set_rssi_offset(dev, 1, val >> 8);
+       } else {
+               val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_0);
+               mt76x2_set_rssi_offset(dev, 0, val);
+               mt76x2_set_rssi_offset(dev, 1, val >> 8);
+       }
+
+       val = mt76x2_eeprom_get(dev, MT_EE_LNA_GAIN);
+       lna_2g = val & 0xff;
+       lna_5g[0] = val >> 8;
+
+       val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_1);
+       lna_5g[1] = val >> 8;
+
+       val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_1);
+       lna_5g[2] = val >> 8;
+
+       /* fall back to the first 5G LNA value when the others are blank */
+       if (!field_valid(lna_5g[1]))
+               lna_5g[1] = lna_5g[0];
+
+       if (!field_valid(lna_5g[2]))
+               lna_5g[2] = lna_5g[0];
+
+       /* pack all four LNA values into one word for the MCU */
+       dev->cal.rx.mcu_gain =  (lna_2g & 0xff);
+       dev->cal.rx.mcu_gain |= (lna_5g[0] & 0xff) << 8;
+       dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
+       dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
+
+       /* with an external LNA the internal gain values do not apply */
+       val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
+       if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
+               lna_2g = 0;
+       if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G)
+               memset(lna_5g, 0, sizeof(lna_5g));
+
+       if (chan->band == NL80211_BAND_2GHZ)
+               lna = lna_2g;
+       else if (channel <= 64)
+               lna = lna_5g[0];
+       else if (channel <= 128)
+               lna = lna_5g[1];
+       else
+               lna = lna_5g[2];
+
+       /* blank field decodes to 0 */
+       if (lna == 0xff)
+               lna = 0;
+
+       dev->cal.rx.lna_gain = mt76x2_sign_extend(lna, 8);
+}
+
+/* Decode an 8-bit per-rate power field; blank values decode to 0. */
+static s8
+mt76x2_rate_power_val(u8 val)
+{
+       return field_valid(val) ? mt76x2_sign_extend_optional(val, 7) : 0;
+}
+
+/* Build the per-rate TX power table from the EEPROM. Each 16-bit field
+ * packs two values (low/high byte), and each decoded value is shared by
+ * a pair of adjacent rates.
+ */
+void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t)
+{
+       bool is_5ghz;
+       u16 val;
+
+       is_5ghz = dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ;
+
+       memset(t, 0, sizeof(*t));
+
+       val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_CCK);
+       t->cck[0] = t->cck[1] = mt76x2_rate_power_val(val);
+       t->cck[2] = t->cck[3] = mt76x2_rate_power_val(val >> 8);
+
+       if (is_5ghz)
+               val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_6M);
+       else
+               val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_6M);
+       t->ofdm[0] = t->ofdm[1] = mt76x2_rate_power_val(val);
+       t->ofdm[2] = t->ofdm[3] = mt76x2_rate_power_val(val >> 8);
+
+       if (is_5ghz)
+               val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_24M);
+       else
+               val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_24M);
+       t->ofdm[4] = t->ofdm[5] = mt76x2_rate_power_val(val);
+       t->ofdm[6] = t->ofdm[7] = mt76x2_rate_power_val(val >> 8);
+
+       val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS0);
+       t->ht[0] = t->ht[1] = mt76x2_rate_power_val(val);
+       t->ht[2] = t->ht[3] = mt76x2_rate_power_val(val >> 8);
+
+       val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS4);
+       t->ht[4] = t->ht[5] = mt76x2_rate_power_val(val);
+       t->ht[6] = t->ht[7] = mt76x2_rate_power_val(val >> 8);
+
+       val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS8);
+       t->ht[8] = t->ht[9] = mt76x2_rate_power_val(val);
+       t->ht[10] = t->ht[11] = mt76x2_rate_power_val(val >> 8);
+
+       val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS12);
+       t->ht[12] = t->ht[13] = mt76x2_rate_power_val(val);
+       t->ht[14] = t->ht[15] = mt76x2_rate_power_val(val >> 8);
+
+       val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS0);
+       t->vht[0] = t->vht[1] = mt76x2_rate_power_val(val);
+       t->vht[2] = t->vht[3] = mt76x2_rate_power_val(val >> 8);
+
+       val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS4);
+       t->vht[4] = t->vht[5] = mt76x2_rate_power_val(val);
+       t->vht[6] = t->vht[7] = mt76x2_rate_power_val(val >> 8);
+
+       val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS8);
+       if (!is_5ghz)
+               val >>= 8;
+       /* NOTE(review): val is shifted a second time in the call below,
+        * so on 2 GHz a u16 is shifted right 16 bits and this always
+        * decodes 0 — verify which byte VHT MCS8/9 power should use.
+        */
+       t->vht[8] = t->vht[9] = mt76x2_rate_power_val(val >> 8);
+}
+
+/* Read per-chain 2.4 GHz TX power calibration from the record at
+ * @offset. delta_idx selects which record byte carries the power delta
+ * for the current channel sub-range — presumably low/mid/high channel
+ * deltas; confirm against the EEPROM layout. The whole-band target
+ * power comes from the high byte of MT_EE_RF_2G_TSSI_OFF_TXPOWER.
+ */
+static void
+mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
+                      int chain, int offset)
+{
+       int channel = dev->mt76.chandef.chan->hw_value;
+       int delta_idx;
+       u8 data[6];
+       u16 val;
+
+       if (channel < 6)
+               delta_idx = 3;
+       else if (channel < 11)
+               delta_idx = 4;
+       else
+               delta_idx = 5;
+
+       mt76x2_eeprom_copy(dev, offset, data, sizeof(data));
+
+       t->chain[chain].tssi_slope = data[0];
+       t->chain[chain].tssi_offset = data[1];
+       t->chain[chain].target_power = data[2];
+       t->chain[chain].delta = mt76x2_sign_extend_optional(data[delta_idx], 7);
+
+       val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_TSSI_OFF_TXPOWER);
+       t->target_power = val >> 8;
+}
+
+/* Read per-chain 5 GHz TX power calibration for the current channel.
+ *
+ * @offset selects the chain-0 or chain-1 table; the channel's
+ * calibration group selects the per-group record within it. delta_idx
+ * picks the record byte holding the power delta for the channel's
+ * sub-range.
+ *
+ * Fix: the second range check read "channel >= 484", which is
+ * unreachable (channel >= 192 is handled first, and no 5 GHz channel
+ * number comes close to 484). It is meant to be 184, the start of the
+ * Japan band group — cf. mt76x2_get_cal_channel_group().
+ */
+static void
+mt76x2_get_power_info_5g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
+                      int chain, int offset)
+{
+       int channel = dev->mt76.chandef.chan->hw_value;
+       enum mt76x2_cal_channel_group group;
+       int delta_idx;
+       u16 val;
+       u8 data[5];
+
+       group = mt76x2_get_cal_channel_group(channel);
+       offset += group * MT_TX_POWER_GROUP_SIZE_5G;
+
+       if (channel >= 192)
+               delta_idx = 4;
+       else if (channel >= 184)
+               delta_idx = 3;
+       else if (channel < 44)
+               delta_idx = 3;
+       else if (channel < 52)
+               delta_idx = 4;
+       else if (channel < 58)
+               delta_idx = 3;
+       else if (channel < 98)
+               delta_idx = 4;
+       else if (channel < 106)
+               delta_idx = 3;
+       else if (channel < 116)
+               delta_idx = 4;
+       else if (channel < 130)
+               delta_idx = 3;
+       else if (channel < 149)
+               delta_idx = 4;
+       else if (channel < 157)
+               delta_idx = 3;
+       else
+               delta_idx = 4;
+
+       mt76x2_eeprom_copy(dev, offset, data, sizeof(data));
+
+       t->chain[chain].tssi_slope = data[0];
+       t->chain[chain].tssi_offset = data[1];
+       t->chain[chain].target_power = data[2];
+       t->chain[chain].delta = mt76x2_sign_extend_optional(data[delta_idx], 7);
+
+       /* NOTE(review): target power is read from the low byte of
+        * MT_EE_RF_2G_RX_HIGH_GAIN even on 5 GHz — confirm that field
+        * doubles as the 5G TSSI-off target power.
+        */
+       val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN);
+       t->target_power = val & 0xff;
+}
+
+/* Collect TX power calibration for the current channel into @t.
+ * The BW40 delta word is shifted on 5 GHz, i.e. its low byte holds the
+ * 2G delta and its high byte the 5G delta.
+ */
+void mt76x2_get_power_info(struct mt76x2_dev *dev,
+                          struct mt76x2_tx_power_info *t)
+{
+       u16 bw40, bw80;
+
+       memset(t, 0, sizeof(*t));
+
+       bw40 = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40);
+       bw80 = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80);
+
+       if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) {
+               bw40 >>= 8;
+               mt76x2_get_power_info_5g(dev, t, 0, MT_EE_TX_POWER_0_START_5G);
+               mt76x2_get_power_info_5g(dev, t, 1, MT_EE_TX_POWER_1_START_5G);
+       } else {
+               mt76x2_get_power_info_2g(dev, t, 0, MT_EE_TX_POWER_0_START_2G);
+               mt76x2_get_power_info_2g(dev, t, 1, MT_EE_TX_POWER_1_START_2G);
+       }
+
+       /* fall back to the chain-0 value when TSSI compensation is on
+        * or the stored band target power is blank
+        */
+       if (mt76x2_tssi_enabled(dev) || !field_valid(t->target_power))
+               t->target_power = t->chain[0].target_power;
+
+       t->delta_bw40 = mt76x2_rate_power_val(bw40);
+       t->delta_bw80 = mt76x2_rate_power_val(bw80);
+}
+
+/* Extract temperature-compensation parameters from the EEPROM.
+ *
+ * Only meaningful when temperature-based TX ALC is enabled in
+ * NIC_CONF_1 and an external PA is fitted for the current band; bit 7
+ * of the EXT_PA_5G high byte additionally gates the 25C reference.
+ * Returns 0 on success, -EINVAL when compensation does not apply.
+ */
+int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
+{
+       enum nl80211_band band = dev->mt76.chandef.chan->band;
+       u16 val, slope;
+       u8 bounds;
+
+       memset(t, 0, sizeof(*t));
+
+       val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
+       if (!(val & MT_EE_NIC_CONF_1_TEMP_TX_ALC))
+               return -EINVAL;
+
+       if (!mt76x2_ext_pa_enabled(dev, band))
+               return -EINVAL;
+
+       val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G) >> 8;
+       if (!(val & BIT(7)))
+               return -EINVAL;
+
+       t->temp_25_ref = val & 0x7f;
+       if (band == NL80211_BAND_5GHZ) {
+               slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_5G);
+               bounds = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
+       } else {
+               slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_2G);
+               bounds = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80) >> 8;
+       }
+
+       /* slope word: high-temp slope in the low byte, low-temp slope in
+        * the high byte; bounds byte packs two 4-bit magnitudes
+        */
+       t->high_slope = slope & 0xff;
+       t->low_slope = slope >> 8;
+       t->lower_bound = 0 - (bounds & 0xf);
+       t->upper_bound = (bounds >> 4) & 0xf;
+
+       return 0;
+}
+
+/* An external PA is in use when the internal-PA bit for @band is clear
+ * in NIC_CONF_0.
+ */
+bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band)
+{
+       u16 conf0 = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);
+       u16 mask = band == NL80211_BAND_5GHZ ? MT_EE_NIC_CONF_0_PA_INT_5G :
+                                              MT_EE_NIC_CONF_0_PA_INT_2G;
+
+       return !(conf0 & mask);
+}
+
+/* Top-level EEPROM init: load the data, parse band capabilities, fetch
+ * the MAC address (allowing an external override — presumably from DT
+ * or platform data via mt76_eeprom_override), and clear the locally-
+ * administered bit of the first address byte.
+ */
+int mt76x2_eeprom_init(struct mt76x2_dev *dev)
+{
+       int ret;
+
+       ret = mt76x2_eeprom_load(dev);
+       if (ret)
+               return ret;
+
+       mt76x2_eeprom_parse_hw_cap(dev);
+       mt76x2_eeprom_get_macaddr(dev);
+       mt76_eeprom_override(&dev->mt76);
+       dev->mt76.macaddr[0] &= ~BIT(1);
+
+       return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
new file mode 100644 (file)
index 0000000..063d6c8
--- /dev/null
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_EEPROM_H
+#define __MT76x2_EEPROM_H
+
+#include "mt76x2.h"
+
+enum mt76x2_eeprom_field { /* byte offsets of 16-bit words in the EEPROM image */
+       MT_EE_CHIP_ID =                         0x000,
+       MT_EE_VERSION =                         0x002,
+       MT_EE_MAC_ADDR =                        0x004,
+       MT_EE_PCI_ID =                          0x00A,
+       MT_EE_NIC_CONF_0 =                      0x034,
+       MT_EE_NIC_CONF_1 =                      0x036,
+       MT_EE_NIC_CONF_2 =                      0x042,
+
+       MT_EE_XTAL_TRIM_1 =                     0x03a,
+       MT_EE_XTAL_TRIM_2 =                     0x09e,
+
+       MT_EE_LNA_GAIN =                        0x044,
+       MT_EE_RSSI_OFFSET_2G_0 =                0x046,
+       MT_EE_RSSI_OFFSET_2G_1 =                0x048,
+       MT_EE_RSSI_OFFSET_5G_0 =                0x04a,
+       MT_EE_RSSI_OFFSET_5G_1 =                0x04c,
+
+       MT_EE_TX_POWER_DELTA_BW40 =             0x050,
+       MT_EE_TX_POWER_DELTA_BW80 =             0x052,
+
+       MT_EE_TX_POWER_EXT_PA_5G =              0x054,
+
+       MT_EE_TX_POWER_0_START_2G =             0x056,
+       MT_EE_TX_POWER_1_START_2G =             0x05c,
+
+       /* used as byte arrays */
+#define MT_TX_POWER_GROUP_SIZE_5G              5
+#define MT_TX_POWER_GROUPS_5G                  6
+       MT_EE_TX_POWER_0_START_5G =             0x062,
+
+       MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA =  0x074,
+       MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE =      0x076,
+
+       MT_EE_TX_POWER_1_START_5G =             0x080,
+
+       MT_EE_TX_POWER_CCK =                    0x0a0,
+       MT_EE_TX_POWER_OFDM_2G_6M =             0x0a2,
+       MT_EE_TX_POWER_OFDM_2G_24M =            0x0a4,
+       MT_EE_TX_POWER_OFDM_5G_6M =             0x0b2,
+       MT_EE_TX_POWER_OFDM_5G_24M =            0x0b4,
+       MT_EE_TX_POWER_HT_MCS0 =                0x0a6,
+       MT_EE_TX_POWER_HT_MCS4 =                0x0a8,
+       MT_EE_TX_POWER_HT_MCS8 =                0x0aa,
+       MT_EE_TX_POWER_HT_MCS12 =               0x0ac,
+       MT_EE_TX_POWER_VHT_MCS0 =               0x0ba,
+       MT_EE_TX_POWER_VHT_MCS4 =               0x0bc,
+       MT_EE_TX_POWER_VHT_MCS8 =               0x0be,
+
+       MT_EE_RF_TEMP_COMP_SLOPE_5G =           0x0f2,
+       MT_EE_RF_TEMP_COMP_SLOPE_2G =           0x0f4,
+
+       MT_EE_RF_2G_TSSI_OFF_TXPOWER =          0x0f6,
+       MT_EE_RF_2G_RX_HIGH_GAIN =              0x0f8,
+       MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN =       0x0fa,
+       MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN =       0x0fc,
+       MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN =       0x0fe,
+
+       MT_EE_BT_RCAL_RESULT =                  0x138,
+       MT_EE_BT_VCDL_CALIBRATION =             0x13c,
+       MT_EE_BT_PMUCFG =                       0x13e,
+
+       __MT_EE_MAX /* exclusive upper bound, used for range checks */
+};
+
+#define MT_EE_NIC_CONF_0_PA_INT_2G             BIT(8)
+#define MT_EE_NIC_CONF_0_PA_INT_5G             BIT(9)
+#define MT_EE_NIC_CONF_0_BOARD_TYPE            GENMASK(13, 12)
+
+#define MT_EE_NIC_CONF_1_TEMP_TX_ALC           BIT(1)
+#define MT_EE_NIC_CONF_1_LNA_EXT_2G            BIT(2)
+#define MT_EE_NIC_CONF_1_LNA_EXT_5G            BIT(3)
+#define MT_EE_NIC_CONF_1_TX_ALC_EN             BIT(13)
+
+#define MT_EE_NIC_CONF_2_RX_STREAM             GENMASK(3, 0)
+#define MT_EE_NIC_CONF_2_TX_STREAM             GENMASK(7, 4)
+#define MT_EE_NIC_CONF_2_HW_ANTDIV             BIT(8)
+#define MT_EE_NIC_CONF_2_XTAL_OPTION           GENMASK(10, 9)
+#define MT_EE_NIC_CONF_2_TEMP_DISABLE          BIT(11)
+#define MT_EE_NIC_CONF_2_COEX_METHOD           GENMASK(15, 13)
+
+enum mt76x2_board_type { /* values of MT_EE_NIC_CONF_0_BOARD_TYPE */
+       BOARD_TYPE_2GHZ = 1,
+       BOARD_TYPE_5GHZ = 2,
+};
+
+enum mt76x2_cal_channel_group { /* 5 GHz calibration channel groups */
+       MT_CH_5G_JAPAN,
+       MT_CH_5G_UNII_1,
+       MT_CH_5G_UNII_2,
+       MT_CH_5G_UNII_2E_1,
+       MT_CH_5G_UNII_2E_2,
+       MT_CH_5G_UNII_3,
+       __MT_CH_MAX /* exclusive upper bound */
+};
+
+struct mt76x2_tx_power_info { /* per-channel TX power data parsed from EEPROM */
+       u8 target_power;
+
+       s8 delta_bw40; /* power delta for 40 MHz operation */
+       s8 delta_bw80; /* power delta for 80 MHz operation */
+
+       struct {
+               s8 tssi_slope;
+               s8 tssi_offset;
+               s8 target_power;
+               s8 delta;
+       } chain[MT_MAX_CHAINS]; /* per-TX-chain values */
+};
+
+struct mt76x2_temp_comp { /* temperature compensation data, see mt76x2_get_temp_comp() */
+       u8 temp_25_ref; /* reference reading at 25C */
+       int lower_bound; /* J */
+       int upper_bound; /* J */
+       unsigned int high_slope; /* J / dB */
+       unsigned int low_slope; /* J / dB */
+};
+
+static inline int
+mt76x2_eeprom_get(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field) /* read a little-endian 16-bit EEPROM word; -1 on invalid offset */
+{
+       if ((field & 1) || field >= __MT_EE_MAX) /* offsets must be even and in range */
+               return -1;
+
+       return get_unaligned_le16(dev->mt76.eeprom.data + field);
+}
+
+void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t);
+void mt76x2_get_power_info(struct mt76x2_dev *dev,
+                          struct mt76x2_tx_power_info *t);
+int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t);
+bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band);
+void mt76x2_read_rx_gain(struct mt76x2_dev *dev);
+
+static inline bool
+mt76x2_temp_tx_alc_enabled(struct mt76x2_dev *dev) /* EEPROM says temperature-based TX ALC is enabled */
+{
+       return mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+              MT_EE_NIC_CONF_1_TEMP_TX_ALC;
+}
+
+static inline bool
+mt76x2_tssi_enabled(struct mt76x2_dev *dev) /* TSSI TX ALC enabled; mutually exclusive with temp-based ALC */
+{
+       return !mt76x2_temp_tx_alc_enabled(dev) &&
+              (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+               MT_EE_NIC_CONF_1_TX_ALC_EN);
+}
+
+static inline bool
+mt76x2_has_ext_lna(struct mt76x2_dev *dev) /* external LNA present on the current band */
+{
+       u32 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
+
+       if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
+               return val & MT_EE_NIC_CONF_1_LNA_EXT_2G;
+       else
+               return val & MT_EE_NIC_CONF_1_LNA_EXT_5G;
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
new file mode 100644 (file)
index 0000000..d3f03a8
--- /dev/null
@@ -0,0 +1,839 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mt76x2_eeprom.h"
+#include "mt76x2_mcu.h"
+
+struct mt76x2_reg_pair { /* register address/value pair for bulk init writes */
+       u32 reg;
+       u32 value;
+};
+
<no value>+static bool
+mt76x2_wait_for_mac(struct mt76x2_dev *dev) /* poll MAC_CSR0 until the MAC responds; false on timeout */
+{
+       int i;
+
+       for (i = 0; i < 500; i++) {
+               switch (mt76_rr(dev, MT_MAC_CSR0)) {
+               case 0: /* not ready yet */
+               case ~0: /* bus returns all-ones: device not accessible */
+                       break;
+               default:
+                       return true;
+               }
+               usleep_range(5000, 10000); /* up to ~2.5-5s total */
+       }
+
+       return false;
+}
+
+static bool
+wait_for_wpdma(struct mt76x2_dev *dev) /* wait until WPDMA TX and RX engines are idle */
+{
+       return mt76_poll(dev, MT_WPDMA_GLO_CFG,
+                        MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+                        MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
+                        0, 1000);
+}
+
+static void
+mt76x2_mac_pbf_init(struct mt76x2_dev *dev) /* reset the packet buffer unit and set queue limits */
+{
+       u32 val;
+
+       val = MT_PBF_SYS_CTRL_MCU_RESET |
+             MT_PBF_SYS_CTRL_DMA_RESET |
+             MT_PBF_SYS_CTRL_MAC_RESET |
+             MT_PBF_SYS_CTRL_PBF_RESET |
+             MT_PBF_SYS_CTRL_ASY_RESET;
+
+       mt76_set(dev, MT_PBF_SYS_CTRL, val); /* assert ... */
+       mt76_clear(dev, MT_PBF_SYS_CTRL, val); /* ... then release all resets */
+
+       mt76_wr(dev, MT_PBF_TX_MAX_PCNT, 0xefef3f1f); /* vendor-recommended values */
+       mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf);
+}
+
+static void
+mt76x2_write_reg_pairs(struct mt76x2_dev *dev,
+                      const struct mt76x2_reg_pair *data, int len) /* write len register/value pairs in order */
+{
+       while (len > 0) {
+               mt76_wr(dev, data->reg, data->value);
+               len--;
+               data++;
+       }
+}
+
+static void
+mt76_write_mac_initvals(struct mt76x2_dev *dev) /* program default MAC register values and protection config */
+{
+#define DEFAULT_PROT_CFG                               \
+       (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) |         \
+        FIELD_PREP(MT_PROT_CFG_NAV, 1) |                       \
+        FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) |     \
+        MT_PROT_CFG_RTS_THRESH)
+
+#define DEFAULT_PROT_CFG_20                            \
+       (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) |         \
+        FIELD_PREP(MT_PROT_CFG_CTRL, 1) |              \
+        FIELD_PREP(MT_PROT_CFG_NAV, 1) |                       \
+        FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17))
+
+#define DEFAULT_PROT_CFG_40                            \
+       (FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) |         \
+        FIELD_PREP(MT_PROT_CFG_CTRL, 1) |              \
+        FIELD_PREP(MT_PROT_CFG_NAV, 1) |                       \
+        FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f))
+
+       static const struct mt76x2_reg_pair vals[] = {
+               /* Copied from MediaTek reference source */
+               { MT_PBF_SYS_CTRL,              0x00080c00 },
+               { MT_PBF_CFG,                   0x1efebcff },
+               { MT_FCE_PSE_CTRL,              0x00000001 },
+               { MT_MAC_SYS_CTRL,              0x0000000c },
+               { MT_MAX_LEN_CFG,               0x003e3f00 },
+               { MT_AMPDU_MAX_LEN_20M1S,       0xaaa99887 },
+               { MT_AMPDU_MAX_LEN_20M2S,       0x000000aa },
+               { MT_XIFS_TIME_CFG,             0x33a40d0a },
+               { MT_BKOFF_SLOT_CFG,            0x00000209 },
+               { MT_TBTT_SYNC_CFG,             0x00422010 },
+               { MT_PWR_PIN_CFG,               0x00000000 },
+               { 0x1238,                       0x001700c8 }, /* undocumented register, from vendor code */
+               { MT_TX_SW_CFG0,                0x00101001 },
+               { MT_TX_SW_CFG1,                0x00010000 },
+               { MT_TX_SW_CFG2,                0x00000000 },
+               { MT_TXOP_CTRL_CFG,             0x0400583f },
+               { MT_TX_RTS_CFG,                0x00100020 },
+               { MT_TX_TIMEOUT_CFG,            0x000a2290 },
+               { MT_TX_RETRY_CFG,              0x47f01f0f },
+               { MT_EXP_ACK_TIME,              0x002c00dc },
+               { MT_TX_PROT_CFG6,              0xe3f42004 },
+               { MT_TX_PROT_CFG7,              0xe3f42084 },
+               { MT_TX_PROT_CFG8,              0xe3f42104 },
+               { MT_PIFS_TX_CFG,               0x00060fff },
+               { MT_RX_FILTR_CFG,              0x00015f97 },
+               { MT_LEGACY_BASIC_RATE,         0x0000017f },
+               { MT_HT_BASIC_RATE,             0x00004003 },
+               { MT_PN_PAD_MODE,               0x00000002 },
+               { MT_TXOP_HLDR_ET,              0x00000002 },
+               { 0xa44,                        0x00000000 }, /* undocumented register, from vendor code */
+               { MT_HEADER_TRANS_CTRL_REG,     0x00000000 },
+               { MT_TSO_CTRL,                  0x00000000 },
+               { MT_AUX_CLK_CFG,               0x00000000 },
+               { MT_DACCLK_EN_DLY_CFG,         0x00000000 },
+               { MT_TX_ALC_CFG_4,              0x00000000 },
+               { MT_TX_ALC_VGA3,               0x00000000 },
+               { MT_TX_PWR_CFG_0,              0x3a3a3a3a },
+               { MT_TX_PWR_CFG_1,              0x3a3a3a3a },
+               { MT_TX_PWR_CFG_2,              0x3a3a3a3a },
+               { MT_TX_PWR_CFG_3,              0x3a3a3a3a },
+               { MT_TX_PWR_CFG_4,              0x3a3a3a3a },
+               { MT_TX_PWR_CFG_7,              0x3a3a3a3a },
+               { MT_TX_PWR_CFG_8,              0x0000003a },
+               { MT_TX_PWR_CFG_9,              0x0000003a },
+               { MT_EFUSE_CTRL,                0x0000d000 },
+               { MT_PAUSE_ENABLE_CONTROL1,     0x0000000a },
+               { MT_FCE_WLAN_FLOW_CONTROL1,    0x60401c18 },
+               { MT_WPDMA_DELAY_INT_CFG,       0x94ff0000 },
+               { MT_TX_SW_CFG3,                0x00000004 },
+               { MT_HT_FBK_TO_LEGACY,          0x00001818 },
+               { MT_VHT_HT_FBK_CFG1,           0xedcba980 },
+               { MT_PROT_AUTO_TX_CFG,          0x00830083 },
+               { MT_HT_CTRL_CFG,               0x000001ff },
+       };
+       struct mt76x2_reg_pair prot_vals[] = { /* per-PHY-mode protection defaults */
+               { MT_CCK_PROT_CFG,              DEFAULT_PROT_CFG },
+               { MT_OFDM_PROT_CFG,             DEFAULT_PROT_CFG },
+               { MT_MM20_PROT_CFG,             DEFAULT_PROT_CFG_20 },
+               { MT_MM40_PROT_CFG,             DEFAULT_PROT_CFG_40 },
+               { MT_GF20_PROT_CFG,             DEFAULT_PROT_CFG_20 },
+               { MT_GF40_PROT_CFG,             DEFAULT_PROT_CFG_40 },
+       };
+
+       mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals));
+       mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals));
+}
+
+static void
+mt76x2_fixup_xtal(struct mt76x2_dev *dev) /* apply crystal trim from EEPROM, with fallbacks for blank fields */
+{
+       u16 eep_val;
+       s8 offset = 0;
+
+       eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
+
+       offset = eep_val & 0x7f; /* low 7 bits: magnitude */
+       if ((eep_val & 0xff) == 0xff) /* unprogrammed byte */
+               offset = 0;
+       else if (eep_val & 0x80) /* sign bit */
+               offset = 0 - offset;
+
+       eep_val >>= 8; /* high byte: trim value */
+       if (eep_val == 0x00 || eep_val == 0xff) { /* blank: fall back to TRIM_1 */
+               eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
+               eep_val &= 0xff;
+
+               if (eep_val == 0x00 || eep_val == 0xff)
+                       eep_val = 0x14; /* hardware default when both are blank */
+       }
+
+       eep_val &= 0x7f;
+       mt76_rmw_field(dev, MT_XO_CTRL5, MT_XO_CTRL5_C2_VAL, eep_val + offset);
+       mt76_set(dev, MT_XO_CTRL6, MT_XO_CTRL6_C2_CTRL);
+
+       eep_val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
+       switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) { /* board crystal option */
+       case 0:
+               mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
+               break;
+       case 1:
+               mt76_wr(dev, MT_XO_CTRL7, 0x5c1feed0);
+               break;
+       default:
+               break;
+       }
+}
+
+static void
+mt76x2_init_beacon_offsets(struct mt76x2_dev *dev) /* program the 16 beacon slot offsets, 4 per register */
+{
+       u16 base = MT_BEACON_BASE;
+       u32 regs[4] = {};
+       int i;
+
+       for (i = 0; i < 16; i++) {
+               u16 addr = dev->beacon_offsets[i];
+
+               regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4)); /* offsets encoded in 64-byte units, one byte each */
+       }
+
+       for (i = 0; i < 4; i++)
+               mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+}
+
+int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard) /* reset the MAC; 'hard' additionally clears WCID/key/beacon state */
+{
+       static const u8 null_addr[ETH_ALEN] = {};
+       const u8 *macaddr = dev->mt76.macaddr;
+       u32 val;
+       int i, k;
+
+       if (!mt76x2_wait_for_mac(dev))
+               return -ETIMEDOUT;
+
+       val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
+
+       val &= ~(MT_WPDMA_GLO_CFG_TX_DMA_EN | /* disable DMA while resetting */
+                MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+                MT_WPDMA_GLO_CFG_RX_DMA_EN |
+                MT_WPDMA_GLO_CFG_RX_DMA_BUSY |
+                MT_WPDMA_GLO_CFG_DMA_BURST_SIZE);
+       val |= FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3);
+
+       mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
+
+       mt76x2_mac_pbf_init(dev);
+       mt76_write_mac_initvals(dev);
+       mt76x2_fixup_xtal(dev);
+
+       mt76_clear(dev, MT_MAC_SYS_CTRL, /* release CSR/BBP resets */
+                  MT_MAC_SYS_CTRL_RESET_CSR |
+                  MT_MAC_SYS_CTRL_RESET_BBP);
+
+       if (is_mt7612(dev))
+               mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN); /* no BT coex on MT7612 */
+
+       mt76_set(dev, MT_EXT_CCA_CFG, 0x0000f000);
+       mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31));
+
+       mt76_wr(dev, MT_RF_BYPASS_0, 0x06000000);
+       mt76_wr(dev, MT_RF_SETTING_0, 0x08800000);
+       usleep_range(5000, 10000);
+       mt76_wr(dev, MT_RF_BYPASS_0, 0x00000000);
+
+       mt76_wr(dev, MT_MCU_CLOCK_CTL, 0x1401);
+       mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+       mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(macaddr)); /* own MAC address */
+       mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(macaddr + 4));
+
+       mt76_wr(dev, MT_MAC_BSSID_DW0, get_unaligned_le32(macaddr));
+       mt76_wr(dev, MT_MAC_BSSID_DW1, get_unaligned_le16(macaddr + 4) |
+               FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 beacons */
+               MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
+
+       /* Fire a pre-TBTT interrupt 8 ms before TBTT */
+       mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
+                      8 << 4);
+       mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
+                      MT_DFS_GP_INTERVAL);
+       mt76_wr(dev, MT_INT_TIMER_EN, 0);
+
+       mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff); /* disable all beacon slots */
+       if (!hard)
+               return 0;
+
+       for (i = 0; i < 256 / 32; i++) /* clear WCID drop bitmap */
+               mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0);
+
+       for (i = 0; i < 256; i++) /* reset all WCID entries */
+               mt76x2_mac_wcid_setup(dev, i, 0, NULL);
+
+       for (i = 0; i < 16; i++) /* clear all shared keys */
+               for (k = 0; k < 4; k++)
+                       mt76x2_mac_shared_key_setup(dev, i, k, NULL);
+
+       for (i = 0; i < 8; i++) { /* clear per-BSS state */
+               mt76x2_mac_set_bssid(dev, i, null_addr);
+               mt76x2_mac_set_beacon(dev, i, NULL);
+       }
+
+       for (i = 0; i < 16; i++) /* drain stale TX status entries */
+               mt76_rr(dev, MT_TX_STAT_FIFO);
+
+       mt76_set(dev, MT_MAC_APC_BSSID_H(0), MT_MAC_APC_BSSID0_H_EN);
+
+       mt76_wr(dev, MT_CH_TIME_CFG, /* enable channel busy-time accounting */
+               MT_CH_TIME_CFG_TIMER_EN |
+               MT_CH_TIME_CFG_TX_AS_BUSY |
+               MT_CH_TIME_CFG_RX_AS_BUSY |
+               MT_CH_TIME_CFG_NAV_AS_BUSY |
+               MT_CH_TIME_CFG_EIFS_AS_BUSY |
+               FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
+
+       mt76x2_init_beacon_offsets(dev);
+
+       mt76x2_set_tx_ackto(dev);
+
+       return 0;
+}
+
+int mt76x2_mac_start(struct mt76x2_dev *dev) /* clear stats, enable DMA and MAC TX/RX, unmask interrupts */
+{
+       int i;
+
+       for (i = 0; i < 16; i++) /* registers are clear-on-read; drain old counters */
+               mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+       for (i = 0; i < 16; i++)
+               mt76_rr(dev, MT_TX_STAT_FIFO);
+
+       memset(dev->aggr_stats, 0, sizeof(dev->aggr_stats));
+
+       mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+       wait_for_wpdma(dev); /* DMA must be idle before (re)enabling it */
+       usleep_range(50, 100);
+
+       mt76_set(dev, MT_WPDMA_GLO_CFG,
+                MT_WPDMA_GLO_CFG_TX_DMA_EN |
+                MT_WPDMA_GLO_CFG_RX_DMA_EN);
+
+       mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+
+       mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+       mt76_wr(dev, MT_MAC_SYS_CTRL,
+               MT_MAC_SYS_CTRL_ENABLE_TX |
+               MT_MAC_SYS_CTRL_ENABLE_RX);
+
+       mt76x2_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+                              MT_INT_TX_STAT);
+
+       return 0;
+}
+
+void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force) /* disable MAC TX/RX and wait for idle; 'force' resets BBP if it won't stop */
+{
+       bool stopped = false;
+       u32 rts_cfg;
+       int i;
+
+       mt76_wr(dev, MT_MAC_SYS_CTRL, 0); /* disable TX and RX */
+
+       rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+       mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT); /* stop RTS retries while draining */
+
+       /* Wait for MAC to become idle */
+       for (i = 0; i < 300; i++) {
+               if (mt76_rr(dev, MT_MAC_STATUS) &
+                   (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX))
+                       continue;
+
+               if (mt76_rr(dev, MT_BBP(IBI, 12))) /* BBP still busy */
+                       continue;
+
+               stopped = true;
+               break;
+       }
+
+       if (force && !stopped) { /* toggle BBP reset bits to force it idle */
+               mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+               mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+               mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+               mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+       }
+
+       mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg); /* restore the saved RTS config */
+}
+
+void mt76x2_mac_resume(struct mt76x2_dev *dev) /* re-enable MAC TX/RX after mt76x2_mac_stop() */
+{
+       mt76_wr(dev, MT_MAC_SYS_CTRL,
+               MT_MAC_SYS_CTRL_ENABLE_TX |
+               MT_MAC_SYS_CTRL_ENABLE_RX);
+}
+
+static void
+mt76x2_power_on_rf_patch(struct mt76x2_dev *dev) /* vendor RF power-up patch sequence; register meanings undocumented */
+{
+       mt76_set(dev, 0x10130, BIT(0) | BIT(16));
+       udelay(1);
+
+       mt76_clear(dev, 0x1001c, 0xff);
+       mt76_set(dev, 0x1001c, 0x30);
+
+       mt76_wr(dev, 0x10014, 0x484f);
+       udelay(1);
+
+       mt76_set(dev, 0x10130, BIT(17));
+       udelay(125); /* delays are part of the vendor sequence; do not reorder */
+
+       mt76_clear(dev, 0x10130, BIT(16));
+       udelay(50);
+
+       mt76_set(dev, 0x1014c, BIT(19) | BIT(20));
+}
+
+static void
+mt76x2_power_on_rf(struct mt76x2_dev *dev, int unit) /* power up one RF unit; unit 0 uses bits 0-7, unit 1 bits 8-15 */
+{
+       int shift = unit ? 8 : 0;
+
+       /* Enable RF BG */
+       mt76_set(dev, 0x10130, BIT(0) << shift);
+       udelay(10);
+
+       /* Enable RFDIG LDO/AFE/ABB/ADDA */
+       mt76_set(dev, 0x10130, (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift);
+       udelay(10);
+
+       /* Switch RFDIG power to internal LDO */
+       mt76_clear(dev, 0x10130, BIT(2) << shift);
+       udelay(10);
+
+       mt76x2_power_on_rf_patch(dev);
+
+       mt76_set(dev, 0x530, 0xf);
+}
+
+static void
+mt76x2_power_on(struct mt76x2_dev *dev) /* power up the WLAN block and both RF units */
+{
+       u32 val;
+
+       /* Turn on WL MTCMOS */
+       mt76_set(dev, MT_WLAN_MTC_CTRL, MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP);
+
+       val = MT_WLAN_MTC_CTRL_STATE_UP |
+             MT_WLAN_MTC_CTRL_PWR_ACK |
+             MT_WLAN_MTC_CTRL_PWR_ACK_S;
+
+       mt76_poll(dev, MT_WLAN_MTC_CTRL, val, val, 1000); /* wait for power-up acknowledge */
+
+       mt76_clear(dev, MT_WLAN_MTC_CTRL, 0x7f << 16);
+       udelay(10);
+
+       mt76_clear(dev, MT_WLAN_MTC_CTRL, 0xf << 24);
+       udelay(10);
+
+       mt76_set(dev, MT_WLAN_MTC_CTRL, 0xf << 24);
+       mt76_clear(dev, MT_WLAN_MTC_CTRL, 0xfff);
+
+       /* Turn on AD/DA power down */
+       mt76_clear(dev, 0x11204, BIT(3));
+
+       /* WLAN function enable */
+       mt76_set(dev, 0x10080, BIT(0));
+
+       /* Release BBP software reset */
+       mt76_clear(dev, 0x10064, BIT(18));
+
+       mt76x2_power_on_rf(dev, 0);
+       mt76x2_power_on_rf(dev, 1);
+}
+
+void mt76x2_set_tx_ackto(struct mt76x2_dev *dev) /* program ACK timeout from slot time, coverage class and SIFS */
+{
+       u8 ackto, sifs, slottime = dev->slottime;
+
+       slottime += 3 * dev->coverage_class; /* extend slot time for larger coverage distance */
+
+       sifs = mt76_get_field(dev, MT_XIFS_TIME_CFG,
+                             MT_XIFS_TIME_CFG_OFDM_SIFS);
+
+       ackto = slottime + sifs;
+       mt76_rmw_field(dev, MT_TX_TIMEOUT_CFG,
+                      MT_TX_TIMEOUT_CFG_ACKTO, ackto);
+}
+
+static void
+mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable) /* gate the WLAN function and its clock on or off */
+{
+       u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+       if (enable)
+               val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+                       MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+       else
+               val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN |
+                        MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+
+       mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+       udelay(20); /* settle time after toggling the function control */
+}
+
+static void
+mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable) /* pulse the RF reset if WLAN was on, then set the desired state */
+{
+       u32 val;
+
+       val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+       val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+       if (val & MT_WLAN_FUN_CTRL_WLAN_EN) { /* only reset RF when WLAN was already enabled */
+               val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+               mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+               udelay(20);
+
+               val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+       }
+
+       mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+       udelay(20);
+
+       mt76x2_set_wlan_state(dev, enable);
+}
+
+int mt76x2_init_hardware(struct mt76x2_dev *dev) /* full bring-up: power, EEPROM, MAC reset, DMA, MCU */
+{
+       static const u16 beacon_offsets[16] = { /* static const: pointer stored in dev stays valid */
+               /* 1024 byte per beacon */
+               0xc000,
+               0xc400,
+               0xc800,
+               0xcc00,
+               0xd000,
+               0xd400,
+               0xd800,
+               0xdc00,
+
+               /* BSS idx 8-15 not used for beacons */
+               0xc000,
+               0xc000,
+               0xc000,
+               0xc000,
+               0xc000,
+               0xc000,
+               0xc000,
+               0xc000,
+       };
+       u32 val;
+       int ret;
+
+       dev->beacon_offsets = beacon_offsets;
+       tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet,
+                    (unsigned long) dev);
+
+       dev->chainmask = 0x202; /* 2x2 device */
+       dev->global_wcid.idx = 255;
+       dev->global_wcid.hw_key_idx = -1;
+       dev->slottime = 9;
+
+       val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
+       val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE | /* keep only these fields, clear the rest */
+              MT_WPDMA_GLO_CFG_BIG_ENDIAN |
+              MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
+       val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
+       mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
+
+       mt76x2_reset_wlan(dev, true);
+       mt76x2_power_on(dev);
+
+       ret = mt76x2_eeprom_init(dev);
+       if (ret)
+               return ret;
+
+       ret = mt76x2_mac_reset(dev, true); /* hard reset on first init */
+       if (ret)
+               return ret;
+
+       ret = mt76x2_dma_init(dev);
+       if (ret)
+               return ret;
+
+       set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+       ret = mt76x2_mac_start(dev);
+       if (ret)
+               return ret;
+
+       ret = mt76x2_mcu_init(dev);
+       if (ret)
+               return ret;
+
+       mt76x2_mac_stop(dev, false); /* leave the MAC stopped until the interface is brought up */
+       dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
+
+       return 0;
+}
+
+void mt76x2_stop_hardware(struct mt76x2_dev *dev) /* cancel deferred work, power down the radio and stop the MAC */
+{
+       cancel_delayed_work_sync(&dev->cal_work);
+       cancel_delayed_work_sync(&dev->mac_work);
+       mt76x2_mcu_set_radio_state(dev, false);
+       mt76x2_mac_stop(dev, false);
+}
+
+void mt76x2_cleanup(struct mt76x2_dev *dev) /* teardown counterpart of mt76x2_init_hardware() */
+{
+       mt76x2_stop_hardware(dev);
+       mt76x2_dma_cleanup(dev);
+       mt76x2_mcu_cleanup(dev);
+}
+
+struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev) /* allocate the ieee80211_hw + mt76x2_dev pair; NULL on failure */
+{
+       static const struct mt76_driver_ops drv_ops = { /* callbacks used by the mt76 core */
+               .txwi_size = sizeof(struct mt76x2_txwi),
+               .update_survey = mt76x2_update_channel,
+               .tx_prepare_skb = mt76x2_tx_prepare_skb,
+               .tx_complete_skb = mt76x2_tx_complete_skb,
+               .rx_skb = mt76x2_queue_rx_skb,
+               .rx_poll_complete = mt76x2_rx_poll_complete,
+       };
+       struct ieee80211_hw *hw;
+       struct mt76x2_dev *dev;
+
+       hw = ieee80211_alloc_hw(sizeof(*dev), &mt76x2_ops); /* dev lives in hw->priv */
+       if (!hw)
+               return NULL;
+
+       dev = hw->priv;
+       dev->mt76.dev = pdev;
+       dev->mt76.hw = hw;
+       dev->mt76.drv = &drv_ops;
+       mutex_init(&dev->mutex);
+       spin_lock_init(&dev->irq_lock);
+
+       return dev;
+}
+
+static void mt76x2_regd_notifier(struct wiphy *wiphy,
+                                struct regulatory_request *request) /* track regulatory DFS region for the DFS pattern detector */
+{
+       struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+       struct mt76x2_dev *dev = hw->priv;
+
+       dev->dfs_pd.region = request->dfs_region;
+}
+
+#define CCK_RATE(_idx, _rate) {                                        \
+       .bitrate = _rate,                                       \
+       .flags = IEEE80211_RATE_SHORT_PREAMBLE,                 \
+       .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx,              \
+       .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx),  \
+}
+
+#define OFDM_RATE(_idx, _rate) {                               \
+       .bitrate = _rate,                                       \
+       .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx,             \
+       .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx,       \
+}
+
+static struct ieee80211_rate mt76x2_rates[] = { /* legacy rate table; bitrate in units of 100 kbps */
+       CCK_RATE(0, 10),
+       CCK_RATE(1, 20),
+       CCK_RATE(2, 55),
+       CCK_RATE(3, 110),
+       OFDM_RATE(0, 60),
+       OFDM_RATE(1, 90),
+       OFDM_RATE(2, 120),
+       OFDM_RATE(3, 180),
+       OFDM_RATE(4, 240),
+       OFDM_RATE(5, 360),
+       OFDM_RATE(6, 480),
+       OFDM_RATE(7, 540),
+};
+
+static const struct ieee80211_iface_limit if_limits[] = { /* concurrent interface limits advertised to cfg80211 */
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_ADHOC)
+       }, {
+               .max = 8,
+               .types = BIT(NL80211_IFTYPE_STATION) |
+#ifdef CONFIG_MAC80211_MESH
+                        BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+                        BIT(NL80211_IFTYPE_AP)
+        },
+};
+
+static const struct ieee80211_iface_combination if_comb[] = { /* single-channel, up to 8 interfaces, DFS-capable widths */
+       {
+               .limits = if_limits,
+               .n_limits = ARRAY_SIZE(if_limits),
+               .max_interfaces = 8,
+               .num_different_channels = 1,
+               .beacon_int_infra_match = true,
+               .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+                                      BIT(NL80211_CHAN_WIDTH_20) |
+                                      BIT(NL80211_CHAN_WIDTH_40) |
+                                      BIT(NL80211_CHAN_WIDTH_80),
+       }
+};
+
+static void mt76x2_led_set_config(struct mt76_dev *mt76, u8 delay_on,
+                                 u8 delay_off) /* program LED on/off durations and kick the LED controller */
+{
+       struct mt76x2_dev *dev = container_of(mt76, struct mt76x2_dev,
+                                             mt76);
+       u32 val;
+
+       val = MT_LED_STATUS_DURATION(0xff) |
+             MT_LED_STATUS_OFF(delay_off) |
+             MT_LED_STATUS_ON(delay_on);
+
+       mt76_wr(dev, MT_LED_S0(mt76->led_pin), val);
+       mt76_wr(dev, MT_LED_S1(mt76->led_pin), val);
+
+       val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
+             MT_LED_CTRL_KICK(mt76->led_pin);
+       if (mt76->led_al) /* active-low LED wiring */
+               val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
+       mt76_wr(dev, MT_LED_CTRL, val);
+}
+
+static int mt76x2_led_set_blink(struct led_classdev *led_cdev,
+                               unsigned long *delay_on,
+                               unsigned long *delay_off) /* led_classdev blink_set callback; delays given in ms */
+{
+       struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
+                                            led_cdev);
+       u8 delta_on, delta_off;
+
+       delta_off = max_t(u8, *delay_off / 10, 1); /* hardware units are 10 ms, minimum 1 */
+       delta_on = max_t(u8, *delay_on / 10, 1);
+
+       mt76x2_led_set_config(mt76, delta_on, delta_off);
+       return 0;
+}
+
+static void mt76x2_led_set_brightness(struct led_classdev *led_cdev,
+                                     enum led_brightness brightness) /* led_classdev brightness_set callback: full on or full off */
+{
+       struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
+                                            led_cdev);
+
+       if (!brightness)
+               mt76x2_led_set_config(mt76, 0, 0xff); /* steady off */
+       else
+               mt76x2_led_set_config(mt76, 0xff, 0); /* steady on */
+}
+
+int mt76x2_register_device(struct mt76x2_dev *dev) /* init hardware and register with mac80211; hardware is stopped on failure */
+{
+       struct ieee80211_hw *hw = mt76_hw(dev);
+       struct wiphy *wiphy = hw->wiphy;
+       void *status_fifo;
+       int fifo_size;
+       int i, ret;
+
+       fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x2_tx_status)); /* kfifo requires a power-of-two size */
+       status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
+       if (!status_fifo)
+               return -ENOMEM;
+
+       kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
+
+       ret = mt76x2_init_hardware(dev);
+       if (ret)
+               return ret;
+
+       hw->queues = 4;
+       hw->max_rates = 1;
+       hw->max_report_rates = 7;
+       hw->max_rate_tries = 1;
+       hw->extra_tx_headroom = 2;
+
+       hw->sta_data_size = sizeof(struct mt76x2_sta);
+       hw->vif_data_size = sizeof(struct mt76x2_vif);
+
+       for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) { /* derive extra addresses from the base MAC */
+               u8 *addr = dev->macaddr_list[i].addr;
+
+               memcpy(addr, dev->mt76.macaddr, ETH_ALEN);
+
+               if (!i) /* entry 0 is the unmodified base address */
+                       continue;
+
+               addr[0] |= BIT(1); /* mark as locally administered */
+               addr[0] ^= ((i - 1) << 2);
+       }
+       wiphy->addresses = dev->macaddr_list;
+       wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list);
+
+       wiphy->iface_combinations = if_comb;
+       wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+
+       wiphy->reg_notifier = mt76x2_regd_notifier;
+
+       wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
+       ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+       INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
+       INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);
+
+       dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+       dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+       mt76x2_dfs_init_detector(dev);
+
+       /* init led callbacks */
+       dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
+       dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
+
+       ret = mt76_register_device(&dev->mt76, true, mt76x2_rates,
+                                  ARRAY_SIZE(mt76x2_rates));
+       if (ret)
+               goto fail;
+
+       mt76x2_init_debugfs(dev);
+
+       return 0;
+
+fail:
+       mt76x2_stop_hardware(dev); /* undo mt76x2_init_hardware() */
+       return ret;
+}
+
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
new file mode 100644 (file)
index 0000000..39fc1d7
--- /dev/null
@@ -0,0 +1,755 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mt76x2_mcu.h"
+#include "mt76x2_eeprom.h"
+#include "mt76x2_trace.h"
+
+/*
+ * Program one of the hardware BSSID slots (idx is wrapped to the 8
+ * available entries): low 32 bits in one register, high 16 bits via a
+ * read-modify-write of the companion register's address field.
+ */
+void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
+{
+       idx &= 7;
+       mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
+       mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
+                      get_unaligned_le16(addr + 4));
+}
+
+/*
+ * Translate a hardware RXWI rate word into mac80211 rx status fields:
+ * rate index, encoding (legacy/HT/VHT), guard interval, bandwidth and
+ * coding flags.
+ */
+static void
+mt76x2_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
+{
+       u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+       switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+       case MT_PHY_TYPE_OFDM:
+               /* clamp out-of-range hardware indices */
+               if (idx >= 8)
+                       idx = 0;
+
+               /* on 2.4 GHz the OFDM rates sit after the 4 CCK entries */
+               if (status->band == NL80211_BAND_2GHZ)
+                       idx += 4;
+
+               status->rate_idx = idx;
+               return;
+       case MT_PHY_TYPE_CCK:
+               /* indices >= 8 indicate short-preamble CCK */
+               if (idx >= 8) {
+                       idx -= 8;
+                       status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+               }
+
+               /* clamp out-of-range hardware indices */
+               if (idx >= 4)
+                       idx = 0;
+
+               status->rate_idx = idx;
+               return;
+       case MT_PHY_TYPE_HT_GF:
+               status->enc_flags |= RX_ENC_FLAG_HT_GF;
+               /* fall through */
+       case MT_PHY_TYPE_HT:
+               status->encoding = RX_ENC_HT;
+               status->rate_idx = idx;
+               break;
+       case MT_PHY_TYPE_VHT:
+               /* VHT packs MCS and NSS into the 6-bit index field */
+               status->encoding = RX_ENC_VHT;
+               status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
+               status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
+               break;
+       default:
+               WARN_ON(1);
+               return;
+       }
+
+       /* only HT/VHT reach this point: decode common modulation flags */
+       if (rate & MT_RXWI_RATE_LDPC)
+               status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+       if (rate & MT_RXWI_RATE_SGI)
+               status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+       if (rate & MT_RXWI_RATE_STBC)
+               status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
+
+       switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+       case MT_PHY_BW_20:
+               break;
+       case MT_PHY_BW_40:
+               status->bw = RATE_INFO_BW_40;
+               break;
+       case MT_PHY_BW_80:
+               status->bw = RATE_INFO_BW_80;
+               break;
+       default:
+               break;
+       }
+}
+
+/*
+ * Encode a mac80211 tx rate into the 16-bit little-endian hardware
+ * rate word (index, PHY mode, bandwidth, SGI).  The spatial stream
+ * count is reported through *nss_val so callers can cache it.
+ */
+static __le16
+mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
+                      const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+       u16 rateval;
+       u8 phy, rate_idx;
+       u8 nss = 1;
+       u8 bw = 0;
+
+       if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+               rate_idx = rate->idx;
+               /* VHT: NSS is encoded in the upper bits of the MCS index */
+               nss = 1 + (rate->idx >> 4);
+               phy = MT_PHY_TYPE_VHT;
+               if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+                       bw = 2;
+               else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+                       bw = 1;
+       } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+               rate_idx = rate->idx;
+               /* HT: 8 MCS indices per spatial stream */
+               nss = 1 + (rate->idx >> 3);
+               phy = MT_PHY_TYPE_HT;
+               if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+                       phy = MT_PHY_TYPE_HT_GF;
+               if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+                       bw = 1;
+       } else {
+               /*
+                * Legacy rate: the per-band bitrate table carries the
+                * hardware value with the PHY type in the high byte and
+                * the rate index in the low byte.
+                */
+               const struct ieee80211_rate *r;
+               int band = dev->mt76.chandef.chan->band;
+               u16 val;
+
+               r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
+               if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+                       val = r->hw_value_short;
+               else
+                       val = r->hw_value;
+
+               phy = val >> 8;
+               rate_idx = val & 0xff;
+               bw = 0;
+       }
+
+       rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
+       rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+       rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
+       if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+               rateval |= MT_RXWI_RATE_SGI;
+
+       *nss_val = nss;
+       return cpu_to_le16(rateval);
+}
+
+/*
+ * Set or clear the per-WCID tx-drop bit.  The register is only
+ * written when the bit actually changes.
+ */
+void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop)
+{
+       u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
+       u32 bit = MT_WCID_DROP_MASK(idx);
+
+       /* prevent unnecessary writes */
+       if ((val & bit) != (bit * drop))
+               mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
+}
+
+/*
+ * Cache the current tx rate for a WCID entry.  dev->mt76.lock
+ * serializes this against the readers in the tx path
+ * (mt76x2_mac_write_txwi).
+ */
+void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
+                             const struct ieee80211_tx_rate *rate)
+{
+       spin_lock_bh(&dev->mt76.lock);
+       wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+       wcid->tx_rate_set = true;
+       spin_unlock_bh(&dev->mt76.lock);
+}
+
+/*
+ * Fill a TXWI descriptor for @skb.  @wcid and @sta may be NULL (the
+ * beacon path calls this with both NULL); in that case the rate is
+ * always derived from the skb's tx info rather than the wcid cache.
+ */
+void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
+                          struct sk_buff *skb, struct mt76_wcid *wcid,
+                          struct ieee80211_sta *sta)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_tx_rate *rate = &info->control.rates[0];
+       u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
+       u16 txwi_flags = 0;
+       u8 nss;
+       s8 txpwr_adj, max_txpwr_adj;
+
+       memset(txwi, 0, sizeof(*txwi));
+
+       if (wcid)
+               txwi->wcid = wcid->idx;
+       else
+               txwi->wcid = 0xff;
+
+       txwi->pktid = 1;
+
+       /*
+        * No explicit rate selected: fall back to the rate cached in the
+        * wcid entry.  The wcid NULL check is required here; without it
+        * frames submitted without a station entry (e.g. beacons) would
+        * dereference a NULL pointer.
+        */
+       spin_lock_bh(&dev->mt76.lock);
+       if (wcid && (rate->idx < 0 || !rate->count)) {
+               txwi->rate = wcid->tx_rate;
+               max_txpwr_adj = wcid->max_txpwr_adj;
+               nss = wcid->tx_rate_nss;
+       } else {
+               txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss);
+               max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate);
+       }
+       spin_unlock_bh(&dev->mt76.lock);
+
+       txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf,
+                                           max_txpwr_adj);
+       txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
+
+       /* revision-dependent tx stream setup; values per vendor reference */
+       if (mt76xx_rev(dev) >= MT76XX_REV_E4)
+               txwi->txstream = 0x13;
+       else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
+                !(txwi->rate & cpu_to_le16(rate_ht_mask)))
+               txwi->txstream = 0x93;
+
+       if (info->flags & IEEE80211_TX_CTL_LDPC)
+               txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
+       if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
+               txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
+       if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+               txwi_flags |= MT_TXWI_FLAGS_MMPS;
+       if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+               txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+       if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+               txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+       if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+               txwi->pktid |= MT_TXWI_PKTID_PROBE;
+       if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+               /* BA window from the peer's A-MPDU factor, capped at 63 */
+               u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+               ba_size <<= sta->ht_cap.ampdu_factor;
+               ba_size = min_t(int, 63, ba_size - 1);
+               if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+                       ba_size = 0;
+               txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+
+               txwi_flags |= MT_TXWI_FLAGS_AMPDU |
+                        FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+                                   sta->ht_cap.ampdu_density);
+       }
+
+       txwi->flags |= cpu_to_le16(txwi_flags);
+       txwi->len_ctl = cpu_to_le16(skb->len);
+}
+
+/*
+ * Strip the 2-byte L2 pad between the 802.11 header and the payload
+ * by sliding the header forward over it.
+ */
+static void mt76x2_remove_hdr_pad(struct sk_buff *skb)
+{
+       int len = ieee80211_get_hdrlen_from_skb(skb);
+
+       memmove(skb->data + 2, skb->data, len);
+       skb_pull(skb, 2);
+}
+
+/*
+ * Parse the RXWI descriptor of a received frame: strip padding,
+ * translate decryption/rate/signal info into the mac80211 rx status,
+ * and trim the skb to the MPDU length reported by hardware.
+ *
+ * Returns 0 on success, -EINVAL if the reported length exceeds the
+ * skb (frame is dropped by the caller).
+ */
+int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
+                         void *rxi)
+{
+       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+       struct mt76x2_rxwi *rxwi = rxi;
+       u32 ctl = le32_to_cpu(rxwi->ctl);
+       u16 rate = le16_to_cpu(rxwi->rate);
+       int len;
+
+       if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD))
+               mt76x2_remove_hdr_pad(skb);
+
+       /* hardware already decrypted and stripped IV/MMIC */
+       if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) {
+               status->flag |= RX_FLAG_DECRYPTED;
+               status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
+       }
+
+       /* sanity check the hardware-reported MPDU length */
+       len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+       if (WARN_ON_ONCE(len > skb->len))
+               return -EINVAL;
+
+       pskb_trim(skb, len);
+       /* two rx chains; report per-chain RSSI and the better of the two */
+       status->chains = BIT(0) | BIT(1);
+       status->chain_signal[0] = mt76x2_phy_get_rssi(dev, rxwi->rssi[0], 0);
+       status->chain_signal[1] = mt76x2_phy_get_rssi(dev, rxwi->rssi[1], 1);
+       status->signal = max(status->chain_signal[0], status->chain_signal[1]);
+       status->freq = dev->mt76.chandef.chan->center_freq;
+       status->band = dev->mt76.chandef.chan->band;
+
+       mt76x2_mac_process_rate(status, rate);
+
+       return 0;
+}
+
+/*
+ * Decode a hardware tx status rate word into a mac80211
+ * ieee80211_tx_rate (inverse of mt76x2_mac_tx_rate_val).
+ */
+static void
+mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
+                          enum nl80211_band band)
+{
+       u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+       txrate->idx = 0;
+       txrate->flags = 0;
+       txrate->count = 1;
+
+       switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+       case MT_PHY_TYPE_OFDM:
+               /* on 2.4 GHz the OFDM rates sit after the 4 CCK entries */
+               if (band == NL80211_BAND_2GHZ)
+                       idx += 4;
+
+               txrate->idx = idx;
+               return;
+       case MT_PHY_TYPE_CCK:
+               /* indices >= 8 are the short-preamble CCK variants */
+               if (idx >= 8)
+                       idx -= 8;
+
+               txrate->idx = idx;
+               return;
+       case MT_PHY_TYPE_HT_GF:
+               txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+               /* fall through */
+       case MT_PHY_TYPE_HT:
+               txrate->flags |= IEEE80211_TX_RC_MCS;
+               txrate->idx = idx;
+               break;
+       case MT_PHY_TYPE_VHT:
+               txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
+               txrate->idx = idx;
+               break;
+       default:
+               WARN_ON(1);
+               return;
+       }
+
+       /* only HT/VHT reach this point: decode bandwidth and SGI */
+       switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+       case MT_PHY_BW_20:
+               break;
+       case MT_PHY_BW_40:
+               txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+               break;
+       case MT_PHY_BW_80:
+               txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+               break;
+       default:
+               WARN_ON(1);
+               break;
+       }
+
+       if (rate & MT_RXWI_RATE_SGI)
+               txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+}
+
+/*
+ * Build a mac80211 tx status report from a hardware status event.
+ * The hardware only reports the final rate and the retry count, so
+ * the intermediate retry rates are reconstructed as a descending
+ * sequence of rate indices ending at the reported one.
+ */
+static void
+mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
+                         struct ieee80211_tx_info *info,
+                         struct mt76x2_tx_status *st, int n_frames)
+{
+       struct ieee80211_tx_rate *rate = info->status.rates;
+       int cur_idx, last_rate;
+       int i;
+
+       if (!n_frames)
+               return;
+
+       /* slot of the final (successful or last-tried) rate */
+       last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+       mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate,
+                                dev->mt76.chandef.chan->band);
+       /* terminate the rate table for mac80211 */
+       if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+               rate[last_rate + 1].idx = -1;
+
+       /* back-fill earlier attempts with successively higher indices */
+       cur_idx = rate[last_rate].idx + st->retry;
+       for (i = 0; i <= last_rate; i++) {
+               rate[i].flags = rate[last_rate].flags;
+               rate[i].idx = max_t(int, 0, cur_idx - i);
+               rate[i].count = 1;
+       }
+
+       /* remaining retries are attributed to the second-to-last slot */
+       if (last_rate > 0)
+               rate[last_rate - 1].count = st->retry + 1 - last_rate;
+
+       info->status.ampdu_len = n_frames;
+       info->status.ampdu_ack_len = st->success ? n_frames : 0;
+
+       if (st->pktid & MT_TXWI_PKTID_PROBE)
+               info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+       if (st->aggr)
+               info->flags |= IEEE80211_TX_CTL_AMPDU |
+                              IEEE80211_TX_STAT_AMPDU;
+
+       if (!st->ack_req)
+               info->flags |= IEEE80211_TX_CTL_NO_ACK;
+       else if (st->success)
+               info->flags |= IEEE80211_TX_STAT_ACK;
+}
+
+/*
+ * Push one hardware tx status event to mac80211.  Consecutive
+ * identical aggregated events for the same station are batched: the
+ * frame count is accumulated (up to 32) and only reported when the
+ * rate/retry/wcid changes or *update is set by the caller.
+ */
+static void
+mt76x2_send_tx_status(struct mt76x2_dev *dev, struct mt76x2_tx_status *stat,
+                     u8 *update)
+{
+       struct ieee80211_tx_info info = {};
+       struct ieee80211_sta *sta = NULL;
+       struct mt76_wcid *wcid = NULL;
+       struct mt76x2_sta *msta = NULL;
+
+       /* wcid table entries are RCU-protected */
+       rcu_read_lock();
+       if (stat->wcid < ARRAY_SIZE(dev->wcid))
+               wcid = rcu_dereference(dev->wcid[stat->wcid]);
+
+       if (wcid) {
+               void *priv;
+
+               /* recover the ieee80211_sta embedding this wcid */
+               priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
+               sta = container_of(priv, struct ieee80211_sta,
+                                  drv_priv);
+       }
+
+       if (msta && stat->aggr) {
+               u32 stat_val, stat_cache;
+
+               /* compare rate+retry as a single 32-bit value */
+               stat_val = stat->rate;
+               stat_val |= ((u32) stat->retry) << 16;
+               stat_cache = msta->status.rate;
+               stat_cache |= ((u32) msta->status.retry) << 16;
+
+               /* same status as the cached one: just count the frame */
+               if (*update == 0 && stat_val == stat_cache &&
+                   stat->wcid == msta->status.wcid && msta->n_frames < 32) {
+                       msta->n_frames++;
+                       goto out;
+               }
+
+               /* flush the batched status, then start a new batch */
+               mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
+                                         msta->n_frames);
+
+               msta->status = *stat;
+               msta->n_frames = 1;
+               *update = 0;
+       } else {
+               mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
+               *update = 1;
+       }
+
+       ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
+
+out:
+       rcu_read_unlock();
+}
+
+/*
+ * Drain the hardware tx status FIFO.  In irq context the events are
+ * queued into dev->txstatus_fifo for deferred processing (stopping
+ * when the kfifo is full); otherwise they are reported immediately.
+ */
+void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
+{
+       struct mt76x2_tx_status stat = {};
+       unsigned long flags;
+       u8 update = 1;
+
+       if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+               return;
+
+       trace_mac_txstat_poll(dev);
+
+       while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
+               u32 stat1, stat2;
+
+               /*
+                * Both FIFO registers are read under irq_lock so the
+                * EXT/base pair belongs to the same event.  NOTE(review):
+                * the EXT register is read first, presumably because the
+                * base register read pops the FIFO entry — confirm against
+                * the datasheet.
+                */
+               spin_lock_irqsave(&dev->irq_lock, flags);
+               stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
+               stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
+               if (!(stat1 & MT_TX_STAT_FIFO_VALID)) {
+                       spin_unlock_irqrestore(&dev->irq_lock, flags);
+                       break;
+               }
+
+               spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+               stat.valid = 1;
+               stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
+               stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
+               stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
+               stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
+               stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
+               stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
+               stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+               trace_mac_txstat_fetch(dev, &stat);
+
+               if (!irq) {
+                       mt76x2_send_tx_status(dev, &stat, &update);
+                       continue;
+               }
+
+               kfifo_put(&dev->txstatus_fifo, stat);
+       }
+}
+
+/*
+ * Finalize a completed tx frame: record its TXWI identity in the
+ * skb's tx info so the status FIFO events can be matched to it
+ * later, then hand the skb to the tx completion path.
+ */
+static void
+mt76x2_mac_queue_txdone(struct mt76x2_dev *dev, struct sk_buff *skb,
+                       void *txwi_ptr)
+{
+       struct mt76x2_tx_info *txi = mt76x2_skb_tx_info(skb);
+       struct mt76x2_txwi *txwi = txwi_ptr;
+
+       /* pick up any pending status events first */
+       mt76x2_mac_poll_tx_status(dev, false);
+
+       txi->tries = 0;
+       txi->jiffies = jiffies;
+       txi->wcid = txwi->wcid;
+       txi->pktid = txwi->pktid;
+       trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
+       mt76x2_tx_complete(dev, skb);
+}
+
+/*
+ * Deferred (non-irq) half of tx status handling: drain the software
+ * kfifo filled by mt76x2_mac_poll_tx_status() and report each event.
+ */
+void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev)
+{
+       struct mt76x2_tx_status stat;
+       u8 update = 1;
+
+       while (kfifo_get(&dev->txstatus_fifo, &stat))
+               mt76x2_send_tx_status(dev, &stat, &update);
+}
+
+/*
+ * mt76 queue completion callback.  Frames that carried a TXWI go
+ * through the status bookkeeping path; anything else is just freed.
+ */
+void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+                           struct mt76_queue_entry *e, bool flush)
+{
+       struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+
+       if (e->txwi)
+               mt76x2_mac_queue_txdone(dev, e->skb, &e->txwi->txwi);
+       else
+               dev_kfree_skb_any(e->skb);
+}
+
+/*
+ * Copy the key material of @key into the 32-byte @key_data buffer
+ * (zero padded) and return the matching hardware cipher type, or
+ * MT_CIPHER_NONE when there is no key, the key is too large, or the
+ * cipher suite is not supported by the hardware.
+ */
+static enum mt76x2_cipher_type
+mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+       /* always hand back a fully initialized 32-byte buffer */
+       memset(key_data, 0, 32);
+
+       if (!key || key->keylen > 32)
+               return MT_CIPHER_NONE;
+
+       memcpy(key_data, key->key, key->keylen);
+
+       if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
+               return MT_CIPHER_WEP40;
+       if (key->cipher == WLAN_CIPHER_SUITE_WEP104)
+               return MT_CIPHER_WEP104;
+       if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
+               return MT_CIPHER_TKIP;
+       if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
+               return MT_CIPHER_AES_CCMP;
+
+       return MT_CIPHER_NONE;
+}
+
+/*
+ * Initialize a WCID slot: attach it to a BSS index, reset the cached
+ * tx rate registers, and (for indices below 128) program the peer MAC
+ * address.  @mac may be NULL, in which case a zero address is written.
+ */
+void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+       struct mt76_wcid_addr addr = {};
+       u32 attr;
+
+       /* BSS index is split into a 3-bit field plus an extension bit */
+       attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+              FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+       mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+       mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
+       mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
+
+       /* address RAM only covers the first 128 entries — TODO confirm */
+       if (idx >= 128)
+               return;
+
+       if (mac)
+               memcpy(addr.macaddr, mac, ETH_ALEN);
+
+       mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
+}
+
+/*
+ * Install (or clear, when @key is NULL) a pairwise key for a WCID
+ * entry and initialize its IV state.  Returns -EOPNOTSUPP for cipher
+ * suites the hardware cannot offload.
+ */
+int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
+                           struct ieee80211_key_conf *key)
+{
+       enum mt76x2_cipher_type cipher;
+       u8 key_data[32];
+       u8 iv_data[8];
+
+       cipher = mt76x2_mac_get_key_info(key, key_data);
+       if (cipher == MT_CIPHER_NONE && key)
+               return -EOPNOTSUPP;
+
+       mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
+       mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+
+       memset(iv_data, 0, sizeof(iv_data));
+       if (key) {
+               mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
+                              !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+               /* key index in the top bits of IV byte 3; 0x20 is
+                * presumably the ExtIV bit for TKIP/CCMP — confirm
+                * against the 802.11 IV layout
+                */
+               iv_data[3] = key->keyidx << 6;
+               if (cipher >= MT_CIPHER_TKIP)
+                       iv_data[3] |= 0x20;
+       }
+
+       mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+       return 0;
+}
+
+/*
+ * Install (or clear, when @key is NULL) a shared/group key for a
+ * (vif, key index) slot.  The cipher mode is packed into the per-vif
+ * MT_SKEY_MODE register.  Returns -EOPNOTSUPP for unsupported ciphers.
+ */
+int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
+                             struct ieee80211_key_conf *key)
+{
+       enum mt76x2_cipher_type cipher;
+       u8 key_data[32];
+       u32 val;
+
+       cipher = mt76x2_mac_get_key_info(key, key_data);
+       if (cipher == MT_CIPHER_NONE && key)
+               return -EOPNOTSUPP;
+
+       /* update only this slot's cipher mode field */
+       val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+       val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+       val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+       mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+       mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
+                    sizeof(key_data));
+
+       return 0;
+}
+
+/*
+ * Copy a beacon frame (preceded by a freshly built TXWI) into beacon
+ * memory at @offset.  Returns -ENOSPC if the frame plus descriptor
+ * does not fit into one beacon slot.
+ */
+static int
+mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
+{
+       /* slot size is the distance between consecutive beacon offsets */
+       int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
+       struct mt76x2_txwi txwi;
+
+       if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x2_txwi)))
+               return -ENOSPC;
+
+       /* no wcid/sta for beacons; hardware inserts the timestamp */
+       mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL);
+       txwi.flags |= cpu_to_le16(MT_TXWI_FLAGS_TS);
+
+       mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
+       offset += sizeof(txwi);
+
+       mt76_wr_copy(dev, offset, skb->data, skb->len);
+       return 0;
+}
+
+/*
+ * Write (or, when @skb is NULL, clear) the beacon slot @bcn_idx and
+ * update the bypass mask so the hardware only transmits slots that
+ * hold valid data.
+ */
+static int
+__mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb)
+{
+       int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
+       int beacon_addr = dev->beacon_offsets[bcn_idx];
+       int ret = 0;
+       int i;
+
+       /* Prevent corrupt transmissions during update */
+       mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));
+
+       if (skb) {
+               ret = mt76_write_beacon(dev, beacon_addr, skb);
+               if (!ret)
+                       dev->beacon_data_mask |= BIT(bcn_idx) &
+                                                dev->beacon_mask;
+       } else {
+               /* zero out the whole slot */
+               dev->beacon_data_mask &= ~BIT(bcn_idx);
+               for (i = 0; i < beacon_len; i += 4)
+                       mt76_wr(dev, beacon_addr + i, 0);
+       }
+
+       /* re-enable transmission of every slot with valid data */
+       mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);
+
+       return ret;
+}
+
+/*
+ * Set or clear the beacon for @vif_idx and keep the hardware beacon
+ * slots densely packed: interfaces without a beacon do not consume a
+ * slot, so adding/removing one shifts the following interfaces'
+ * slots, which are then rewritten.
+ */
+int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
+                         struct sk_buff *skb)
+{
+       bool force_update = false;
+       int bcn_idx = 0;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
+               if (vif_idx == i) {
+                       /* slot layout changes when a beacon (dis)appears */
+                       force_update = !!dev->beacons[i] ^ !!skb;
+
+                       if (dev->beacons[i])
+                               dev_kfree_skb(dev->beacons[i]);
+
+                       dev->beacons[i] = skb;
+                       __mt76x2_mac_set_beacon(dev, bcn_idx, skb);
+               } else if (force_update && dev->beacons[i]) {
+                       /* rewrite later beacons into their shifted slots */
+                       __mt76x2_mac_set_beacon(dev, bcn_idx, dev->beacons[i]);
+               }
+
+               /* only interfaces with a beacon consume a slot */
+               bcn_idx += !!dev->beacons[i];
+       }
+
+       /* clear stale data in the now-unused trailing slots */
+       for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
+               if (!(dev->beacon_data_mask & BIT(i)))
+                       break;
+
+               __mt76x2_mac_set_beacon(dev, i, NULL);
+       }
+
+       mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
+                      bcn_idx - 1);
+       return 0;
+}
+
+/*
+ * Track per-vif beacon enablement and switch the global beacon timer
+ * machinery (TBTT timer, pre-TBTT interrupt) on when the first vif
+ * enables beaconing and off when the last one disables it.
+ */
+void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val)
+{
+       u8 old_mask = dev->beacon_mask;
+       bool en;
+       u32 reg;
+
+       if (val) {
+               dev->beacon_mask |= BIT(vif_idx);
+       } else {
+               dev->beacon_mask &= ~BIT(vif_idx);
+               mt76x2_mac_set_beacon(dev, vif_idx, NULL);
+       }
+
+       /* global state unchanged (still some/none enabled): done */
+       if (!!old_mask == !!dev->beacon_mask)
+               return;
+
+       en = dev->beacon_mask;
+
+       mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
+       reg = MT_BEACON_TIME_CFG_BEACON_TX |
+             MT_BEACON_TIME_CFG_TBTT_EN |
+             MT_BEACON_TIME_CFG_TIMER_EN;
+       /* reg * en sets all three bits when enabling, clears when not */
+       mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);
+
+       if (en)
+               mt76x2_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
+       else
+               mt76x2_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
+}
+
+/*
+ * Accumulate the hardware channel busy/idle counters into the
+ * per-channel survey state; cc_lock protects the counters against
+ * concurrent readers.
+ */
+void mt76x2_update_channel(struct mt76_dev *mdev)
+{
+       struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+       struct mt76_channel_state *state;
+       u32 active, busy;
+
+       state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
+
+       /* total active time is busy + idle */
+       busy = mt76_rr(dev, MT_CH_BUSY);
+       active = busy + mt76_rr(dev, MT_CH_IDLE);
+
+       spin_lock_bh(&dev->mt76.cc_lock);
+       state->cc_busy += busy;
+       state->cc_active += active;
+       spin_unlock_bh(&dev->mt76.cc_lock);
+}
+
+/*
+ * Periodic MAC housekeeping (rescheduled every MT_CALIBRATE_INTERVAL):
+ * update channel survey data and harvest the tx aggregation counters,
+ * each 32-bit register holding two 16-bit counters.
+ */
+void mt76x2_mac_work(struct work_struct *work)
+{
+       struct mt76x2_dev *dev = container_of(work, struct mt76x2_dev,
+                                           mac_work.work);
+       int i, idx;
+
+       mt76x2_update_channel(&dev->mt76);
+       for (i = 0, idx = 0; i < 16; i++) {
+               u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+               /* low and high halves are two separate counters */
+               dev->aggr_stats[idx++] += val & 0xffff;
+               dev->aggr_stats[idx++] += val >> 16;
+       }
+
+       ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+                                    MT_CALIBRATE_INTERVAL);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
new file mode 100644 (file)
index 0000000..8a8a25e
--- /dev/null
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_MAC_H
+#define __MT76x2_MAC_H
+
+#include "mt76.h"
+
+struct mt76x2_dev;
+struct mt76x2_sta;
+struct mt76x2_vif;
+struct mt76x2_txwi;
+
+/* Decoded tx status event fetched from the hardware status FIFO. */
+struct mt76x2_tx_status {
+       u8 valid:1;
+       u8 success:1;
+       u8 aggr:1;
+       u8 ack_req:1;
+       u8 wcid;
+       u8 pktid;
+       u8 retry;
+       u16 rate;
+} __packed __aligned(2);
+
+/* Per-skb tx bookkeeping kept in the skb's driver status data. */
+struct mt76x2_tx_info {
+       unsigned long jiffies;
+       u8 tries;
+
+       u8 wcid;
+       u8 pktid;
+       u8 retry;
+};
+
+/* Hardware rx descriptor (RXWI) layout, little-endian fields. */
+struct mt76x2_rxwi {
+       __le32 rxinfo;
+
+       __le32 ctl;
+
+       __le16 tid_sn;
+       __le16 rate;
+
+       u8 rssi[4];
+
+       __le32 bbp_rxinfo[4];
+};
+
+#define MT_RXINFO_BA                   BIT(0)
+#define MT_RXINFO_DATA                 BIT(1)
+#define MT_RXINFO_NULL                 BIT(2)
+#define MT_RXINFO_FRAG                 BIT(3)
+#define MT_RXINFO_UNICAST              BIT(4)
+#define MT_RXINFO_MULTICAST            BIT(5)
+#define MT_RXINFO_BROADCAST            BIT(6)
+#define MT_RXINFO_MYBSS                        BIT(7)
+#define MT_RXINFO_CRCERR               BIT(8)
+#define MT_RXINFO_ICVERR               BIT(9)
+#define MT_RXINFO_MICERR               BIT(10)
+#define MT_RXINFO_AMSDU                        BIT(11)
+#define MT_RXINFO_HTC                  BIT(12)
+#define MT_RXINFO_RSSI                 BIT(13)
+#define MT_RXINFO_L2PAD                        BIT(14)
+#define MT_RXINFO_AMPDU                        BIT(15)
+#define MT_RXINFO_DECRYPT              BIT(16)
+#define MT_RXINFO_BSSIDX3              BIT(17)
+#define MT_RXINFO_WAPI_KEY             BIT(18)
+#define MT_RXINFO_PN_LEN               GENMASK(21, 19)
+#define MT_RXINFO_SW_FTYPE0            BIT(22)
+#define MT_RXINFO_SW_FTYPE1            BIT(23)
+#define MT_RXINFO_PROBE_RESP           BIT(24)
+#define MT_RXINFO_BEACON               BIT(25)
+#define MT_RXINFO_DISASSOC             BIT(26)
+#define MT_RXINFO_DEAUTH               BIT(27)
+#define MT_RXINFO_ACTION               BIT(28)
+#define MT_RXINFO_TCP_SUM_ERR          BIT(30)
+#define MT_RXINFO_IP_SUM_ERR           BIT(31)
+
+#define MT_RXWI_CTL_WCID               GENMASK(7, 0)
+#define MT_RXWI_CTL_KEY_IDX            GENMASK(9, 8)
+#define MT_RXWI_CTL_BSS_IDX            GENMASK(12, 10)
+#define MT_RXWI_CTL_UDF                        GENMASK(15, 13)
+#define MT_RXWI_CTL_MPDU_LEN           GENMASK(29, 16)
+#define MT_RXWI_CTL_EOF                        BIT(31)
+
+#define MT_RXWI_TID                    GENMASK(3, 0)
+#define MT_RXWI_SN                     GENMASK(15, 4)
+
+#define MT_RXWI_RATE_INDEX             GENMASK(5, 0)
+#define MT_RXWI_RATE_LDPC              BIT(6)
+#define MT_RXWI_RATE_BW                        GENMASK(8, 7)
+#define MT_RXWI_RATE_SGI               BIT(9)
+#define MT_RXWI_RATE_STBC              BIT(10)
+#define MT_RXWI_RATE_LDPC_EXSYM                BIT(11)
+#define MT_RXWI_RATE_PHY               GENMASK(15, 13)
+
+#define MT_RATE_INDEX_VHT_IDX          GENMASK(3, 0)
+#define MT_RATE_INDEX_VHT_NSS          GENMASK(5, 4)
+
+#define MT_TX_PWR_ADJ                  GENMASK(3, 0)
+
+enum mt76x2_phy_bandwidth {
+       MT_PHY_BW_20,
+       MT_PHY_BW_40,
+       MT_PHY_BW_80,
+};
+
+#define MT_TXWI_FLAGS_FRAG             BIT(0)
+#define MT_TXWI_FLAGS_MMPS             BIT(1)
+#define MT_TXWI_FLAGS_CFACK            BIT(2)
+#define MT_TXWI_FLAGS_TS               BIT(3)
+#define MT_TXWI_FLAGS_AMPDU            BIT(4)
+#define MT_TXWI_FLAGS_MPDU_DENSITY     GENMASK(7, 5)
+#define MT_TXWI_FLAGS_TXOP             GENMASK(9, 8)
+#define MT_TXWI_FLAGS_NDPS             BIT(10)
+#define MT_TXWI_FLAGS_RTSBWSIG         BIT(11)
+#define MT_TXWI_FLAGS_NDP_BW           GENMASK(13, 12)
+#define MT_TXWI_FLAGS_SOUND            BIT(14)
+#define MT_TXWI_FLAGS_TX_RATE_LUT      BIT(15)
+
+#define MT_TXWI_ACK_CTL_REQ            BIT(0)
+#define MT_TXWI_ACK_CTL_NSEQ           BIT(1)
+#define MT_TXWI_ACK_CTL_BA_WINDOW      GENMASK(7, 2)
+
+#define MT_TXWI_PKTID_PROBE            BIT(7)
+
+/* Hardware tx descriptor (TXWI) layout, little-endian fields. */
+struct mt76x2_txwi {
+       __le16 flags;
+       __le16 rate;
+       u8 ack_ctl;
+       u8 wcid;
+       __le16 len_ctl;
+       __le32 iv;
+       __le32 eiv;
+       u8 aid;
+       u8 txstream;
+       u8 ctl2;
+       u8 pktid;
+} __packed __aligned(4);
+
+/*
+ * Return the driver's per-skb tx info, stored in the
+ * status_driver_data area of the mac80211 tx info.
+ */
+static inline struct mt76x2_tx_info *
+mt76x2_skb_tx_info(struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+       return (void *) info->status.status_driver_data;
+}
+
+int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard);
+int mt76x2_mac_start(struct mt76x2_dev *dev);
+void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force);
+void mt76x2_mac_resume(struct mt76x2_dev *dev);
+void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr);
+
+int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
+                         void *rxi);
+void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
+                          struct sk_buff *skb, struct mt76_wcid *wcid,
+                          struct ieee80211_sta *sta);
+void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
+int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
+                           struct ieee80211_key_conf *key);
+void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
+                             const struct ieee80211_tx_rate *rate);
+void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop);
+
+int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
+                               struct ieee80211_key_conf *key);
+
+int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
+                         struct sk_buff *skb);
+void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val);
+
+void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq);
+void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev);
+
+void mt76x2_mac_work(struct work_struct *work);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
new file mode 100644 (file)
index 0000000..2cef48e
--- /dev/null
@@ -0,0 +1,545 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+/* mac80211 .start callback: bring the MAC and then the PHY up under
+ * dev->mutex, schedule the periodic MAC work, and flag the device as
+ * running.  Returns 0 or a negative errno from the failed start step.
+ */
+static int
+mt76x2_start(struct ieee80211_hw *hw)
+{
+       struct mt76x2_dev *dev = hw->priv;
+       int ret;
+
+       mutex_lock(&dev->mutex);
+
+       ret = mt76x2_mac_start(dev);
+       if (ret)
+               goto out;
+
+       ret = mt76x2_phy_start(dev);
+       if (ret)
+               goto out;
+
+       ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+                                    MT_CALIBRATE_INTERVAL);
+
+       set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+out:
+       mutex_unlock(&dev->mutex);
+       return ret;
+}
+
+/* mac80211 .stop callback: clear the RUNNING flag before shutting the
+ * hardware down so concurrent paths that test it stop submitting work.
+ */
+static void
+mt76x2_stop(struct ieee80211_hw *hw)
+{
+       struct mt76x2_dev *dev = hw->priv;
+
+       mutex_lock(&dev->mutex);
+       clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+       mt76x2_stop_hardware(dev);
+       mutex_unlock(&dev->mutex);
+}
+
+/* Initialize the driver-private part of a mac80211 TX queue and point it
+ * at the right WCID: the station's own WCID for per-STA queues, or the
+ * interface group WCID for queues without a station.  A NULL txq is a
+ * no-op.
+ */
+static void
+mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)
+{
+       struct mt76_txq *mtxq;
+       struct mt76_wcid *wcid;
+
+       if (!txq)
+               return;
+
+       if (txq->sta) {
+               struct mt76x2_sta *msta;
+
+               msta = (struct mt76x2_sta *) txq->sta->drv_priv;
+               wcid = &msta->wcid;
+       } else {
+               struct mt76x2_vif *mvif;
+
+               mvif = (struct mt76x2_vif *) txq->vif->drv_priv;
+               wcid = &mvif->group_wcid;
+       }
+
+       mtxq = (struct mt76_txq *) txq->drv_priv;
+       mtxq->wcid = wcid;
+
+       mt76_txq_init(&dev->mt76, txq);
+}
+
+/* mac80211 .add_interface callback: pick a BSS index for the new
+ * interface and initialize its group WCID and TX queue.
+ *
+ * Locally-administered MAC addresses (bit 1 of the first octet set) hash
+ * into indices 1-8; otherwise index 0 (the primary MAC address) is used.
+ */
+static int
+mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+       struct mt76x2_dev *dev = hw->priv;
+       struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+       unsigned int idx = 0;
+       int ret = 0;
+
+       if (vif->addr[0] & BIT(1))
+               idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
+
+       /*
+        * Client mode typically only has one configurable BSSID register,
+        * which is used for bssidx=0. This is linked to the MAC address.
+        * Since mac80211 allows changing interface types, and we cannot
+        * force the use of the primary MAC address for a station mode
+        * interface, we need some other way of configuring a per-interface
+        * remote BSSID.
+        * The hardware provides an AP-Client feature, where bssidx 0-7 are
+        * used for AP mode and bssidx 8-15 for client mode.
+        * We shift the station interface bss index by 8 to force the
+        * hardware to recognize the BSSID.
+        * The resulting bssidx mismatch for unicast frames is ignored by hw.
+        */
+       if (vif->type == NL80211_IFTYPE_STATION)
+               idx += 8;
+
+       mvif->idx = idx;
+       /* group WCIDs are allocated from the top of the table, below idx */
+       mvif->group_wcid.idx = 254 - idx;
+       mvif->group_wcid.hw_key_idx = -1;
+       mt76x2_txq_init(dev, vif->txq);
+
+       return ret;
+}
+
+/* mac80211 .remove_interface callback: tear down the per-VIF TX queue. */
+static void
+mt76x2_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+       struct mt76x2_dev *dev = hw->priv;
+
+       mt76_txq_remove(&dev->mt76, vif->txq);
+}
+
+/* Switch the hardware to a new operating channel.
+ *
+ * The pre-TBTT tasklet is disabled and the MAC fully stopped around the
+ * PHY reprogramming so no beacons or frames are in flight while the
+ * channel changes; calibration work is cancelled for the same reason.
+ * Returns the result of mt76x2_phy_set_channel().
+ */
+static int
+mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef)
+{
+       int ret;
+
+       mt76_set_channel(&dev->mt76);
+
+       tasklet_disable(&dev->pre_tbtt_tasklet);
+       cancel_delayed_work_sync(&dev->cal_work);
+
+       mt76x2_mac_stop(dev, true);
+       ret = mt76x2_phy_set_channel(dev, chandef);
+
+       /* channel cycle counters read-and-clear */
+       mt76_rr(dev, MT_CH_IDLE);
+       mt76_rr(dev, MT_CH_BUSY);
+
+       mt76x2_dfs_init_params(dev);
+
+       mt76x2_mac_resume(dev);
+       tasklet_enable(&dev->pre_tbtt_tasklet);
+
+       return ret;
+}
+
+/* mac80211 .config callback: apply TX power and/or channel changes.
+ *
+ * power_level (dBm) is stored doubled, i.e. in half-dB units (see
+ * mt76x2_get_txpower, which divides by 2 again).  Queues are stopped
+ * around a channel switch so mac80211 does not feed us frames mid-switch.
+ */
+static int
+mt76x2_config(struct ieee80211_hw *hw, u32 changed)
+{
+       struct mt76x2_dev *dev = hw->priv;
+       int ret = 0;
+
+       mutex_lock(&dev->mutex);
+
+       if (changed & IEEE80211_CONF_CHANGE_POWER) {
+               dev->txpower_conf = hw->conf.power_level * 2;
+
+               /* only touch the hardware while it is up */
+               if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) {
+                       mt76x2_phy_set_txpower(dev);
+                       mt76x2_tx_set_txpwr_auto(dev, dev->txpower_conf);
+               }
+       }
+
+       if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+               ieee80211_stop_queues(hw);
+               ret = mt76x2_set_channel(dev, &hw->conf.chandef);
+               ieee80211_wake_queues(hw);
+       }
+
+       mutex_unlock(&dev->mutex);
+
+       return ret;
+}
+
+/* mac80211 .configure_filter callback: translate FIF_* flags into the
+ * MT_RX_FILTR_CFG register.  For each flag, MT76_FILTER() records that we
+ * honour it in *total_flags and sets the matching hardware bit when the
+ * flag is NOT requested (the register bits appear to mean "filter out
+ * this frame type" — NOTE(review): confirm against the datasheet).
+ */
+static void
+mt76x2_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+                       unsigned int *total_flags, u64 multicast)
+{
+       struct mt76x2_dev *dev = hw->priv;
+       u32 flags = 0;
+
+#define MT76_FILTER(_flag, _hw) do { \
+               flags |= *total_flags & FIF_##_flag;                    \
+               dev->rxfilter &= ~(_hw);                                \
+               dev->rxfilter |= !(flags & FIF_##_flag) * (_hw);        \
+       } while (0)
+
+       mutex_lock(&dev->mutex);
+
+       /* always receive frames from other BSSes */
+       dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+       MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+       MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+       MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+                            MT_RX_FILTR_CFG_CTS |
+                            MT_RX_FILTR_CFG_CFEND |
+                            MT_RX_FILTR_CFG_CFACK |
+                            MT_RX_FILTR_CFG_BA |
+                            MT_RX_FILTR_CFG_CTRL_RSV);
+       MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+       *total_flags = flags;
+       mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+       mutex_unlock(&dev->mutex);
+}
+
+/* mac80211 .bss_info_changed callback: push BSSID, beacon interval,
+ * beaconing state and slot time changes into the hardware.
+ */
+static void
+mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                       struct ieee80211_bss_conf *info, u32 changed)
+{
+       struct mt76x2_dev *dev = hw->priv;
+       struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+
+       mutex_lock(&dev->mutex);
+
+       if (changed & BSS_CHANGED_BSSID)
+               mt76x2_mac_set_bssid(dev, mvif->idx, info->bssid);
+
+       /* beacon_int << 4: register interval appears to be in 1/16 TU
+        * units — NOTE(review): confirm against the datasheet
+        */
+       if (changed & BSS_CHANGED_BEACON_INT)
+               mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
+                              MT_BEACON_TIME_CFG_INTVAL,
+                              info->beacon_int << 4);
+
+       /* keep the pre-TBTT tasklet from racing the beacon reconfig */
+       if (changed & BSS_CHANGED_BEACON_ENABLED) {
+               tasklet_disable(&dev->pre_tbtt_tasklet);
+               mt76x2_mac_set_beacon_enable(dev, mvif->idx,
+                                            info->enable_beacon);
+               tasklet_enable(&dev->pre_tbtt_tasklet);
+       }
+
+       if (changed & BSS_CHANGED_ERP_SLOT) {
+               int slottime = info->use_short_slot ? 9 : 20;
+
+               dev->slottime = slottime;
+               mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
+                              MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
+       }
+
+       mutex_unlock(&dev->mutex);
+}
+
+/* mac80211 .sta_add callback: allocate a hardware WCID slot for the new
+ * station, program its MAC address, and initialize its TX queues.
+ * Returns -ENOSPC when the WCID table is full.
+ */
+static int
+mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+              struct ieee80211_sta *sta)
+{
+       struct mt76x2_dev *dev = hw->priv;
+       struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+       struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+       int ret = 0;
+       int idx = 0;
+       int i;
+
+       mutex_lock(&dev->mutex);
+
+       idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
+       if (idx < 0) {
+               ret = -ENOSPC;
+               goto out;
+       }
+
+       msta->wcid.idx = idx;
+       msta->wcid.hw_key_idx = -1;
+       mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+       mt76x2_mac_wcid_set_drop(dev, idx, false);
+       for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+               mt76x2_txq_init(dev, sta->txq[i]);
+
+       /* publish the wcid only after it is fully set up */
+       rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+
+out:
+       mutex_unlock(&dev->mutex);
+
+       return ret;
+}
+
+/* mac80211 .sta_remove callback: unpublish the station's WCID, tear down
+ * its TX queues and release the hardware slot.
+ *
+ * NOTE(review): the wcid index is freed for reuse right after the RCU
+ * pointer is cleared, with no synchronize_rcu() in between — presumably
+ * safe because mac80211 guarantees no RX for this station at this point;
+ * confirm.
+ */
+static int
+mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                 struct ieee80211_sta *sta)
+{
+       struct mt76x2_dev *dev = hw->priv;
+       struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+       int idx = msta->wcid.idx;
+       int i;
+
+       mutex_lock(&dev->mutex);
+       rcu_assign_pointer(dev->wcid[idx], NULL);
+       for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+               mt76_txq_remove(&dev->mt76, sta->txq[i]);
+       mt76x2_mac_wcid_set_drop(dev, idx, true);
+       mt76_wcid_free(dev->wcid_mask, idx);
+       mt76x2_mac_wcid_setup(dev, idx, 0, NULL);
+       mutex_unlock(&dev->mutex);
+
+       return 0;
+}
+
+/* mac80211 .sta_notify callback: mirror the station's power-save state
+ * into the hardware drop flag.  On sleep, also flush the station's
+ * pending TX queues so frames are buffered for later delivery.
+ */
+static void
+mt76x2_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                 enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
+{
+       struct mt76x2_dev *dev = hw->priv;
+       struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+       int wcid_idx = msta->wcid.idx;
+
+       if (cmd == STA_NOTIFY_SLEEP) {
+               mt76x2_mac_wcid_set_drop(dev, wcid_idx, true);
+               mt76_stop_tx_queues(&dev->mt76, sta, true);
+       } else if (cmd == STA_NOTIFY_AWAKE) {
+               mt76x2_mac_wcid_set_drop(dev, wcid_idx, false);
+       }
+}
+
+/* mac80211 .set_key callback: install (SET_KEY) or remove a key.
+ * Pairwise keys go into the station's WCID key slot; group keys go into
+ * the per-VIF shared key slots.  On removal, key is forced to NULL so the
+ * helpers below clear the slot.
+ */
+static int
+mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+              struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+              struct ieee80211_key_conf *key)
+{
+       struct mt76x2_dev *dev = hw->priv;
+       struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+       struct mt76x2_sta *msta;
+       struct mt76_wcid *wcid;
+       int idx = key->keyidx;
+       int ret;
+
+       /*
+        * The hardware does not support per-STA RX GTK, fall back
+        * to software mode for these.
+        */
+       if ((vif->type == NL80211_IFTYPE_ADHOC ||
+            vif->type == NL80211_IFTYPE_MESH_POINT) &&
+           (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+            key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+           !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+               return -EOPNOTSUPP;
+
+       /* no station: operate on the interface's group WCID */
+       msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL;
+       wcid = msta ? &msta->wcid : &mvif->group_wcid;
+
+       if (cmd == SET_KEY) {
+               key->hw_key_idx = wcid->idx;
+               wcid->hw_key_idx = idx;
+       } else {
+               if (idx == wcid->hw_key_idx)
+                       wcid->hw_key_idx = -1;
+
+               key = NULL;
+       }
+
+       if (!msta) {
+               /* group key: also clear/update the WCID slot it occupied */
+               if (key || wcid->hw_key_idx == idx) {
+                       ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key);
+                       if (ret)
+                               return ret;
+               }
+
+               return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key);
+       }
+
+       return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+
+/* mac80211 .conf_tx callback: program the EDCA parameters (TXOP, AIFS,
+ * CWmin/CWmax) for one AC into the per-queue EDCA register and the
+ * per-field WMM registers.  CW values are converted to exponents via
+ * fls(); the 5/10 defaults apply when mac80211 passes 0.
+ */
+static int
+mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+              const struct ieee80211_tx_queue_params *params)
+{
+       struct mt76x2_dev *dev = hw->priv;
+       u8 cw_min = 5, cw_max = 10;
+       u32 val;
+
+       if (params->cw_min)
+               cw_min = fls(params->cw_min);
+       if (params->cw_max)
+               cw_max = fls(params->cw_max);
+
+       val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
+             FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+             FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+             FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
+       mt76_wr(dev, MT_EDCA_CFG_AC(queue), val);
+
+       val = mt76_rr(dev, MT_WMM_TXOP(queue));
+       val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(queue));
+       val |= params->txop << MT_WMM_TXOP_SHIFT(queue);
+       mt76_wr(dev, MT_WMM_TXOP(queue), val);
+
+       val = mt76_rr(dev, MT_WMM_AIFSN);
+       val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(queue));
+       val |= params->aifs << MT_WMM_AIFSN_SHIFT(queue);
+       mt76_wr(dev, MT_WMM_AIFSN, val);
+
+       val = mt76_rr(dev, MT_WMM_CWMIN);
+       val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(queue));
+       val |= cw_min << MT_WMM_CWMIN_SHIFT(queue);
+       mt76_wr(dev, MT_WMM_CWMIN, val);
+
+       val = mt76_rr(dev, MT_WMM_CWMAX);
+       val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(queue));
+       val |= cw_max << MT_WMM_CWMAX_SHIFT(queue);
+       mt76_wr(dev, MT_WMM_CWMAX, val);
+
+       return 0;
+}
+
+/* mac80211 .sw_scan_start callback: suppress beacons for the duration of
+ * the scan.  The tasklet stays disabled until mt76x2_sw_scan_complete()
+ * re-enables it.
+ */
+static void
+mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+              const u8 *mac)
+{
+       struct mt76x2_dev *dev = hw->priv;
+
+       tasklet_disable(&dev->pre_tbtt_tasklet);
+       set_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+/* mac80211 .sw_scan_complete callback: resume beaconing and reschedule
+ * any TX queues that backed up while scanning.
+ */
+static void
+mt76x2_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+       struct mt76x2_dev *dev = hw->priv;
+
+       clear_bit(MT76_SCANNING, &dev->mt76.state);
+       tasklet_enable(&dev->pre_tbtt_tasklet);
+       mt76_txq_schedule_all(&dev->mt76);
+}
+
+/* mac80211 .flush callback: intentionally a no-op — this driver provides
+ * no mechanism to wait for queued frames to drain.
+ */
+static void
+mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+            u32 queues, bool drop)
+{
+}
+
+/* mac80211 .get_txpower callback: txpower_cur is kept in half-dB units,
+ * so halve it to report dBm.
+ */
+static int
+mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm)
+{
+       struct mt76x2_dev *dev = hw->priv;
+
+       *dbm = dev->txpower_cur / 2;
+       return 0;
+}
+
+/* mac80211 .ampdu_action callback: program per-TID RX BA session bits in
+ * the station's WCID table entry and track TX aggregation state on the
+ * driver TX queue.  Returns 0, or -EINVAL when the station has no TX
+ * queue for the requested TID.
+ */
+static int
+mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                   struct ieee80211_ampdu_params *params)
+{
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       struct ieee80211_sta *sta = params->sta;
+       struct mt76x2_dev *dev = hw->priv;
+       struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+       struct ieee80211_txq *txq = sta->txq[params->tid];
+       struct mt76_txq *mtxq;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
+
+       /* validate the queue before touching its drv_priv area; the
+        * original code dereferenced txq ahead of this NULL check
+        */
+       if (!txq)
+               return -EINVAL;
+
+       mtxq = (struct mt76_txq *) txq->drv_priv;
+
+       switch (action) {
+       case IEEE80211_AMPDU_RX_START:
+               mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+               break;
+       case IEEE80211_AMPDU_RX_STOP:
+               mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
+                          BIT(16 + tid));
+               break;
+       case IEEE80211_AMPDU_TX_OPERATIONAL:
+               mtxq->aggr = true;
+               mtxq->send_bar = false;
+               ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+               break;
+       case IEEE80211_AMPDU_TX_STOP_FLUSH:
+       case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+               mtxq->aggr = false;
+               ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+               break;
+       case IEEE80211_AMPDU_TX_START:
+               mtxq->agg_ssn = *ssn << 4;
+               ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+               break;
+       case IEEE80211_AMPDU_TX_STOP_CONT:
+               mtxq->aggr = false;
+               ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+               break;
+       }
+
+       return 0;
+}
+
+/* mac80211 .sta_rate_tbl_update callback: push the first rate of the new
+ * rate table into the station's WCID entry and cache the matching max
+ * TX power adjustment.
+ *
+ * NOTE(review): rcu_dereference() is used without a local
+ * rcu_read_lock(); this assumes the caller holds the RCU read lock —
+ * confirm.
+ */
+static void
+mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                          struct ieee80211_sta *sta)
+{
+       struct mt76x2_dev *dev = hw->priv;
+       struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+       struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
+       struct ieee80211_tx_rate rate = {};
+
+       if (!rates)
+               return;
+
+       rate.idx = rates->rate[0].idx;
+       rate.flags = rates->rate[0].flags;
+       mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+       msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate);
+}
+
+/* mac80211 .set_coverage_class callback: store the new coverage class and
+ * recompute the hardware ACK timeout accordingly.
+ */
+static void mt76x2_set_coverage_class(struct ieee80211_hw *hw,
+                                     s16 coverage_class)
+{
+       struct mt76x2_dev *dev = hw->priv;
+
+       mutex_lock(&dev->mutex);
+       dev->coverage_class = coverage_class;
+       mt76x2_set_tx_ackto(dev);
+       mutex_unlock(&dev->mutex);
+}
+
+/* mac80211 callback table for the mt76x2 driver; generic mt76_* handlers
+ * come from the shared mt76 core.
+ */
+const struct ieee80211_ops mt76x2_ops = {
+       .tx = mt76x2_tx,
+       .start = mt76x2_start,
+       .stop = mt76x2_stop,
+       .add_interface = mt76x2_add_interface,
+       .remove_interface = mt76x2_remove_interface,
+       .config = mt76x2_config,
+       .configure_filter = mt76x2_configure_filter,
+       .bss_info_changed = mt76x2_bss_info_changed,
+       .sta_add = mt76x2_sta_add,
+       .sta_remove = mt76x2_sta_remove,
+       .sta_notify = mt76x2_sta_notify,
+       .set_key = mt76x2_set_key,
+       .conf_tx = mt76x2_conf_tx,
+       .sw_scan_start = mt76x2_sw_scan,
+       .sw_scan_complete = mt76x2_sw_scan_complete,
+       .flush = mt76x2_flush,
+       .ampdu_action = mt76x2_ampdu_action,
+       .get_txpower = mt76x2_get_txpower,
+       .wake_tx_queue = mt76_wake_tx_queue,
+       .sta_rate_tbl_update = mt76x2_sta_rate_tbl_update,
+       .release_buffered_frames = mt76_release_buffered_frames,
+       .set_coverage_class = mt76x2_set_coverage_class,
+       .get_survey = mt76_get_survey,
+};
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
new file mode 100644 (file)
index 0000000..d45737e
--- /dev/null
@@ -0,0 +1,451 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+
+#include "mt76x2.h"
+#include "mt76x2_mcu.h"
+#include "mt76x2_dma.h"
+#include "mt76x2_eeprom.h"
+
+/* On-disk header of the MT7662 firmware image; the ILM and DLM sections
+ * follow the header back to back (see mt76pci_load_firmware()).
+ */
+struct mt76x2_fw_header {
+       __le32 ilm_len;
+       __le32 dlm_len;
+       __le16 build_ver;
+       __le16 fw_ver;
+       u8 pad[4];
+       char build_time[16];
+};
+
+/* On-disk header of the MT7662 ROM patch image; the patch payload follows
+ * immediately after (see mt76pci_load_rom_patch()).
+ */
+struct mt76x2_patch_header {
+       char build_time[16];
+       char platform[4];
+       char hw_version[4];
+       char patch_version[4];
+       u8 pad[2];
+};
+
+/* Copy an MCU command payload into a freshly allocated skb.
+ *
+ * Returns the skb, or NULL on allocation failure; callers hand the result
+ * straight to mt76x2_mcu_msg_send(), which rejects a NULL skb.  The
+ * original code dereferenced the unchecked alloc_skb() result.
+ */
+static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len)
+{
+       struct sk_buff *skb;
+
+       skb = alloc_skb(len, GFP_KERNEL);
+       if (!skb)
+               return NULL;
+
+       memcpy(skb_put(skb, len), data, len);
+
+       return skb;
+}
+
+/* Wait until @expires (a jiffies deadline) for an MCU response skb.
+ * Returns the dequeued response, or NULL if the deadline has already
+ * passed or nothing arrived in time.
+ */
+static struct sk_buff *
+mt76x2_mcu_get_response(struct mt76x2_dev *dev, unsigned long expires)
+{
+       unsigned long timeout;
+
+       if (!time_is_after_jiffies(expires))
+               return NULL;
+
+       timeout = expires - jiffies;
+       wait_event_timeout(dev->mcu.wait, !skb_queue_empty(&dev->mcu.res_q),
+                          timeout);
+       return skb_dequeue(&dev->mcu.res_q);
+}
+
+/* Send an MCU command and wait (up to 1s) for the matching response.
+ *
+ * Takes ownership of @skb (consumed by the TX queue).  Responses with a
+ * stale sequence number are discarded until the matching one arrives.
+ * Returns 0 on success, -EINVAL for a NULL skb (e.g. failed allocation
+ * in the caller), or -ETIMEDOUT.
+ */
+static int
+mt76x2_mcu_msg_send(struct mt76x2_dev *dev, struct sk_buff *skb,
+                   enum mcu_cmd cmd)
+{
+       unsigned long expires = jiffies + HZ;
+       int ret;
+       u8 seq;
+
+       if (!skb)
+               return -EINVAL;
+
+       mutex_lock(&dev->mcu.mutex);
+
+       /* sequence number 0 is skipped — presumably reserved/unmatched */
+       seq = ++dev->mcu.msg_seq & 0xf;
+       if (!seq)
+               seq = ++dev->mcu.msg_seq & 0xf;
+
+       ret = mt76x2_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
+       if (ret)
+               goto out;
+
+       while (1) {
+               u32 *rxfce;
+               bool check_seq = false;
+
+               skb = mt76x2_mcu_get_response(dev, expires);
+               if (!skb) {
+                       dev_err(dev->mt76.dev,
+                               "MCU message %d (seq %d) timed out\n", cmd,
+                               seq);
+                       ret = -ETIMEDOUT;
+                       break;
+               }
+
+               /* RX path stores the FCE info word in the skb cb */
+               rxfce = (u32 *) skb->cb;
+
+               if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
+                       check_seq = true;
+
+               dev_kfree_skb(skb);
+               if (check_seq)
+                       break;
+       }
+
+out:
+       mutex_unlock(&dev->mcu.mutex);
+
+       return ret;
+}
+
+/* Upload the ROM patch firmware into MCU memory and trigger it.
+ *
+ * On non-MT7612 parts a hardware semaphore guards the patch area; it is
+ * always released on exit.  Skips the upload if the "patch applied" bit
+ * is already set.  Returns 0 on success or if already applied, negative
+ * errno otherwise.
+ */
+static int
+mt76pci_load_rom_patch(struct mt76x2_dev *dev)
+{
+       const struct firmware *fw = NULL;
+       struct mt76x2_patch_header *hdr;
+       bool rom_protect = !is_mt7612(dev);
+       int len, ret = 0;
+       __le32 *cur;
+       u32 patch_mask, patch_reg;
+
+       if (rom_protect && !mt76_poll(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
+               dev_err(dev->mt76.dev,
+                       "Could not get hardware semaphore for ROM PATCH\n");
+               return -ETIMEDOUT;
+       }
+
+       /* the "patch applied" status bit moved between hw revisions */
+       if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
+               patch_mask = BIT(0);
+               patch_reg = MT_MCU_CLOCK_CTL;
+       } else {
+               patch_mask = BIT(1);
+               patch_reg = MT_MCU_COM_REG0;
+       }
+
+       if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
+               dev_info(dev->mt76.dev, "ROM patch already applied\n");
+               goto out;
+       }
+
+       ret = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev);
+       if (ret)
+               goto out;
+
+       if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
+               ret = -EIO;
+               dev_err(dev->mt76.dev, "Failed to load firmware\n");
+               goto out;
+       }
+
+       hdr = (struct mt76x2_patch_header *) fw->data;
+       dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
+
+       /* map the patch region through the PCIe remap window */
+       mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ROM_PATCH_OFFSET);
+
+       cur = (__le32 *) (fw->data + sizeof(*hdr));
+       len = fw->size - sizeof(*hdr);
+       mt76_wr_copy(dev, MT_MCU_ROM_PATCH_ADDR, cur, len);
+
+       mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+
+       /* Trigger ROM */
+       mt76_wr(dev, MT_MCU_INT_LEVEL, 4);
+
+       if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 2000)) {
+               dev_err(dev->mt76.dev, "Failed to load ROM patch\n");
+               ret = -ETIMEDOUT;
+       }
+
+out:
+       /* release semaphore */
+       if (rom_protect)
+               mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
+       release_firmware(fw);
+       return ret;
+}
+
+/* Load the main MT7662 firmware: validate the image (header + ILM + DLM
+ * sizes must add up exactly), upload both sections through the PCIe
+ * remap window, trigger the MCU and poll up to ~2s for it to come up.
+ * Returns 0 on success, -ENOENT for an invalid image, -ETIMEDOUT if the
+ * firmware never starts.
+ */
+static int
+mt76pci_load_firmware(struct mt76x2_dev *dev)
+{
+       const struct firmware *fw;
+       const struct mt76x2_fw_header *hdr;
+       int i, len, ret;
+       __le32 *cur;
+       u32 offset, val;
+
+       ret = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev);
+       if (ret)
+               return ret;
+
+       if (!fw || !fw->data || fw->size < sizeof(*hdr))
+               goto error;
+
+       hdr = (const struct mt76x2_fw_header *) fw->data;
+
+       len = sizeof(*hdr);
+       len += le32_to_cpu(hdr->ilm_len);
+       len += le32_to_cpu(hdr->dlm_len);
+
+       if (fw->size != len)
+               goto error;
+
+       val = le16_to_cpu(hdr->fw_ver);
+       dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
+                (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
+
+       val = le16_to_cpu(hdr->build_ver);
+       dev_info(dev->mt76.dev, "Build: %x\n", val);
+       dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
+
+       /* upload ILM (instruction local memory) */
+       cur = (__le32 *) (fw->data + sizeof(*hdr));
+       len = le32_to_cpu(hdr->ilm_len);
+
+       mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ILM_OFFSET);
+       mt76_wr_copy(dev, MT_MCU_ILM_ADDR, cur, len);
+
+       /* upload DLM (data local memory); its address moved in rev E3 */
+       cur += len / sizeof(*cur);
+       len = le32_to_cpu(hdr->dlm_len);
+
+       if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+               offset = MT_MCU_DLM_ADDR_E3;
+       else
+               offset = MT_MCU_DLM_ADDR;
+
+       mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET);
+       mt76_wr_copy(dev, offset, cur, len);
+
+       mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+
+       val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
+       if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1)
+               mt76_set(dev, MT_MCU_COM_REG0, BIT(30));
+
+       /* trigger firmware */
+       mt76_wr(dev, MT_MCU_INT_LEVEL, 2);
+       for (i = 200; i > 0; i--) {
+               val = mt76_rr(dev, MT_MCU_COM_REG0);
+
+               /* bit 0 of COM_REG0 signals the firmware is running */
+               if (val & 1)
+                       break;
+
+               msleep(10);
+       }
+
+       if (!i) {
+               dev_err(dev->mt76.dev, "Firmware failed to start\n");
+               release_firmware(fw);
+               return -ETIMEDOUT;
+       }
+
+       dev_info(dev->mt76.dev, "Firmware running!\n");
+
+       release_firmware(fw);
+
+       return ret;
+
+error:
+       dev_err(dev->mt76.dev, "Invalid firmware\n");
+       release_firmware(fw);
+       return -ENOENT;
+}
+
+/* Send a CMD_FUN_SET_OP message selecting firmware function @func with
+ * argument @val.  Returns the mt76x2_mcu_msg_send() result.
+ */
+static int
+mt76x2_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
+                          u32 val)
+{
+       struct sk_buff *skb;
+       struct {
+           __le32 id;
+           __le32 value;
+       } __packed __aligned(4) msg = {
+           .id = cpu_to_le32(func),
+           .value = cpu_to_le32(val),
+       };
+
+       skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+       return mt76x2_mcu_msg_send(dev, skb, CMD_FUN_SET_OP);
+}
+
+/* Ask the firmware to load a calibration register (CR) set for the given
+ * CR mode, temperature level and channel.  The cfg word carries NIC
+ * configuration bytes from the EEPROM plus a valid flag in bit 31.
+ */
+int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
+                      u8 channel)
+{
+       struct sk_buff *skb;
+       struct {
+               u8 cr_mode;
+               u8 temp;
+               u8 ch;
+               u8 _pad0;
+
+               __le32 cfg;
+       } __packed __aligned(4) msg = {
+               .cr_mode = type,
+               .temp = temp_level,
+               .ch = channel,
+       };
+       u32 val;
+
+       val = BIT(31);
+       val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
+       val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
+       msg.cfg = cpu_to_le32(val);
+
+       /* first set the channel without the extension channel info */
+       skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+       return mt76x2_mcu_msg_send(dev, skb, CMD_LOAD_CR);
+}
+
+/* Program the firmware channel in two steps: first without, then (after a
+ * short settle delay) with the extension channel information.
+ *
+ * NOTE(review): the return value of the first send is ignored —
+ * presumably intentional since the second send repeats the command;
+ * confirm.
+ */
+int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+                          u8 bw_index, bool scan)
+{
+       struct sk_buff *skb;
+       struct {
+               u8 idx;
+               u8 scan;
+               u8 bw;
+               u8 _pad0;
+
+               __le16 chainmask;
+               u8 ext_chan;
+               u8 _pad1;
+
+       } __packed __aligned(4) msg = {
+               .idx = channel,
+               .scan = scan,
+               .bw = bw,
+               .chainmask = cpu_to_le16(dev->chainmask),
+       };
+
+       /* first set the channel without the extension channel info */
+       skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+       mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);
+
+       usleep_range(5000, 10000);
+
+       msg.ext_chan = 0xe0 + bw_index;
+       skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+       return mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);
+}
+
+/* Turn the radio on or off via the firmware power-saving op. */
+int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on)
+{
+       struct sk_buff *skb;
+       struct {
+               __le32 mode;
+               __le32 level;
+       } __packed __aligned(4) msg = {
+               .mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
+               .level = cpu_to_le32(0),
+       };
+
+       skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+       return mt76x2_mcu_msg_send(dev, skb, CMD_POWER_SAVING_OP);
+}
+
+/* Run a firmware calibration of the given @type with argument @param.
+ * COM_REG0 bit 31 is cleared first and acts as the completion flag the
+ * firmware sets when done; we poll it for up to 100ms after the command
+ * is acknowledged.  Returns 0 or a negative errno.
+ */
+int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
+                        u32 param)
+{
+       struct sk_buff *skb;
+       struct {
+               __le32 id;
+               __le32 value;
+       } __packed __aligned(4) msg = {
+               .id = cpu_to_le32(type),
+               .value = cpu_to_le32(param),
+       };
+       int ret;
+
+       mt76_clear(dev, MT_MCU_COM_REG0, BIT(31));
+
+       skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+       ret = mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
+       if (ret)
+               return ret;
+
+       if (WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
+                                   BIT(31), BIT(31), 100)))
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+/* Send TSSI (transmit signal strength indicator) compensation data to the
+ * firmware as an MCU_CAL_TSSI_COMP calibration op.
+ */
+int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev,
+                        struct mt76x2_tssi_comp *tssi_data)
+{
+       struct sk_buff *skb;
+       struct {
+               __le32 id;
+               struct mt76x2_tssi_comp data;
+       } __packed __aligned(4) msg = {
+               .id = cpu_to_le32(MCU_CAL_TSSI_COMP),
+               .data = *tssi_data,
+       };
+
+       skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+       return mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
+}
+
+/* Set the initial RX gain for @channel.  Bit 31 of the channel word tells
+ * the firmware to apply the value unconditionally (@force).
+ */
+int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+                        bool force)
+{
+       struct sk_buff *skb;
+       struct {
+               __le32 channel;
+               __le32 gain_val;
+       } __packed __aligned(4) msg = {
+               .channel = cpu_to_le32(channel),
+               .gain_val = cpu_to_le32(gain),
+       };
+
+       if (force)
+               msg.channel |= cpu_to_le32(BIT(31));
+
+       skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+       return mt76x2_mcu_msg_send(dev, skb, CMD_INIT_GAIN_OP);
+}
+
+/* One-time MCU bring-up: load the ROM patch, then the main firmware, and
+ * select queue operation mode.
+ *
+ * NOTE(review): the return value of the final Q_SELECT function-select is
+ * discarded — presumably best-effort; confirm.
+ */
+int mt76x2_mcu_init(struct mt76x2_dev *dev)
+{
+       int ret;
+
+       mutex_init(&dev->mcu.mutex);
+
+       ret = mt76pci_load_rom_patch(dev);
+       if (ret)
+               return ret;
+
+       ret = mt76pci_load_firmware(dev);
+       if (ret)
+               return ret;
+
+       mt76x2_mcu_function_select(dev, Q_SELECT, 1);
+       return 0;
+}
+
+/* Shut the MCU down (MT_MCU_INT_LEVEL trigger value 1), give it time to
+ * quiesce, then drop any responses still queued.  Always returns 0.
+ */
+int mt76x2_mcu_cleanup(struct mt76x2_dev *dev)
+{
+       struct sk_buff *skb;
+
+       mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
+       usleep_range(20000, 30000);
+
+       while ((skb = skb_dequeue(&dev->mcu.res_q)) != NULL)
+               dev_kfree_skb(skb);
+
+       return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
new file mode 100644 (file)
index 0000000..d7a7e83
--- /dev/null
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_MCU_H
+#define __MT76x2_MCU_H
+
+/* Register definitions */
+#define MT_MCU_CPU_CTL                 0x0704
+#define MT_MCU_CLOCK_CTL               0x0708
+#define MT_MCU_RESET_CTL               0x070C
+#define MT_MCU_INT_LEVEL               0x0718
+#define MT_MCU_COM_REG0                        0x0730
+#define MT_MCU_COM_REG1                        0x0734
+#define MT_MCU_COM_REG2                        0x0738
+#define MT_MCU_COM_REG3                        0x073C
+#define MT_MCU_PCIE_REMAP_BASE1                0x0740
+#define MT_MCU_PCIE_REMAP_BASE2                0x0744
+#define MT_MCU_PCIE_REMAP_BASE3                0x0748
+#define MT_MCU_PCIE_REMAP_BASE4                0x074C
+
+/* LED control: one 8-bit field group per LED index _n */
+#define MT_LED_CTRL                    0x0770
+#define MT_LED_CTRL_REPLAY(_n)         BIT(0 + (8 * (_n)))
+#define MT_LED_CTRL_POLARITY(_n)       BIT(1 + (8 * (_n)))
+#define MT_LED_CTRL_TX_BLINK_MODE(_n)  BIT(2 + (8 * (_n)))
+#define MT_LED_CTRL_KICK(_n)           BIT(7 + (8 * (_n)))
+
+#define MT_LED_TX_BLINK_0              0x0774
+#define MT_LED_TX_BLINK_1              0x0778
+
+/* Per-LED status registers, 8 bytes apart per index */
+#define MT_LED_S0_BASE                 0x077C
+#define MT_LED_S0(_n)                  (MT_LED_S0_BASE + 8 * (_n))
+#define MT_LED_S1_BASE                 0x0780
+#define MT_LED_S1(_n)                  (MT_LED_S1_BASE + 8 * (_n))
+#define MT_LED_STATUS_OFF_MASK         GENMASK(31, 24)
+#define MT_LED_STATUS_OFF(_v)          (((_v) << __ffs(MT_LED_STATUS_OFF_MASK)) & \
+                                        MT_LED_STATUS_OFF_MASK)
+#define MT_LED_STATUS_ON_MASK          GENMASK(23, 16)
+#define MT_LED_STATUS_ON(_v)           (((_v) << __ffs(MT_LED_STATUS_ON_MASK)) & \
+                                        MT_LED_STATUS_ON_MASK)
+#define MT_LED_STATUS_DURATION_MASK    GENMASK(15, 8)
+#define MT_LED_STATUS_DURATION(_v)     (((_v) << __ffs(MT_LED_STATUS_DURATION_MASK)) & \
+                                        MT_LED_STATUS_DURATION_MASK)
+
+/* Host/MCU hardware semaphore registers */
+#define MT_MCU_SEMAPHORE_00            0x07B0
+#define MT_MCU_SEMAPHORE_01            0x07B4
+#define MT_MCU_SEMAPHORE_02            0x07B8
+#define MT_MCU_SEMAPHORE_03            0x07BC
+
+/* Firmware image layout: presumably file offsets vs. MCU-side load
+ * addresses for the ROM patch, ILM and DLM sections -- confirm
+ * against the loader in mt76x2_mcu.c. E3 silicon uses a different
+ * DLM address.
+ */
+#define MT_MCU_ROM_PATCH_OFFSET                0x80000
+#define MT_MCU_ROM_PATCH_ADDR          0x90000
+
+#define MT_MCU_ILM_OFFSET              0x80000
+#define MT_MCU_ILM_ADDR                        0x80000
+
+#define MT_MCU_DLM_OFFSET              0x100000
+#define MT_MCU_DLM_ADDR                        0x90000
+#define MT_MCU_DLM_ADDR_E3             0x90800
+
+/* MCU command opcodes; the numeric values are part of the firmware
+ * message protocol and must not be changed. Note CMD_RADOR_DETECT_OP
+ * keeps the vendor's "RADOR" (radar) spelling.
+ */
+enum mcu_cmd {
+       CMD_FUN_SET_OP = 1,
+       CMD_LOAD_CR = 2,
+       CMD_INIT_GAIN_OP = 3,
+       CMD_DYNC_VGA_OP = 6,
+       CMD_TDLS_CH_SW = 7,
+       CMD_BURST_WRITE = 8,
+       CMD_READ_MODIFY_WRITE = 9,
+       CMD_RANDOM_READ = 10,
+       CMD_BURST_READ = 11,
+       CMD_RANDOM_WRITE = 12,
+       CMD_LED_MODE_OP = 16,
+       CMD_POWER_SAVING_OP = 20,
+       CMD_WOW_CONFIG = 21,
+       CMD_WOW_QUERY = 22,
+       CMD_WOW_FEATURE = 24,
+       CMD_CARRIER_DETECT_OP = 28,
+       CMD_RADOR_DETECT_OP = 29,
+       CMD_SWITCH_CHANNEL_OP = 30,
+       CMD_CALIBRATION_OP = 31,
+       CMD_BEACON_OP = 32,
+       CMD_ANTENNA_OP = 33,
+};
+
+/* Sub-function ids for CMD_FUN_SET_OP. BW_SETTING and
+ * USB2_SW_DISCONNECT deliberately share the value 2 (firmware
+ * protocol quirk).
+ */
+enum mcu_function {
+       Q_SELECT = 1,
+       BW_SETTING = 2,
+       USB2_SW_DISCONNECT = 2,
+       USB3_SW_DISCONNECT = 3,
+       LOG_FW_DEBUG_MSG = 4,
+       GET_FW_VERSION = 5,
+};
+
+/* Radio power states for CMD_POWER_SAVING_OP (firmware-defined values) */
+enum mcu_power_mode {
+       RADIO_OFF = 0x30,
+       RADIO_ON = 0x31,
+       RADIO_OFF_AUTO_WAKEUP = 0x32,
+       RADIO_OFF_ADVANCE = 0x33,
+       RADIO_ON_ADVANCE = 0x34,
+};
+
+/* Calibration types passed as the id word of CMD_CALIBRATION_OP,
+ * sequential from 1 (firmware protocol values).
+ */
+enum mcu_calibration {
+       MCU_CAL_R = 1,
+       MCU_CAL_TEMP_SENSOR,
+       MCU_CAL_RXDCOC,
+       MCU_CAL_RC,
+       MCU_CAL_SX_LOGEN,
+       MCU_CAL_LC,
+       MCU_CAL_TX_LOFT,
+       MCU_CAL_TXIQ,
+       MCU_CAL_TSSI,
+       MCU_CAL_TSSI_COMP,
+       MCU_CAL_DPD,
+       MCU_CAL_RXIQC_FI,
+       MCU_CAL_RXIQC_FD,
+       MCU_CAL_PWRON,
+       MCU_CAL_TX_SHAPING,
+};
+
+/* Target register bank selector for CMD_LOAD_CR (RF vs. baseband) */
+enum mt76x2_mcu_cr_mode {
+       MT_RF_CR,
+       MT_BBP_CR,
+       MT_RF_BBP_CR,
+       MT_HL_TEMP_CR_UPDATE,
+};
+
+/* Wire format of the MCU_CAL_TSSI_COMP payload. Layout is fixed by
+ * the firmware protocol; "pad" is explicit padding to keep the
+ * slope/offset pairs 4-byte aligned.
+ */
+struct mt76x2_tssi_comp {
+       u8 pa_mode;
+       u8 cal_mode;
+       u16 pad;
+
+       u8 slope0;
+       u8 slope1;
+       u8 offset0;
+       u8 offset1;
+} __packed __aligned(4);
+
+int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
+                        u32 param);
+int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev, struct mt76x2_tssi_comp *tssi_data);
+int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+                        bool force);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c
new file mode 100644 (file)
index 0000000..e66f047
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt76x2.h"
+#include "mt76x2_trace.h"
+
+/* Supported PCI ids (vendor 0x14c3 is MediaTek); device ids
+ * presumably correspond to MT7662/MT7612/MT7602 -- matches the
+ * MT7662 firmware names referenced below.
+ */
+static const struct pci_device_id mt76pci_device_table[] = {
+       { PCI_DEVICE(0x14c3, 0x7662) },
+       { PCI_DEVICE(0x14c3, 0x7612) },
+       { PCI_DEVICE(0x14c3, 0x7602) },
+       { },
+};
+
+/* PCI probe: enable the device, map BAR0, set a 32-bit DMA mask,
+ * allocate the mt76x2 device, hook the (shared) IRQ and register
+ * with mac80211. PCI resources and the IRQ are device-managed
+ * (pcim_*/devm_*), so the error path only needs to free the
+ * ieee80211_hw allocation.
+ */
+static int
+mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct mt76x2_dev *dev;
+       int ret;
+
+       ret = pcim_enable_device(pdev);
+       if (ret)
+               return ret;
+
+       ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+       if (ret)
+               return ret;
+
+       pci_set_master(pdev);
+
+       ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
+
+       dev = mt76x2_alloc_device(&pdev->dev);
+       if (!dev)
+               return -ENOMEM;
+
+       mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
+
+       dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
+       dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
+
+       ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x2_irq_handler,
+                              IRQF_SHARED, KBUILD_MODNAME, dev);
+       if (ret)
+               goto error;
+
+       ret = mt76x2_register_device(dev);
+       if (ret)
+               goto error;
+
+       /* Fix up ASPM configuration */
+
+       /* RG_SSUSB_G1_CDR_BIR_LTR = 0x9 */
+       mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
+
+       /* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
+       mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
+
+       /* RG_SSUSB_CDR_BR_PE1D = 0x3 */
+       mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
+
+       return 0;
+
+error:
+       ieee80211_free_hw(mt76_hw(dev));
+       return ret;
+}
+
+/* PCI remove: unregister from mac80211, stop/clean up the hardware,
+ * then free the ieee80211_hw. Managed PCI resources and the IRQ are
+ * released automatically after this returns.
+ */
+static void
+mt76pci_remove(struct pci_dev *pdev)
+{
+       struct mt76_dev *mdev = pci_get_drvdata(pdev);
+       struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+
+       mt76_unregister_device(mdev);
+       mt76x2_cleanup(dev);
+       ieee80211_free_hw(mdev->hw);
+}
+
+/* Module metadata: PCI id table for autoloading, firmware image
+ * names for userspace firmware lookup, and the pci_driver glue.
+ */
+MODULE_DEVICE_TABLE(pci, mt76pci_device_table);
+MODULE_FIRMWARE(MT7662_FIRMWARE);
+MODULE_FIRMWARE(MT7662_ROM_PATCH);
+MODULE_LICENSE("Dual BSD/GPL");
+
+static struct pci_driver mt76pci_driver = {
+       .name           = KBUILD_MODNAME,
+       .id_table       = mt76pci_device_table,
+       .probe          = mt76pci_probe,
+       .remove         = mt76pci_remove,
+};
+
+module_pci_driver(mt76pci_driver);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
new file mode 100644 (file)
index 0000000..1264971
--- /dev/null
@@ -0,0 +1,758 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mt76x2_mcu.h"
+#include "mt76x2_eeprom.h"
+
+/* Reduce the LNA high-gain field of AGC register @reg by half of
+ * @offset (read-modify-write).
+ */
+static void
+mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+{
+       s8 gain;
+
+       gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+       gain -= offset / 2;
+       mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
+}
+
+/* Add @offset to the gain field of AGC register @reg
+ * (read-modify-write).
+ */
+static void
+mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+{
+       s8 gain;
+
+       gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+       gain += offset;
+       mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
+}
+
+/* Apply the calibrated per-chain high-gain adjustments to the LNA
+ * (AGC regs 4/5) and AGC gain (regs 8/9) registers; index 0/1 is
+ * chain 0/1.
+ */
+static void
+mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
+{
+       s8 *gain_adj = dev->cal.rx.high_gain;
+
+       mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
+       mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
+
+       mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
+       mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
+}
+
+/* Pack four 6-bit tx power values (masked with BIT(6)-1 == 0x3f)
+ * into the four bytes of a TX_PWR_CFG register word, v1 lowest.
+ */
+static u32
+mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
+{
+       u32 val = 0;
+
+       val |= (v1 & (BIT(6) - 1)) << 0;
+       val |= (v2 & (BIT(6) - 1)) << 8;
+       val |= (v3 & (BIT(6) - 1)) << 16;
+       val |= (v4 & (BIT(6) - 1)) << 24;
+       return val;
+}
+
+/* Convert a raw per-chain RSSI reading to a calibrated value by
+ * applying the chain's calibration offset and removing the LNA gain.
+ */
+int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
+{
+       struct mt76x2_rx_freq_cal *cal = &dev->cal.rx;
+
+       rssi += cal->rssi_offset[chain];
+       rssi -= cal->lna_gain;
+
+       return rssi;
+}
+
+/* Clamp a tx power value to the hardware range [0, 0x2f] */
+static u8
+mt76x2_txpower_check(int value)
+{
+       if (value < 0)
+               return 0;
+       if (value > 0x2f)
+               return 0x2f;
+       return value;
+}
+
+/* Add @offset to every per-rate power entry (r->all aliases all
+ * rate groups as one flat byte array).
+ */
+static void
+mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset)
+{
+       int i;
+
+       for (i = 0; i < sizeof(r->all); i++)
+               r->all[i] += offset;
+}
+
+/* Cap every per-rate power entry at @limit */
+static void
+mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
+{
+       int i;
+
+       for (i = 0; i < sizeof(r->all); i++)
+               if (r->all[i] > limit)
+                       r->all[i] = limit;
+}
+
+/* Return the maximum per-rate power entry, floored at 0 (the
+ * accumulator starts at 0, so negative entries are ignored).
+ */
+static int
+mt76x2_get_max_power(struct mt76_rate_power *r)
+{
+       int i;
+       s8 ret = 0;
+
+       for (i = 0; i < sizeof(r->all); i++)
+               ret = max(ret, r->all[i]);
+
+       return ret;
+}
+
+/* Recompute and program tx power: fold in the bandwidth delta from
+ * EEPROM, respect the configured power limit, cache the resulting
+ * per-rate table in dev->rate_power, then write the per-chain init
+ * values and the TX_PWR_CFG_* rate registers.
+ */
+void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
+{
+       enum nl80211_chan_width width = dev->mt76.chandef.width;
+       struct mt76x2_tx_power_info txp;
+       int txp_0, txp_1, delta = 0;
+       struct mt76_rate_power t = {};
+
+       mt76x2_get_power_info(dev, &txp);
+
+       if (width == NL80211_CHAN_WIDTH_40)
+               delta = txp.delta_bw40;
+       else if (width == NL80211_CHAN_WIDTH_80)
+               delta = txp.delta_bw80;
+
+       /* back off if the EEPROM target exceeds the configured limit */
+       if (txp.target_power > dev->txpower_conf)
+               delta -= txp.target_power - dev->txpower_conf;
+
+       /* build absolute powers, clamp, then convert back to offsets
+        * relative to the chain-0 base (including the bw delta)
+        */
+       mt76x2_get_rate_power(dev, &t);
+       mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power +
+                                  txp.chain[0].delta);
+       mt76x2_limit_rate_power(&t, dev->txpower_conf);
+       dev->txpower_cur = mt76x2_get_max_power(&t);
+       mt76x2_add_rate_power_offset(&t, -(txp.chain[0].target_power +
+                                        txp.chain[0].delta + delta));
+       dev->target_power = txp.chain[0].target_power;
+       dev->target_power_delta[0] = txp.chain[0].delta + delta;
+       dev->target_power_delta[1] = txp.chain[1].delta + delta;
+       dev->rate_power = t;
+
+       txp_0 = mt76x2_txpower_check(txp.chain[0].target_power +
+                                  txp.chain[0].delta + delta);
+
+       txp_1 = mt76x2_txpower_check(txp.chain[1].target_power +
+                                  txp.chain[1].delta + delta);
+
+       mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
+       mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
+
+       /* rate->register slot mapping follows the vendor layout;
+        * note some ht[]/vht[] entries are reused across CFG_3..CFG_9
+        * (presumably STBC/VHT MCS slots -- confirm against datasheet)
+        */
+       mt76_wr(dev, MT_TX_PWR_CFG_0,
+               mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2]));
+       mt76_wr(dev, MT_TX_PWR_CFG_1,
+               mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2]));
+       mt76_wr(dev, MT_TX_PWR_CFG_2,
+               mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10]));
+       mt76_wr(dev, MT_TX_PWR_CFG_3,
+               mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2]));
+       mt76_wr(dev, MT_TX_PWR_CFG_4,
+               mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0));
+       mt76_wr(dev, MT_TX_PWR_CFG_7,
+               mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8]));
+       mt76_wr(dev, MT_TX_PWR_CFG_8,
+               mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0));
+       mt76_wr(dev, MT_TX_PWR_CFG_9,
+               mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0));
+}
+
+/* True when the current channel is a radar (DFS) channel that has
+ * not yet passed CAC, i.e. transmissions (and calibration) must be
+ * suppressed.
+ */
+static bool
+mt76x2_channel_silent(struct mt76x2_dev *dev)
+{
+       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+
+       return ((chan->flags & IEEE80211_CHAN_RADAR) &&
+               chan->dfs_state != NL80211_DFS_AVAILABLE);
+}
+
+/* Trigger the initial TSSI calibration if TSSI is enabled and the
+ * channel allows transmissions. The flag word encodes the band
+ * (BIT(0) for 2GHz) and external PA presence (BIT(8)). Returns true
+ * if the calibration was started.
+ */
+static bool
+mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev)
+{
+       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+       u32 flag = 0;
+
+       if (!mt76x2_tssi_enabled(dev))
+               return false;
+
+       if (mt76x2_channel_silent(dev))
+               return false;
+
+       if (chan->band == NL80211_BAND_2GHZ)
+               flag |= BIT(0);
+
+       if (mt76x2_ext_pa_enabled(dev, chan->band))
+               flag |= BIT(8);
+
+       mt76x2_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
+       dev->cal.tssi_cal_done = true;
+       return true;
+}
+
+/* Run the once-per-channel calibration sequence (TX LOFT/IQ, RX IQ,
+ * temp sensor, tx shaping; LC cal on 5GHz only). The MAC must be
+ * stopped while calibrating; @mac_stopped tells us whether the
+ * caller already did that. Skipped entirely on silent DFS channels
+ * or if already done for this channel.
+ */
+static void
+mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped)
+{
+       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+       bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
+
+       if (dev->cal.channel_cal_done)
+               return;
+
+       if (mt76x2_channel_silent(dev))
+               return;
+
+       if (!dev->cal.tssi_cal_done)
+               mt76x2_phy_tssi_init_cal(dev);
+
+       if (!mac_stopped)
+               mt76x2_mac_stop(dev, false);
+
+       if (is_5ghz)
+               mt76x2_mcu_calibrate(dev, MCU_CAL_LC, 0);
+
+       mt76x2_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
+       mt76x2_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
+       mt76x2_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
+       mt76x2_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);
+       mt76x2_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0);
+
+       if (!mac_stopped)
+               mt76x2_mac_resume(dev);
+
+       mt76x2_apply_gain_adj(dev);
+
+       dev->cal.channel_cal_done = true;
+}
+
+/* Program the band-dependent PA mode, ALC and RF gain correction
+ * registers. The magic constants are vendor-provided calibration
+ * values selected per band and internal vs. external PA -- do not
+ * derive meaning from them without the datasheet.
+ */
+static void
+mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, enum nl80211_band band)
+{
+       u32 pa_mode[2];
+       u32 pa_mode_adj;
+
+       if (band == NL80211_BAND_2GHZ) {
+               pa_mode[0] = 0x010055ff;
+               pa_mode[1] = 0x00550055;
+
+               mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
+               mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
+
+               if (mt76x2_ext_pa_enabled(dev, band)) {
+                       mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
+                       mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
+               } else {
+                       mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
+                       mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
+               }
+       } else {
+               pa_mode[0] = 0x0000ffff;
+               pa_mode[1] = 0x00ff00ff;
+
+               if (mt76x2_ext_pa_enabled(dev, band)) {
+                       mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
+                       mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
+               } else {
+                       mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
+                       mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
+               }
+               mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
+
+               if (mt76x2_ext_pa_enabled(dev, band))
+                       pa_mode_adj = 0x04000000;
+               else
+                       pa_mode_adj = 0;
+
+               mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
+               mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
+       }
+
+       mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
+       mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
+       mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
+       mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
+
+       if (mt76x2_ext_pa_enabled(dev, band)) {
+               u32 val;
+
+               if (band == NL80211_BAND_2GHZ)
+                       val = 0x3c3c023c;
+               else
+                       val = 0x363c023c;
+
+               mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+               mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+               mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
+       } else {
+               if (band == NL80211_BAND_2GHZ) {
+                       u32 val = 0x0f3c3c3c;
+
+                       mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+                       mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+                       mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
+               } else {
+                       mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
+                       mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
+                       mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
+               }
+       }
+}
+
+/* Set tx switch timing (TX_SW_CFG0/1) based on PA type and
+ * bandwidth (@bw non-zero means >= 40MHz), and adjust the CCK SIFS
+ * by one for wide channels. Values are vendor-provided.
+ */
+static void
+mt76x2_configure_tx_delay(struct mt76x2_dev *dev, enum nl80211_band band, u8 bw)
+{
+       u32 cfg0, cfg1;
+
+       if (mt76x2_ext_pa_enabled(dev, band)) {
+               cfg0 = bw ? 0x000b0c01 : 0x00101101;
+               cfg1 = 0x00011414;
+       } else {
+               cfg0 = bw ? 0x000b0b01 : 0x00101001;
+               cfg1 = 0x00021414;
+       }
+       mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
+       mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
+
+       mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_CCK_SIFS,
+                      13 + (bw ? 1 : 0));
+}
+
+/* Program baseband core and AGC bandwidth fields for 20/40/80MHz,
+ * plus the control-channel index @ctrl in both the AGC and TXBE
+ * blocks.
+ */
+static void
+mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl)
+{
+       int core_val, agc_val;
+
+       switch (width) {
+       case NL80211_CHAN_WIDTH_80:
+               core_val = 3;
+               agc_val = 7;
+               break;
+       case NL80211_CHAN_WIDTH_40:
+               core_val = 2;
+               agc_val = 3;
+               break;
+       default:
+               core_val = 0;
+               agc_val = 1;
+               break;
+       }
+
+       mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
+       mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
+       mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
+       mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
+}
+
+/* Select the 2GHz or 5GHz tx band (mutually exclusive bits) and set
+ * whether the primary channel sits in the upper half of a 40MHz
+ * pair.
+ */
+static void
+mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper)
+{
+       switch (band) {
+       case NL80211_BAND_2GHZ:
+               mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+               mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+               break;
+       case NL80211_BAND_5GHZ:
+               mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+               mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+               break;
+       }
+
+       mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
+                      primary_upper);
+}
+
+/* Configure the rx chain bits in AGC register 0 from the chainmask.
+ * BIT(3) enables the second rx chain; BIT(4) is always cleared here
+ * (presumably a third-chain/reserved bit -- confirm).
+ */
+static void
+mt76x2_set_rx_chains(struct mt76x2_dev *dev)
+{
+       u32 val;
+
+       val = mt76_rr(dev, MT_BBP(AGC, 0));
+       val &= ~(BIT(3) | BIT(4));
+
+       if (dev->chainmask & BIT(1))
+               val |= BIT(3);
+
+       mt76_wr(dev, MT_BBP(AGC, 0), val);
+}
+
+/* Enable both tx DACs (low two bits of TXBE register 5) when chain 1
+ * is present in the chainmask, otherwise single-DAC mode.
+ */
+static void
+mt76x2_set_tx_dac(struct mt76x2_dev *dev)
+{
+       if (dev->chainmask & BIT(1))
+               mt76_set(dev, MT_BBP(TXBE, 5), 3);
+       else
+               mt76_clear(dev, MT_BBP(TXBE, 5), 3);
+}
+
+/* Read the current AGC gain of both chains (regs 8 and 9) into
+ * dest[0..1].
+ */
+static void
+mt76x2_get_agc_gain(struct mt76x2_dev *dev, u8 *dest)
+{
+       dest[0] = mt76_get_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN);
+       dest[1] = mt76_get_field(dev, MT_BBP(AGC, 9), MT_BBP_AGC_GAIN);
+}
+
+/* Upper RSSI threshold (dBm) for gain reduction, per channel width */
+static int
+mt76x2_get_rssi_gain_thresh(struct mt76x2_dev *dev)
+{
+       switch (dev->mt76.chandef.width) {
+       case NL80211_CHAN_WIDTH_80:
+               return -62;
+       case NL80211_CHAN_WIDTH_40:
+               return -65;
+       default:
+               return -68;
+       }
+}
+
+/* Lower RSSI threshold (dBm) for gain reduction, per channel width */
+static int
+mt76x2_get_low_rssi_gain_thresh(struct mt76x2_dev *dev)
+{
+       switch (dev->mt76.chandef.width) {
+       case NL80211_CHAN_WIDTH_80:
+               return -76;
+       case NL80211_CHAN_WIDTH_40:
+               return -79;
+       default:
+               return -82;
+       }
+}
+
+/* Write the effective AGC gain (current gain minus the dynamic VGA
+ * adjustment) for both chains, with width-dependent upper register
+ * bits (vendor values), then let the DFS code re-tune its detector
+ * on radar channels.
+ */
+static void
+mt76x2_phy_set_gain_val(struct mt76x2_dev *dev)
+{
+       u32 val;
+       u8 gain_val[2];
+
+       gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
+       gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
+
+       if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
+               val = 0x1e42 << 16;
+       else
+               val = 0x1836 << 16;
+
+       val |= 0xf8;
+
+       mt76_wr(dev, MT_BBP(AGC, 8),
+               val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[0]));
+       mt76_wr(dev, MT_BBP(AGC, 9),
+               val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[1]));
+
+       if (dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR)
+               mt76x2_dfs_adjust_agc(dev);
+}
+
+/* Dynamic VGA tuning from false-CCA statistics: step the gain
+ * adjustment up by 2 when false CCAs are high (>800), down by 2 when
+ * very low (<10), bounded by [0, limit] where the limit is tighter
+ * (4) in low-gain conditions. Only rewrites the gain registers when
+ * the adjustment actually changed.
+ */
+static void
+mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev)
+{
+       u32 false_cca;
+       u8 limit = dev->cal.low_gain > 1 ? 4 : 16;
+
+       false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1));
+       if (false_cca > 800 && dev->cal.agc_gain_adjust < limit)
+               dev->cal.agc_gain_adjust += 2;
+       else if (false_cca < 10 && dev->cal.agc_gain_adjust > 0)
+               dev->cal.agc_gain_adjust -= 2;
+       else
+               return;
+
+       mt76x2_phy_set_gain_val(dev);
+}
+
+/* Periodic gain control: maintain an EWMA (15/16 decay, readings
+ * scaled by 256) of per-chain RSSI, derive a 0/1/2 "low gain" level
+ * from the two thresholds, and on a level change reprogram the RXO
+ * and AGC registers and reset the gain to the calibrated initial
+ * values minus a sensitivity back-off. If the level is unchanged,
+ * only the fine VGA adjustment runs.
+ */
+static void
+mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev)
+{
+       u32 val = mt76_rr(dev, MT_BBP(AGC, 20));
+       int rssi0 = (s8) FIELD_GET(MT_BBP_AGC20_RSSI0, val);
+       int rssi1 = (s8) FIELD_GET(MT_BBP_AGC20_RSSI1, val);
+       u8 *gain = dev->cal.agc_gain_init;
+       u8 gain_delta;
+       int low_gain;
+
+       dev->cal.avg_rssi[0] = (dev->cal.avg_rssi[0] * 15) / 16 + (rssi0 << 8);
+       dev->cal.avg_rssi[1] = (dev->cal.avg_rssi[1] * 15) / 16 + (rssi1 << 8);
+       dev->cal.avg_rssi_all = (dev->cal.avg_rssi[0] +
+                                dev->cal.avg_rssi[1]) / 512;
+
+       /* 0, 1 or 2 depending on how many thresholds the average exceeds */
+       low_gain = (dev->cal.avg_rssi_all > mt76x2_get_rssi_gain_thresh(dev)) +
+                  (dev->cal.avg_rssi_all > mt76x2_get_low_rssi_gain_thresh(dev));
+
+       if (dev->cal.low_gain == low_gain) {
+               mt76x2_phy_adjust_vga_gain(dev);
+               return;
+       }
+
+       dev->cal.low_gain = low_gain;
+
+       if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+               mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211);
+       else
+               mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423);
+
+       if (low_gain) {
+               mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
+               mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
+               mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808);
+               if (mt76x2_has_ext_lna(dev))
+                       gain_delta = 10;
+               else
+                       gain_delta = 14;
+       } else {
+               mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
+               if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+                       mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
+               else
+                       mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
+               mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
+               gain_delta = 0;
+       }
+
+       dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
+       dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
+       dev->cal.agc_gain_adjust = 0;
+       mt76x2_phy_set_gain_val(dev);
+}
+
+/* Full channel switch: derive the hardware channel number, bandwidth
+ * code and control-channel group from the chandef, program gain/
+ * power/band/bandwidth registers, hand the switch to the MCU, then
+ * run one-time (R/RC) and per-channel (RXDCOC) calibrations.
+ * During a scan the expensive per-channel calibration and the
+ * periodic calibration work are skipped. The ordering of MCU calls
+ * and register writes below is significant.
+ */
+int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
+                          struct cfg80211_chan_def *chandef)
+{
+       struct ieee80211_channel *chan = chandef->chan;
+       bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+       enum nl80211_band band = chan->band;
+       u8 channel;
+
+       /* per-group CCA config: rotates which CCA slot maps to the
+        * control channel and enables the matching mask bit
+        */
+       u32 ext_cca_chan[4] = {
+               [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+               [1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+               [2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+               [3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+                     FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+       };
+       int ch_group_index;
+       u8 bw, bw_index;
+       int freq, freq1;
+       int ret;
+       u8 sifs = 13;
+
+       dev->cal.channel_cal_done = false;
+       freq = chandef->chan->center_freq;
+       freq1 = chandef->center_freq1;
+       channel = chan->hw_value;
+
+       /* map the chandef to hw channel: for 40/80MHz the hw channel
+        * is offset toward the segment center depending on where the
+        * control channel sits within the wide channel
+        */
+       switch (chandef->width) {
+       case NL80211_CHAN_WIDTH_40:
+               bw = 1;
+               if (freq1 > freq) {
+                       bw_index = 1;
+                       ch_group_index = 0;
+               } else {
+                       bw_index = 3;
+                       ch_group_index = 1;
+               }
+               channel += 2 - ch_group_index * 4;
+               break;
+       case NL80211_CHAN_WIDTH_80:
+               ch_group_index = (freq - freq1 + 30) / 20;
+               if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+                       ch_group_index = 0;
+               bw = 2;
+               bw_index = ch_group_index;
+               channel += 6 - ch_group_index * 4;
+               break;
+       default:
+               bw = 0;
+               bw_index = 0;
+               ch_group_index = 0;
+               break;
+       }
+
+       mt76x2_read_rx_gain(dev);
+       mt76x2_phy_set_txpower_regs(dev, band);
+       mt76x2_configure_tx_delay(dev, band, bw);
+       mt76x2_phy_set_txpower(dev);
+
+       mt76x2_set_rx_chains(dev);
+       mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
+       mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
+       mt76x2_set_tx_dac(dev);
+
+       mt76_rmw(dev, MT_EXT_CCA_CFG,
+                (MT_EXT_CCA_CFG_CCA0 |
+                 MT_EXT_CCA_CFG_CCA1 |
+                 MT_EXT_CCA_CFG_CCA2 |
+                 MT_EXT_CCA_CFG_CCA3 |
+                 MT_EXT_CCA_CFG_CCA_MASK),
+                ext_cca_chan[ch_group_index]);
+
+       if (chandef->width >= NL80211_CHAN_WIDTH_40)
+               sifs++;
+
+       mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, sifs);
+
+       ret = mt76x2_mcu_set_channel(dev, channel, bw, bw_index, scan);
+       if (ret)
+               return ret;
+
+       mt76x2_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);
+
+       /* Enable LDPC Rx */
+       if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+               mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
+
+       /* one-time R calibration, skipped when EEPROM already holds a
+        * valid BT R-cal result
+        */
+       if (!dev->cal.init_cal_done) {
+               u8 val = mt76x2_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
+
+               if (val != 0xff)
+                       mt76x2_mcu_calibrate(dev, MCU_CAL_R, 0);
+       }
+
+       mt76x2_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);
+
+       /* Rx LPF calibration */
+       if (!dev->cal.init_cal_done)
+               mt76x2_mcu_calibrate(dev, MCU_CAL_RC, 0);
+
+       dev->cal.init_cal_done = true;
+
+       mt76_wr(dev, MT_BBP(AGC, 61), 0xFF64A4E2);
+       mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010);
+       mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404);
+       mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
+       mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101B3F);
+
+       if (scan)
+               return 0;
+
+       dev->cal.low_gain = -1;
+       mt76x2_phy_channel_calibrate(dev, true);
+       mt76x2_get_agc_gain(dev, dev->cal.agc_gain_init);
+       memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
+              sizeof(dev->cal.agc_gain_cur));
+
+       ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+                                    MT_CALIBRATE_INTERVAL);
+
+       return 0;
+}
+
+/* Two-phase TSSI compensation driven by the periodic calibration
+ * work: first invocation triggers a TSSI measurement (cal_mode
+ * BIT(0)) and sets tssi_comp_pending; a later invocation, once the
+ * hardware busy bit (CORE reg 34 BIT(4)) clears, sends the actual
+ * compensation values from EEPROM (cal_mode BIT(1)). With an
+ * internal PA it also runs a one-time DPD calibration afterwards.
+ */
+static void
+mt76x2_phy_tssi_compensate(struct mt76x2_dev *dev)
+{
+       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+       struct mt76x2_tx_power_info txp;
+       struct mt76x2_tssi_comp t = {};
+
+       if (!dev->cal.tssi_cal_done)
+               return;
+
+       if (!dev->cal.tssi_comp_pending) {
+               /* TSSI trigger */
+               t.cal_mode = BIT(0);
+               mt76x2_mcu_tssi_comp(dev, &t);
+               dev->cal.tssi_comp_pending = true;
+       } else {
+               if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4))
+                       return;
+
+               dev->cal.tssi_comp_pending = false;
+               mt76x2_get_power_info(dev, &txp);
+
+               if (mt76x2_ext_pa_enabled(dev, chan->band))
+                       t.pa_mode = 1;
+
+               t.cal_mode = BIT(1);
+               t.slope0 = txp.chain[0].tssi_slope;
+               t.offset0 = txp.chain[0].tssi_offset;
+               t.slope1 = txp.chain[1].tssi_slope;
+               t.offset1 = txp.chain[1].tssi_offset;
+               mt76x2_mcu_tssi_comp(dev, &t);
+
+               if (t.pa_mode || dev->cal.dpd_cal_done)
+                       return;
+
+               usleep_range(10000, 20000);
+               mt76x2_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value);
+               dev->cal.dpd_cal_done = true;
+       }
+}
+
+/* Temperature compensation: convert the raw sensor reading to
+ * degrees (scale 1.789 around the 25C reference from EEPROM),
+ * compute a dB correction from the per-direction slopes, clamp it
+ * to the EEPROM bounds, and write it (in 0.5 dB units, hence *2)
+ * into both ALC compensation fields. No-op if the EEPROM has no
+ * temperature-compensation data.
+ */
+static void
+mt76x2_phy_temp_compensate(struct mt76x2_dev *dev)
+{
+       struct mt76x2_temp_comp t;
+       int temp, db_diff;
+
+       if (mt76x2_get_temp_comp(dev, &t))
+               return;
+
+       temp = mt76_get_field(dev, MT_TEMP_SENSOR, MT_TEMP_SENSOR_VAL);
+       temp -= t.temp_25_ref;
+       temp = (temp * 1789) / 1000 + 25;
+       dev->cal.temp = temp;
+
+       if (temp > 25)
+               db_diff = (temp - 25) / t.high_slope;
+       else
+               db_diff = (25 - temp) / t.low_slope;
+
+       db_diff = min(db_diff, t.upper_bound);
+       db_diff = max(db_diff, t.lower_bound);
+
+       mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+                      db_diff * 2);
+       mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
+                      db_diff * 2);
+}
+
+/* Periodic calibration worker (dev->cal_work): runs channel cal,
+ * TSSI and temperature compensation and gain tracking, then
+ * re-queues itself every MT_CALIBRATE_INTERVAL.
+ */
+void mt76x2_phy_calibrate(struct work_struct *work)
+{
+       struct mt76x2_dev *dev;
+
+       dev = container_of(work, struct mt76x2_dev, cal_work.work);
+       mt76x2_phy_channel_calibrate(dev, false);
+       mt76x2_phy_tssi_compensate(dev);
+       mt76x2_phy_temp_compensate(dev);
+       mt76x2_phy_update_channel_gain(dev);
+       ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+                                    MT_CALIBRATE_INTERVAL);
+}
+
+/* Power the radio on via the MCU and load the combined RF/BBP CR
+ * table. Returns the radio-state error if that fails; the
+ * mt76x2_mcu_load_cr() result is not checked (presumably best
+ * effort -- confirm).
+ */
+int mt76x2_phy_start(struct mt76x2_dev *dev)
+{
+       int ret;
+
+       ret = mt76x2_mcu_set_radio_state(dev, true);
+       if (ret)
+               return ret;
+
+       mt76x2_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
+
+       return ret;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
new file mode 100644 (file)
index 0000000..ce3ab85
--- /dev/null
@@ -0,0 +1,587 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_REGS_H
+#define __MT76x2_REGS_H
+
+#define MT_ASIC_VERSION                        0x0000
+
+#define MT76XX_REV_E3          0x22
+#define MT76XX_REV_E4          0x33
+
+#define MT_CMB_CTRL                    0x0020
+#define MT_CMB_CTRL_XTAL_RDY           BIT(22)
+#define MT_CMB_CTRL_PLL_LD             BIT(23)
+
+#define MT_EFUSE_CTRL                  0x0024
+#define MT_EFUSE_CTRL_AOUT             GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE             GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME     GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME      GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN              GENMASK(25, 16)
+#define MT_EFUSE_CTRL_KICK             BIT(30)
+#define MT_EFUSE_CTRL_SEL              BIT(31)
+
+#define MT_EFUSE_DATA_BASE             0x0028
+#define MT_EFUSE_DATA(_n)              (MT_EFUSE_DATA_BASE + ((_n) << 2))
+
+#define MT_COEXCFG0                    0x0040
+#define MT_COEXCFG0_COEX_EN            BIT(0)
+
+#define MT_WLAN_FUN_CTRL               0x0080
+#define MT_WLAN_FUN_CTRL_WLAN_EN       BIT(0)
+#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN   BIT(1)
+#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2)
+
+#define MT_WLAN_FUN_CTRL_WLAN_RESET    BIT(3) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ  BIT(4)
+#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL        BIT(5)
+#define MT_WLAN_FUN_CTRL_INV_ANT_SEL   BIT(6)
+#define MT_WLAN_FUN_CTRL_WAKE_HOST     BIT(7)
+
+#define MT_WLAN_FUN_CTRL_THERM_RST     BIT(8) /* MT76x2 */
+#define MT_WLAN_FUN_CTRL_THERM_CKEN    BIT(9) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_GPIO_IN       GENMASK(15, 8) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT      GENMASK(23, 16) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN   GENMASK(31, 24) /* MT76x0 */
+
+#define MT_XO_CTRL0                    0x0100
+#define MT_XO_CTRL1                    0x0104
+#define MT_XO_CTRL2                    0x0108
+#define MT_XO_CTRL3                    0x010c
+#define MT_XO_CTRL4                    0x0110
+
+#define MT_XO_CTRL5                    0x0114
+#define MT_XO_CTRL5_C2_VAL             GENMASK(14, 8)
+
+#define MT_XO_CTRL6                    0x0118
+#define MT_XO_CTRL6_C2_CTRL            GENMASK(14, 8)
+
+#define MT_XO_CTRL7                    0x011c
+
+#define MT_WLAN_MTC_CTRL               0x10148
+#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
+#define MT_WLAN_MTC_CTRL_PWR_ACK       BIT(12)
+#define MT_WLAN_MTC_CTRL_PWR_ACK_S     BIT(13)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_PD    GENMASK(19, 16)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_PD    BIT(20)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_PD    BIT(21)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_PD    BIT(22)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_RB    BIT(24)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_RB    BIT(25)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_RB    BIT(26)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_RB    BIT(27)
+#define MT_WLAN_MTC_CTRL_STATE_UP      BIT(28)
+
+#define MT_INT_SOURCE_CSR              0x0200
+#define MT_INT_MASK_CSR                        0x0204
+
+#define MT_INT_RX_DONE(_n)             BIT(_n)
+#define MT_INT_RX_DONE_ALL             GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL             GENMASK(13, 4)
+#define MT_INT_TX_DONE(_n)             BIT(_n + 4)
+#define MT_INT_RX_COHERENT             BIT(16)
+#define MT_INT_TX_COHERENT             BIT(17)
+#define MT_INT_ANY_COHERENT            BIT(18)
+#define MT_INT_MCU_CMD                 BIT(19)
+#define MT_INT_TBTT                    BIT(20)
+#define MT_INT_PRE_TBTT                        BIT(21)
+#define MT_INT_TX_STAT                 BIT(22)
+#define MT_INT_AUTO_WAKEUP             BIT(23)
+#define MT_INT_GPTIMER                 BIT(24)
+#define MT_INT_RXDELAYINT              BIT(26)
+#define MT_INT_TXDELAYINT              BIT(27)
+
+#define MT_WPDMA_GLO_CFG               0x0208
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN     BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY   BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN     BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY   BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE        GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE     BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN    BIT(7)
+#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN   GENMASK(15, 8)
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS  BIT(30)
+#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET  BIT(31)
+
+#define MT_WPDMA_RST_IDX               0x020c
+
+#define MT_WPDMA_DELAY_INT_CFG         0x0210
+
+#define MT_WMM_AIFSN           0x0214
+#define MT_WMM_AIFSN_MASK              GENMASK(3, 0)
+#define MT_WMM_AIFSN_SHIFT(_n)         ((_n) * 4)
+
+#define MT_WMM_CWMIN           0x0218
+#define MT_WMM_CWMIN_MASK              GENMASK(3, 0)
+#define MT_WMM_CWMIN_SHIFT(_n)         ((_n) * 4)
+
+#define MT_WMM_CWMAX           0x021c
+#define MT_WMM_CWMAX_MASK              GENMASK(3, 0)
+#define MT_WMM_CWMAX_SHIFT(_n)         ((_n) * 4)
+
+#define MT_WMM_TXOP_BASE               0x0220
+#define MT_WMM_TXOP(_n)                        (MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
+/* Parenthesize the macro argument so expressions like "i ^ 1" expand
+ * with the intended precedence.
+ */
+#define MT_WMM_TXOP_SHIFT(_n)          (((_n) & 1) * 16)
+#define MT_WMM_TXOP_MASK               GENMASK(15, 0)
+
+#define MT_TSO_CTRL                    0x0250
+#define MT_HEADER_TRANS_CTRL_REG       0x0260
+
+#define MT_TX_RING_BASE                        0x0300
+#define MT_RX_RING_BASE                        0x03c0
+
+#define MT_TX_HW_QUEUE_MCU             8
+#define MT_TX_HW_QUEUE_MGMT            9
+
+#define MT_PBF_SYS_CTRL                        0x0400
+#define MT_PBF_SYS_CTRL_MCU_RESET      BIT(0)
+#define MT_PBF_SYS_CTRL_DMA_RESET      BIT(1)
+#define MT_PBF_SYS_CTRL_MAC_RESET      BIT(2)
+#define MT_PBF_SYS_CTRL_PBF_RESET      BIT(3)
+#define MT_PBF_SYS_CTRL_ASY_RESET      BIT(4)
+
+#define MT_PBF_CFG                     0x0404
+#define MT_PBF_CFG_TX0Q_EN             BIT(0)
+#define MT_PBF_CFG_TX1Q_EN             BIT(1)
+#define MT_PBF_CFG_TX2Q_EN             BIT(2)
+#define MT_PBF_CFG_TX3Q_EN             BIT(3)
+#define MT_PBF_CFG_RX0Q_EN             BIT(4)
+#define MT_PBF_CFG_RX_DROP_EN          BIT(8)
+
+#define MT_PBF_TX_MAX_PCNT             0x0408
+#define MT_PBF_RX_MAX_PCNT             0x040c
+
+#define MT_BCN_OFFSET_BASE             0x041c
+#define MT_BCN_OFFSET(_n)              (MT_BCN_OFFSET_BASE + ((_n) << 2))
+
+#define MT_RF_BYPASS_0                 0x0504
+#define MT_RF_BYPASS_1                 0x0508
+#define MT_RF_SETTING_0                        0x050c
+
+#define MT_RF_DATA_WRITE               0x0524
+
+#define MT_RF_CTRL                     0x0528
+#define MT_RF_CTRL_ADDR                        GENMASK(11, 0)
+#define MT_RF_CTRL_WRITE               BIT(12)
+#define MT_RF_CTRL_BUSY                        BIT(13)
+#define MT_RF_CTRL_IDX                 BIT(16)
+
+#define MT_RF_DATA_READ                        0x052c
+
+#define MT_FCE_PSE_CTRL                        0x0800
+#define MT_FCE_PARAMETERS              0x0804
+#define MT_FCE_CSO                     0x0808
+
+#define MT_FCE_L2_STUFF                        0x080c
+#define MT_FCE_L2_STUFF_HT_L2_EN       BIT(0)
+#define MT_FCE_L2_STUFF_QOS_L2_EN      BIT(1)
+#define MT_FCE_L2_STUFF_RX_STUFF_EN    BIT(2)
+#define MT_FCE_L2_STUFF_TX_STUFF_EN    BIT(3)
+#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4)
+#define MT_FCE_L2_STUFF_MVINV_BSWAP    BIT(5)
+#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8)
+#define MT_FCE_L2_STUFF_TS_LEN_EN      GENMASK(23, 16)
+#define MT_FCE_L2_STUFF_OTHER_PORT     GENMASK(25, 24)
+
+#define MT_FCE_WLAN_FLOW_CONTROL1      0x0824
+
+#define MT_PAUSE_ENABLE_CONTROL1       0x0a38
+
+#define MT_MAC_CSR0                    0x1000
+
+#define MT_MAC_SYS_CTRL                        0x1004
+#define MT_MAC_SYS_CTRL_RESET_CSR      BIT(0)
+#define MT_MAC_SYS_CTRL_RESET_BBP      BIT(1)
+#define MT_MAC_SYS_CTRL_ENABLE_TX      BIT(2)
+#define MT_MAC_SYS_CTRL_ENABLE_RX      BIT(3)
+
+#define MT_MAC_ADDR_DW0                        0x1008
+#define MT_MAC_ADDR_DW1                        0x100c
+
+#define MT_MAC_BSSID_DW0               0x1010
+#define MT_MAC_BSSID_DW1               0x1014
+#define MT_MAC_BSSID_DW1_ADDR          GENMASK(15, 0)
+#define MT_MAC_BSSID_DW1_MBSS_MODE     GENMASK(17, 16)
+#define MT_MAC_BSSID_DW1_MBEACON_N     GENMASK(20, 18)
+#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT        BIT(21)
+#define MT_MAC_BSSID_DW1_MBSS_MODE_B2  BIT(22)
+#define MT_MAC_BSSID_DW1_MBEACON_N_B3  BIT(23)
+#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24)
+
+#define MT_MAX_LEN_CFG                 0x1018
+
+#define MT_AMPDU_MAX_LEN_20M1S         0x1030
+#define MT_AMPDU_MAX_LEN_20M2S         0x1034
+#define MT_AMPDU_MAX_LEN_40M1S         0x1038
+#define MT_AMPDU_MAX_LEN_40M2S         0x103c
+#define MT_AMPDU_MAX_LEN               0x1040
+
+#define MT_WCID_DROP_BASE              0x106c
+#define MT_WCID_DROP(_n)               (MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
+#define MT_WCID_DROP_MASK(_n)          BIT((_n) % 32)
+
+#define MT_BCN_BYPASS_MASK             0x108c
+
+#define MT_MAC_APC_BSSID_BASE          0x1090
+#define MT_MAC_APC_BSSID_L(_n)         (MT_MAC_APC_BSSID_BASE + ((_n) * 8))
+#define MT_MAC_APC_BSSID_H(_n)         (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
+#define MT_MAC_APC_BSSID_H_ADDR                GENMASK(15, 0)
+#define MT_MAC_APC_BSSID0_H_EN         BIT(16)
+
+#define MT_XIFS_TIME_CFG               0x1100
+#define MT_XIFS_TIME_CFG_CCK_SIFS      GENMASK(7, 0)
+#define MT_XIFS_TIME_CFG_OFDM_SIFS     GENMASK(15, 8)
+#define MT_XIFS_TIME_CFG_OFDM_XIFS     GENMASK(19, 16)
+#define MT_XIFS_TIME_CFG_EIFS          GENMASK(28, 20)
+#define MT_XIFS_TIME_CFG_BB_RXEND_EN   BIT(29)
+
+#define MT_BKOFF_SLOT_CFG              0x1104
+#define MT_BKOFF_SLOT_CFG_SLOTTIME     GENMASK(7, 0)
+#define MT_BKOFF_SLOT_CFG_CC_DELAY     GENMASK(11, 8)
+
+#define MT_CH_TIME_CFG                 0x110c
+#define MT_CH_TIME_CFG_TIMER_EN                BIT(0)
+#define MT_CH_TIME_CFG_TX_AS_BUSY      BIT(1)
+#define MT_CH_TIME_CFG_RX_AS_BUSY      BIT(2)
+#define MT_CH_TIME_CFG_NAV_AS_BUSY     BIT(3)
+#define MT_CH_TIME_CFG_EIFS_AS_BUSY    BIT(4)
+#define MT_CH_TIME_CFG_MDRDY_CNT_EN    BIT(5)
+#define MT_CH_TIME_CFG_CH_TIMER_CLR    GENMASK(9, 8)
+#define MT_CH_TIME_CFG_MDRDY_CLR       GENMASK(11, 10)
+
+#define MT_PBF_LIFE_TIMER              0x1110
+
+#define MT_BEACON_TIME_CFG             0x1114
+#define MT_BEACON_TIME_CFG_INTVAL      GENMASK(15, 0)
+#define MT_BEACON_TIME_CFG_TIMER_EN    BIT(16)
+#define MT_BEACON_TIME_CFG_SYNC_MODE   GENMASK(18, 17)
+#define MT_BEACON_TIME_CFG_TBTT_EN     BIT(19)
+#define MT_BEACON_TIME_CFG_BEACON_TX   BIT(20)
+#define MT_BEACON_TIME_CFG_TSF_COMP    GENMASK(31, 24)
+
+#define MT_TBTT_SYNC_CFG               0x1118
+#define MT_TBTT_TIMER_CFG              0x1124
+
+#define MT_INT_TIMER_CFG               0x1128
+#define MT_INT_TIMER_CFG_PRE_TBTT      GENMASK(15, 0)
+#define MT_INT_TIMER_CFG_GP_TIMER      GENMASK(31, 16)
+
+#define MT_INT_TIMER_EN                        0x112c
+#define MT_INT_TIMER_EN_PRE_TBTT_EN    BIT(0)
+#define MT_INT_TIMER_EN_GP_TIMER_EN    BIT(1)
+
+#define MT_CH_IDLE                     0x1130
+#define MT_CH_BUSY                     0x1134
+#define MT_EXT_CH_BUSY                 0x1138
+#define MT_ED_CCA_TIMER                        0x1140
+
+#define MT_MAC_STATUS                  0x1200
+#define MT_MAC_STATUS_TX               BIT(0)
+#define MT_MAC_STATUS_RX               BIT(1)
+
+#define MT_PWR_PIN_CFG                 0x1204
+#define MT_AUX_CLK_CFG                 0x120c
+
+#define MT_BB_PA_MODE_CFG0             0x1214
+#define MT_BB_PA_MODE_CFG1             0x1218
+#define MT_RF_PA_MODE_CFG0             0x121c
+#define MT_RF_PA_MODE_CFG1             0x1220
+
+#define MT_RF_PA_MODE_ADJ0             0x1228
+#define MT_RF_PA_MODE_ADJ1             0x122c
+
+#define MT_DACCLK_EN_DLY_CFG           0x1264
+
+#define MT_EDCA_CFG_BASE               0x1300
+#define MT_EDCA_CFG_AC(_n)             (MT_EDCA_CFG_BASE + ((_n) << 2))
+#define MT_EDCA_CFG_TXOP               GENMASK(7, 0)
+#define MT_EDCA_CFG_AIFSN              GENMASK(11, 8)
+#define MT_EDCA_CFG_CWMIN              GENMASK(15, 12)
+#define MT_EDCA_CFG_CWMAX              GENMASK(19, 16)
+
+#define MT_TX_PWR_CFG_0                        0x1314
+#define MT_TX_PWR_CFG_1                        0x1318
+#define MT_TX_PWR_CFG_2                        0x131c
+#define MT_TX_PWR_CFG_3                        0x1320
+#define MT_TX_PWR_CFG_4                        0x1324
+
+#define MT_TX_BAND_CFG                 0x132c
+#define MT_TX_BAND_CFG_UPPER_40M       BIT(0)
+#define MT_TX_BAND_CFG_5G              BIT(1)
+#define MT_TX_BAND_CFG_2G              BIT(2)
+
+#define MT_HT_FBK_TO_LEGACY            0x1384
+#define MT_TX_MPDU_ADJ_INT             0x1388
+
+#define MT_TX_PWR_CFG_7                        0x13d4
+#define MT_TX_PWR_CFG_8                        0x13d8
+#define MT_TX_PWR_CFG_9                        0x13dc
+
+#define MT_TX_SW_CFG0                  0x1330
+#define MT_TX_SW_CFG1                  0x1334
+#define MT_TX_SW_CFG2                  0x1338
+
+#define MT_TXOP_CTRL_CFG               0x1340
+
+#define MT_TX_RTS_CFG                  0x1344
+#define MT_TX_RTS_CFG_RETRY_LIMIT      GENMASK(7, 0)
+#define MT_TX_RTS_CFG_THRESH           GENMASK(23, 8)
+#define MT_TX_RTS_FALLBACK             BIT(24)
+
+#define MT_TX_TIMEOUT_CFG              0x1348
+#define MT_TX_TIMEOUT_CFG_ACKTO                GENMASK(15, 8)
+
+#define MT_TX_RETRY_CFG                        0x134c
+#define MT_VHT_HT_FBK_CFG1             0x1358
+
+#define MT_PROT_CFG_RATE               GENMASK(15, 0)
+#define MT_PROT_CFG_CTRL               GENMASK(17, 16)
+#define MT_PROT_CFG_NAV                        GENMASK(19, 18)
+#define MT_PROT_CFG_TXOP_ALLOW         GENMASK(25, 20)
+#define MT_PROT_CFG_RTS_THRESH         BIT(26)
+
+#define MT_CCK_PROT_CFG                        0x1364
+#define MT_OFDM_PROT_CFG               0x1368
+#define MT_MM20_PROT_CFG               0x136c
+#define MT_MM40_PROT_CFG               0x1370
+#define MT_GF20_PROT_CFG               0x1374
+#define MT_GF40_PROT_CFG               0x1378
+
+#define MT_EXP_ACK_TIME                        0x1380
+
+#define MT_TX_PWR_CFG_0_EXT            0x1390
+#define MT_TX_PWR_CFG_1_EXT            0x1394
+
+#define MT_TX_FBK_LIMIT                        0x1398
+#define MT_TX_FBK_LIMIT_MPDU_FBK       GENMASK(7, 0)
+#define MT_TX_FBK_LIMIT_AMPDU_FBK      GENMASK(15, 8)
+#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR  BIT(16)
+#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17)
+#define MT_TX_FBK_LIMIT_RATE_LUT       BIT(18)
+
+#define MT_TX0_RF_GAIN_CORR            0x13a0
+#define MT_TX1_RF_GAIN_CORR            0x13a4
+
+#define MT_TX_ALC_CFG_0                        0x13b0
+#define MT_TX_ALC_CFG_0_CH_INIT_0      GENMASK(5, 0)
+#define MT_TX_ALC_CFG_0_CH_INIT_1      GENMASK(13, 8)
+#define MT_TX_ALC_CFG_0_LIMIT_0                GENMASK(21, 16)
+#define MT_TX_ALC_CFG_0_LIMIT_1                GENMASK(29, 24)
+
+#define MT_TX_ALC_CFG_1                        0x13b4
+#define MT_TX_ALC_CFG_1_TEMP_COMP      GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_2                        0x13a8
+#define MT_TX_ALC_CFG_2_TEMP_COMP      GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_3                        0x13ac
+#define MT_TX_ALC_CFG_4                        0x13c0
+#define MT_TX_ALC_CFG_4_LOWGAIN_CH_EN  BIT(31)
+
+#define MT_TX_ALC_VGA3                 0x13c8
+
+#define MT_TX_PROT_CFG6                        0x13e0
+#define MT_TX_PROT_CFG7                        0x13e4
+#define MT_TX_PROT_CFG8                        0x13e8
+
+#define MT_PIFS_TX_CFG                 0x13ec
+
+#define MT_RX_FILTR_CFG                        0x1400
+
+#define MT_RX_FILTR_CFG_CRC_ERR                BIT(0)
+#define MT_RX_FILTR_CFG_PHY_ERR                BIT(1)
+#define MT_RX_FILTR_CFG_PROMISC                BIT(2)
+#define MT_RX_FILTR_CFG_OTHER_BSS      BIT(3)
+#define MT_RX_FILTR_CFG_VER_ERR                BIT(4)
+#define MT_RX_FILTR_CFG_MCAST          BIT(5)
+#define MT_RX_FILTR_CFG_BCAST          BIT(6)
+#define MT_RX_FILTR_CFG_DUP            BIT(7)
+#define MT_RX_FILTR_CFG_CFACK          BIT(8)
+#define MT_RX_FILTR_CFG_CFEND          BIT(9)
+#define MT_RX_FILTR_CFG_ACK            BIT(10)
+#define MT_RX_FILTR_CFG_CTS            BIT(11)
+#define MT_RX_FILTR_CFG_RTS            BIT(12)
+#define MT_RX_FILTR_CFG_PSPOLL         BIT(13)
+#define MT_RX_FILTR_CFG_BA             BIT(14)
+#define MT_RX_FILTR_CFG_BAR            BIT(15)
+#define MT_RX_FILTR_CFG_CTRL_RSV       BIT(16)
+
+#define MT_LEGACY_BASIC_RATE           0x1408
+#define MT_HT_BASIC_RATE               0x140c
+
+#define MT_HT_CTRL_CFG                 0x1410
+
+#define MT_EXT_CCA_CFG                 0x141c
+#define MT_EXT_CCA_CFG_CCA0            GENMASK(1, 0)
+#define MT_EXT_CCA_CFG_CCA1            GENMASK(3, 2)
+#define MT_EXT_CCA_CFG_CCA2            GENMASK(5, 4)
+#define MT_EXT_CCA_CFG_CCA3            GENMASK(7, 6)
+#define MT_EXT_CCA_CFG_CCA_MASK                GENMASK(11, 8)
+#define MT_EXT_CCA_CFG_ED_CCA_MASK     GENMASK(15, 12)
+
+#define MT_TX_SW_CFG3                  0x1478
+
+#define MT_PN_PAD_MODE                 0x150c
+
+#define MT_TXOP_HLDR_ET                        0x1608
+
+#define MT_PROT_AUTO_TX_CFG            0x1648
+#define MT_PROT_AUTO_TX_CFG_PROT_PADJ  GENMASK(11, 8)
+#define MT_PROT_AUTO_TX_CFG_AUTO_PADJ  GENMASK(27, 24)
+
+#define MT_RX_STAT_0                   0x1700
+#define MT_RX_STAT_0_CRC_ERRORS                GENMASK(15, 0)
+#define MT_RX_STAT_0_PHY_ERRORS                GENMASK(31, 16)
+
+#define MT_RX_STAT_1                   0x1704
+#define MT_RX_STAT_1_CCA_ERRORS                GENMASK(15, 0)
+#define MT_RX_STAT_1_PLCP_ERRORS       GENMASK(31, 16)
+
+#define MT_RX_STAT_2                   0x1708
+#define MT_RX_STAT_2_DUP_ERRORS                GENMASK(15, 0)
+#define MT_RX_STAT_2_OVERFLOW_ERRORS   GENMASK(31, 16)
+
+#define MT_TX_STAT_FIFO                        0x1718
+#define MT_TX_STAT_FIFO_VALID          BIT(0)
+#define MT_TX_STAT_FIFO_SUCCESS                BIT(5)
+#define MT_TX_STAT_FIFO_AGGR           BIT(6)
+#define MT_TX_STAT_FIFO_ACKREQ         BIT(7)
+#define MT_TX_STAT_FIFO_WCID           GENMASK(15, 8)
+#define MT_TX_STAT_FIFO_RATE           GENMASK(31, 16)
+
+#define MT_TX_AGG_CNT_BASE0            0x1720
+#define MT_TX_AGG_CNT_BASE1            0x174c
+
+/* Aggregation counter registers are split across two banks; the
+ * argument is fully parenthesized so any expression expands with the
+ * intended precedence.
+ */
+#define MT_TX_AGG_CNT(_id)             ((_id) < 8 ?                    \
+                                        MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
+                                        MT_TX_AGG_CNT_BASE1 + (((_id) - 8) << 2))
+
+#define MT_TX_STAT_FIFO_EXT            0x1798
+#define MT_TX_STAT_FIFO_EXT_RETRY      GENMASK(7, 0)
+#define MT_TX_STAT_FIFO_EXT_PKTID      GENMASK(15, 8)
+
+#define MT_WCID_TX_RATE_BASE           0x1c00
+#define MT_WCID_TX_RATE(_i)            (MT_WCID_TX_RATE_BASE + ((_i) << 3))
+
+#define MT_BBP_CORE_BASE               0x2000
+#define MT_BBP_IBI_BASE                        0x2100
+#define MT_BBP_AGC_BASE                        0x2300
+#define MT_BBP_TXC_BASE                        0x2400
+#define MT_BBP_RXC_BASE                        0x2500
+#define MT_BBP_TXO_BASE                        0x2600
+#define MT_BBP_TXBE_BASE               0x2700
+#define MT_BBP_RXFE_BASE               0x2800
+#define MT_BBP_RXO_BASE                        0x2900
+#define MT_BBP_DFS_BASE                        0x2a00
+#define MT_BBP_TR_BASE                 0x2b00
+#define MT_BBP_CAL_BASE                        0x2c00
+#define MT_BBP_DSC_BASE                        0x2e00
+#define MT_BBP_PFMU_BASE               0x2f00
+
+#define MT_BBP(_type, _n)              (MT_BBP_##_type##_BASE + ((_n) << 2))
+
+#define MT_BBP_CORE_R1_BW              GENMASK(4, 3)
+
+#define MT_BBP_AGC_R0_CTRL_CHAN                GENMASK(9, 8)
+#define MT_BBP_AGC_R0_BW               GENMASK(14, 12)
+
+/* AGC, R4/R5 */
+#define MT_BBP_AGC_LNA_HIGH_GAIN       GENMASK(21, 16)
+#define MT_BBP_AGC_LNA_MID_GAIN                GENMASK(13, 8)
+#define MT_BBP_AGC_LNA_LOW_GAIN                GENMASK(5, 0)
+
+/* AGC, R6/R7 */
+#define MT_BBP_AGC_LNA_ULOW_GAIN       GENMASK(5, 0)
+
+/* AGC, R8/R9 */
+#define MT_BBP_AGC_LNA_GAIN_MODE       GENMASK(7, 6)
+#define MT_BBP_AGC_GAIN                        GENMASK(14, 8)
+
+#define MT_BBP_AGC20_RSSI0             GENMASK(7, 0)
+#define MT_BBP_AGC20_RSSI1             GENMASK(15, 8)
+
+#define MT_BBP_TXBE_R0_CTRL_CHAN       GENMASK(1, 0)
+
+#define MT_WCID_ADDR_BASE              0x1800
+#define MT_WCID_ADDR(_n)               (MT_WCID_ADDR_BASE + (_n) * 8)
+
+#define MT_SRAM_BASE                   0x4000
+
+#define MT_WCID_KEY_BASE               0x8000
+#define MT_WCID_KEY(_n)                        (MT_WCID_KEY_BASE + (_n) * 32)
+
+#define MT_WCID_IV_BASE                        0xa000
+#define MT_WCID_IV(_n)                 (MT_WCID_IV_BASE + (_n) * 8)
+
+#define MT_WCID_ATTR_BASE              0xa800
+#define MT_WCID_ATTR(_n)               (MT_WCID_ATTR_BASE + (_n) * 4)
+
+#define MT_WCID_ATTR_PAIRWISE          BIT(0)
+#define MT_WCID_ATTR_PKEY_MODE         GENMASK(3, 1)
+#define MT_WCID_ATTR_BSS_IDX           GENMASK(6, 4)
+#define MT_WCID_ATTR_RXWI_UDF          GENMASK(9, 7)
+#define MT_WCID_ATTR_PKEY_MODE_EXT     BIT(10)
+#define MT_WCID_ATTR_BSS_IDX_EXT       BIT(11)
+#define MT_WCID_ATTR_WAPI_MCBC         BIT(15)
+#define MT_WCID_ATTR_WAPI_KEYID                GENMASK(31, 24)
+
+/* Shared key tables: keys for BSS 0-7 live at MT_SKEY_BASE_0, BSS 8-15
+ * at MT_SKEY_BASE_1; same split for the per-BSS key mode nibbles.
+ * All macro arguments are parenthesized for expansion safety.
+ */
+#define MT_SKEY_BASE_0                 0xac00
+#define MT_SKEY_BASE_1                 0xb400
+#define MT_SKEY_0(_bss, _idx)          (MT_SKEY_BASE_0 + (4 * (_bss) + (_idx)) * 32)
+#define MT_SKEY_1(_bss, _idx)          (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + (_idx)) * 32)
+#define MT_SKEY(_bss, _idx)            (((_bss) & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
+
+#define MT_SKEY_MODE_BASE_0            0xb000
+#define MT_SKEY_MODE_BASE_1            0xb3f0
+#define MT_SKEY_MODE_0(_bss)           (MT_SKEY_MODE_BASE_0 + (((_bss) / 2) << 2))
+#define MT_SKEY_MODE_1(_bss)           (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
+#define MT_SKEY_MODE(_bss)             (((_bss) & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
+#define MT_SKEY_MODE_MASK              GENMASK(3, 0)
+#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * ((_bss) & 1)))
+
+#define MT_BEACON_BASE                 0xc000
+
+#define MT_TEMP_SENSOR                 0x1d000
+#define MT_TEMP_SENSOR_VAL             GENMASK(6, 0)
+
+/* Hardware layout of one WCID address table entry (see MT_WCID_ADDR):
+ * station MAC address followed by a block-ack mask.
+ */
+struct mt76_wcid_addr {
+       u8 macaddr[6];
+       __le16 ba_mask;
+} __packed __aligned(4);
+
+/* Hardware layout of one WCID key table entry (see MT_WCID_KEY):
+ * 128-bit cipher key plus per-direction MIC keys.
+ */
+struct mt76_wcid_key {
+       u8 key[16];
+       u8 tx_mic[8];
+       u8 rx_mic[8];
+} __packed __aligned(4);
+
+/* Hardware cipher type identifiers (implicit values 0..8).
+ * NOTE(review): presumably these match the on-chip key mode encoding
+ * programmed via MT_WCID_ATTR_PKEY_MODE / MT_SKEY_MODE — confirm
+ * against the MT76x2 datasheet before reordering.
+ */
+enum mt76x2_cipher_type {
+       MT_CIPHER_NONE,
+       MT_CIPHER_WEP40,
+       MT_CIPHER_WEP104,
+       MT_CIPHER_TKIP,
+       MT_CIPHER_AES_CCMP,
+       MT_CIPHER_CKIP40,
+       MT_CIPHER_CKIP104,
+       MT_CIPHER_CKIP128,
+       MT_CIPHER_WAPI,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c
new file mode 100644 (file)
index 0000000..a09f117
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+/* CREATE_TRACE_POINTS must be defined in exactly one translation unit
+ * before including the trace header, so the tracepoint definitions are
+ * emitted here.  Hidden from sparse (__CHECKER__) to avoid false
+ * warnings from the tracepoint macro expansion.
+ */
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "mt76x2_trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h
new file mode 100644 (file)
index 0000000..4cd4241
--- /dev/null
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(__MT76x2_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76x2_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76x2.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76x2
+
+#define MAXNAME                32
+#define DEV_ENTRY      __array(char, wiphy_name, 32)
+#define DEV_ASSIGN     strlcpy(__entry->wiphy_name, wiphy_name(mt76_hw(dev)->wiphy), MAXNAME)
+#define DEV_PR_FMT     "%s"
+#define DEV_PR_ARG     __entry->wiphy_name
+
+#define TXID_ENTRY     __field(u8, wcid) __field(u8, pktid)
+#define TXID_ASSIGN    __entry->wcid = wcid; __entry->pktid = pktid
+#define TXID_PR_FMT    " [%d:%d]"
+#define TXID_PR_ARG    __entry->wcid, __entry->pktid
+
+/* Event class for tracepoints that record only the device identity. */
+DECLARE_EVENT_CLASS(dev_evt,
+       TP_PROTO(struct mt76x2_dev *dev),
+       TP_ARGS(dev),
+       TP_STRUCT__entry(
+               DEV_ENTRY
+       ),
+       TP_fast_assign(
+               DEV_ASSIGN;
+       ),
+       TP_printk(DEV_PR_FMT, DEV_PR_ARG)
+);
+
+/* Event class for tracepoints that record the device plus a
+ * (wcid, pktid) TX identifier pair.
+ */
+DECLARE_EVENT_CLASS(dev_txid_evt,
+       TP_PROTO(struct mt76x2_dev *dev, u8 wcid, u8 pktid),
+       TP_ARGS(dev, wcid, pktid),
+       TP_STRUCT__entry(
+               DEV_ENTRY
+               TXID_ENTRY
+       ),
+       TP_fast_assign(
+               DEV_ASSIGN;
+               TXID_ASSIGN;
+       ),
+       TP_printk(
+               DEV_PR_FMT TXID_PR_FMT,
+               DEV_PR_ARG, TXID_PR_ARG
+       )
+);
+
+/* trace_mac_txstat_poll(): emitted when the TX status FIFO is polled. */
+DEFINE_EVENT(dev_evt, mac_txstat_poll,
+       TP_PROTO(struct mt76x2_dev *dev),
+       TP_ARGS(dev)
+);
+
+/* trace_mac_txdone_add(): emitted per (wcid, pktid) pair.
+ * NOTE(review): semantics inferred from the name — confirm at the
+ * call sites in the MAC code.
+ */
+DEFINE_EVENT(dev_txid_evt, mac_txdone_add,
+       TP_PROTO(struct mt76x2_dev *dev, u8 wcid, u8 pktid),
+       TP_ARGS(dev, wcid, pktid)
+);
+
+/* trace_mac_txstat_fetch(): snapshots one decoded TX status entry
+ * (success/aggregation/ack-request flags, rate code and retry count)
+ * copied field-by-field from struct mt76x2_tx_status.
+ */
+TRACE_EVENT(mac_txstat_fetch,
+       TP_PROTO(struct mt76x2_dev *dev,
+                struct mt76x2_tx_status *stat),
+
+       TP_ARGS(dev, stat),
+
+       TP_STRUCT__entry(
+               DEV_ENTRY
+               TXID_ENTRY
+               __field(bool, success)
+               __field(bool, aggr)
+               __field(bool, ack_req)
+               __field(u16, rate)
+               __field(u8, retry)
+       ),
+
+       TP_fast_assign(
+               DEV_ASSIGN;
+               __entry->success = stat->success;
+               __entry->aggr = stat->aggr;
+               __entry->ack_req = stat->ack_req;
+               __entry->wcid = stat->wcid;
+               __entry->pktid = stat->pktid;
+               __entry->rate = stat->rate;
+               __entry->retry = stat->retry;
+       ),
+
+       TP_printk(
+               DEV_PR_FMT TXID_PR_FMT
+               " success:%d aggr:%d ack_req:%d"
+               " rate:%04x retry:%d",
+               DEV_PR_ARG, TXID_PR_ARG,
+               __entry->success, __entry->aggr, __entry->ack_req,
+               __entry->rate, __entry->retry
+       )
+);
+
+
+/* trace_dev_irq(): records the raw interrupt status word and the
+ * active interrupt mask at IRQ time.
+ */
+TRACE_EVENT(dev_irq,
+       TP_PROTO(struct mt76x2_dev *dev, u32 val, u32 mask),
+
+       TP_ARGS(dev, val, mask),
+
+       TP_STRUCT__entry(
+               DEV_ENTRY
+               __field(u32, val)
+               __field(u32, mask)
+       ),
+
+       TP_fast_assign(
+               DEV_ASSIGN;
+               __entry->val = val;
+               __entry->mask = mask;
+       ),
+
+       TP_printk(
+               DEV_PR_FMT " %08x & %08x",
+               DEV_PR_ARG, __entry->val, __entry->mask
+       )
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mt76x2_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
new file mode 100644 (file)
index 0000000..1a32e1f
--- /dev/null
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_dma.h"
+
/* Context for collecting buffered broadcast/multicast frames in the
 * pre-TBTT tasklet.
 */
struct beacon_bc_data {
	struct mt76x2_dev *dev;		/* owning device */
	struct sk_buff_head q;		/* gathered bc/mc frames, tx order */
	/* last queued frame per vif index, so its "more data" bit can be
	 * cleared; 8 slots — presumably the max beaconing vifs, matching
	 * the width of dev->beacon_mask usage. TODO confirm.
	 */
	struct sk_buff *tail[8];
};
+
+void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+            struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct mt76x2_dev *dev = hw->priv;
+       struct ieee80211_vif *vif = info->control.vif;
+       struct mt76_wcid *wcid = &dev->global_wcid;
+
+       if (control->sta) {
+               struct mt76x2_sta *msta;
+
+               msta = (struct mt76x2_sta *) control->sta->drv_priv;
+               wcid = &msta->wcid;
+       } else if (vif) {
+               struct mt76x2_vif *mvif;
+
+               mvif = (struct mt76x2_vif *) vif->drv_priv;
+               wcid = &mvif->group_wcid;
+       }
+
+       mt76_tx(&dev->mt76, control->sta, wcid, skb);
+}
+
/* Complete a transmitted frame back to mac80211.
 *
 * A-MPDU frames are simply freed here — presumably their status is
 * reported separately via the MAC TX status path (NOTE(review): confirm).
 * All other frames are reported as acked with no rate information
 * (rates[0].idx = -1).
 */
void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
		ieee80211_free_txskb(mt76_hw(dev), skb);
	} else {
		ieee80211_tx_info_clear_status(info);
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
		ieee80211_tx_status(mt76_hw(dev), skb);
	}
}
+
/* Look up the per-rate maximum TX power limit for @rate from the
 * calibrated rate power tables.
 */
s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
			       const struct ieee80211_tx_rate *rate)
{
	s8 max_txpwr;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		u8 mcs = ieee80211_rate_get_vht_mcs(rate);

		if (mcs == 8 || mcs == 9) {
			max_txpwr = dev->rate_power.vht[8];
		} else {
			u8 nss, idx;

			nss = ieee80211_rate_get_vht_nss(rate);
			idx = ((nss - 1) << 3) + mcs;
			/* VHT MCS 0-7 share the HT table entries;
			 * NOTE(review): assumes the ht[] table covers both
			 * rate families — confirm against the cal data layout.
			 */
			max_txpwr = dev->rate_power.ht[idx & 0xf];
		}
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
	} else {
		enum nl80211_band band = dev->mt76.chandef.chan->band;

		if (band == NL80211_BAND_2GHZ) {
			const struct ieee80211_rate *r;
			struct wiphy *wiphy = mt76_hw(dev)->wiphy;
			struct mt76_rate_power *rp = &dev->rate_power;

			/* 2 GHz legacy rates can be CCK or OFDM; pick the
			 * table by the short-preamble capability flag.
			 */
			r = &wiphy->bands[band]->bitrates[rate->idx];
			if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
				max_txpwr = rp->cck[r->hw_value & 0x3];
			else
				max_txpwr = rp->ofdm[r->hw_value & 0x7];
		} else {
			/* 5 GHz legacy rates are OFDM only */
			max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
		}
	}

	return max_txpwr;
}
+
+s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj)
+{
+       txpwr = min_t(s8, txpwr, dev->txpower_conf);
+       txpwr -= (dev->target_power + dev->target_power_delta[0]);
+       txpwr = min_t(s8, txpwr, max_txpwr_adj);
+
+       if (!dev->enable_tpc)
+               return 0;
+       else if (txpwr >= 0)
+               return min_t(s8, txpwr, 7);
+       else
+               return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
+}
+
/* Program the automatic TX power adjustment registers from @txpwr,
 * using the OFDM 24M entry as the per-rate limit.
 */
void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr)
{
	s8 txpwr_adj;

	txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr,
					    dev->rate_power.ofdm[4]);
	mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
		       MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
	mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
		       MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
}
+
/* Pad the 802.11 header to a multiple of 4 bytes, as required by the
 * hardware frame layout: shift the header 2 bytes forward and zero the
 * gap between header and payload.  Returns the number of bytes added
 * (0 or 2).  NOTE(review): assumes callers guarantee >= 2 bytes of skb
 * headroom — confirm.
 */
static int mt76x2_insert_hdr_pad(struct sk_buff *skb)
{
	int len = ieee80211_get_hdrlen_from_skb(skb);

	if (len % 4 == 0)
		return 0;

	skb_push(skb, 2);
	memmove(skb->data, skb->data + 2, len);

	skb->data[len] = 0;
	skb->data[len + 1] = 0;
	return 2;
}
+
/* mt76 .tx_prepare_skb hook: fill the TXWI for @skb, pad the header, and
 * build the DMA tx_info word (queue selection and 802.11 flags).
 * Returns 0 on success or a negative error.
 */
int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
			  struct sk_buff *skb, struct mt76_queue *q,
			  struct mt76_wcid *wcid, struct ieee80211_sta *sta,
			  u32 *tx_info)
{
	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int qsel = MT_QSEL_EDCA;
	int ret;

	/* frames on the PS queue must not be dropped for this station */
	if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
		mt76x2_mac_wcid_set_drop(dev, wcid->idx, false);

	mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta);

	ret = mt76x2_insert_hdr_pad(skb);
	if (ret < 0)
		return ret;

	/* rate probing frames go through the management queue */
	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
		qsel = MT_QSEL_MGMT;

	*tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
		   MT_TXD_INFO_80211;

	/* no per-station key: let hardware insert the WCID/IV info */
	if (!wcid || wcid->hw_key_idx == 0xff)
		*tx_info |= MT_TXD_INFO_WIV;

	return 0;
}
+
+static void
+mt76x2_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct mt76x2_dev *dev = (struct mt76x2_dev *) priv;
+       struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+       struct sk_buff *skb = NULL;
+
+       if (!(dev->beacon_mask & BIT(mvif->idx)))
+               return;
+
+       skb = ieee80211_beacon_get(mt76_hw(dev), vif);
+       if (!skb)
+               return;
+
+       mt76x2_mac_set_beacon(dev, mvif->idx, skb);
+}
+
/* Interface iterator: pull one buffered broadcast/multicast frame from
 * mac80211 for a beaconing vif, mark it "more data", and append it to the
 * collection queue.  data->tail[] remembers the vif's last frame so the
 * caller can clear the more-data bit on it afterwards.
 */
static void
mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct beacon_bc_data *data = priv;
	struct mt76x2_dev *dev = data->dev;
	struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;

	if (!(dev->beacon_mask & BIT(mvif->idx)))
		return;

	skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
	if (!skb)
		return;

	info = IEEE80211_SKB_CB(skb);
	info->control.vif = vif;
	info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
	mt76_skb_set_moredata(skb, true);
	__skb_queue_tail(&data->q, skb);
	data->tail[mvif->idx] = skb;
}
+
/* Pre-TBTT tasklet: refresh beacon templates, then drain all buffered
 * broadcast/multicast frames (one per vif per pass, until no vif yields a
 * new frame), clear the more-data bit on each vif's final frame, and queue
 * everything on the PS hardware queue.
 */
void mt76x2_pre_tbtt_tasklet(unsigned long arg)
{
	struct mt76x2_dev *dev = (struct mt76x2_dev *) arg;
	struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
	struct beacon_bc_data data = {};
	struct sk_buff *skb;
	int i, nframes;

	data.dev = dev;
	__skb_queue_head_init(&data.q);

	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
		IEEE80211_IFACE_ITER_RESUME_ALL,
		mt76x2_update_beacon_iter, dev);

	/* each pass fetches at most one frame per vif; loop until a pass
	 * adds nothing new
	 */
	do {
		nframes = skb_queue_len(&data.q);
		ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
			IEEE80211_IFACE_ITER_RESUME_ALL,
			mt76x2_add_buffered_bc, &data);
	} while (nframes != skb_queue_len(&data.q));

	if (!nframes)
		return;

	/* the last frame of each vif must not advertise more data */
	for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
		if (!data.tail[i])
			continue;

		mt76_skb_set_moredata(data.tail[i], false);
	}

	spin_lock_bh(&q->lock);
	while ((skb = __skb_dequeue(&data.q)) != NULL) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct ieee80211_vif *vif = info->control.vif;
		struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;

		mt76_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid, NULL);
	}
	spin_unlock_bh(&q->lock);
}
+
diff --git a/drivers/net/wireless/mediatek/mt76/trace.c b/drivers/net/wireless/mediatek/mt76/trace.c
new file mode 100644 (file)
index 0000000..ea4ab87
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/trace.h b/drivers/net/wireless/mediatek/mt76/trace.h
new file mode 100644 (file)
index 0000000..ea30895
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(__MT76_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76
+
/* Common trace-event helpers: every event carries the wiphy name so
 * multiple devices can be told apart in a trace.
 * Fix: DEV_ENTRY previously hard-coded 32 while DEV_ASSIGN bounded the
 * copy with MAXNAME — use MAXNAME in both so the sizes cannot drift apart.
 */
#define MAXNAME		32
#define DEV_ENTRY   __array(char, wiphy_name, MAXNAME)
#define DEV_ASSIGN  strlcpy(__entry->wiphy_name, wiphy_name(dev->hw->wiphy), MAXNAME)
#define DEV_PR_FMT  "%s"
#define DEV_PR_ARG  __entry->wiphy_name

/* Register access helpers: 32-bit register offset and value */
#define REG_ENTRY	__field(u32, reg) __field(u32, val)
#define REG_ASSIGN	__entry->reg = reg; __entry->val = val
#define REG_PR_FMT	" %04x=%08x"
#define REG_PR_ARG	__entry->reg, __entry->val
+
/* Event class for register accesses; instantiated below for reads
 * (reg_rr) and writes (reg_wr).
 */
DECLARE_EVENT_CLASS(dev_reg_evt,
	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
	TP_ARGS(dev, reg, val),
	TP_STRUCT__entry(
		DEV_ENTRY
		REG_ENTRY
	),
	TP_fast_assign(
		DEV_ASSIGN;
		REG_ASSIGN;
	),
	TP_printk(
		DEV_PR_FMT REG_PR_FMT,
		DEV_PR_ARG, REG_PR_ARG
	)
);

/* register read */
DEFINE_EVENT(dev_reg_evt, reg_rr,
	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
	TP_ARGS(dev, reg, val)
);

/* register write */
DEFINE_EVENT(dev_reg_evt, reg_wr,
	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
	TP_ARGS(dev, reg, val)
);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
new file mode 100644 (file)
index 0000000..4eef69b
--- /dev/null
@@ -0,0 +1,511 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76.h"
+
+static struct mt76_txwi_cache *
+mt76_alloc_txwi(struct mt76_dev *dev)
+{
+       struct mt76_txwi_cache *t;
+       dma_addr_t addr;
+       int size;
+
+       size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
+       t = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
+       if (!t)
+               return NULL;
+
+       addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi),
+                             DMA_TO_DEVICE);
+       t->dma_addr = addr;
+
+       return t;
+}
+
/* Pop one entry from the TXWI cache free list, or return NULL when the
 * list is empty.  Takes dev->lock.
 */
static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->lock);

	return t;
}
+
/* Get a TXWI cache entry: reuse one from the free list when available,
 * otherwise allocate a fresh one.  May return NULL on allocation failure.
 */
static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (!t)
		t = mt76_alloc_txwi(dev);

	return t;
}
+
/* Return a TXWI cache entry to the free list.  NULL is accepted and
 * ignored.  Takes dev->lock.
 */
void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock_bh(&dev->lock);
}
+
/* Drain the TXWI cache and unmap each entry's DMA mapping.  The entries
 * themselves are devm-allocated, so no explicit free is needed here.
 */
void mt76_tx_free(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	while ((t = __mt76_get_txwi(dev)) != NULL)
		dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi),
				 DMA_TO_DEVICE);
}
+
+static int
+mt76_txq_get_qid(struct ieee80211_txq *txq)
+{
+       if (!txq->sta)
+               return MT_TXQ_BE;
+
+       return txq->ac;
+}
+
/* Queue one frame for DMA: grab a TXWI, let the driver fill it, then map
 * the skb head and all fragments into a scatter list and hand it to the
 * queue backend.  buf[0] is always the TXWI, buf[1] the skb head, then
 * one entry per fragment.  On any failure the frame is completed back to
 * the driver and a negative error is returned.
 */
int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      struct sk_buff *skb, struct mt76_wcid *wcid,
		      struct ieee80211_sta *sta)
{
	struct mt76_queue_entry e;
	struct mt76_txwi_cache *t;
	struct mt76_queue_buf buf[32];
	struct sk_buff *iter;
	dma_addr_t addr;
	int len;
	u32 tx_info = 0;
	int n, ret;

	t = mt76_get_txwi(dev);
	if (!t) {
		ieee80211_free_txskb(dev->hw, skb);
		return -ENOMEM;
	}

	/* hand the (persistently mapped) TXWI to the CPU while the driver
	 * writes it, then back to the device
	 */
	dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
				       &tx_info);
	dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto free;

	/* linear part only; fragments are mapped below */
	len = skb->len - skb->data_len;
	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, addr)) {
		ret = -ENOMEM;
		goto free;
	}

	n = 0;
	buf[n].addr = t->dma_addr;
	buf[n++].len = dev->drv->txwi_size;
	buf[n].addr = addr;
	buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(buf))
			goto unmap;

		addr = dma_map_single(dev->dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(dev->dev, addr))
			goto unmap;

		buf[n].addr = addr;
		buf[n++].len = iter->len;
	}

	/* each descriptor holds two buffers; keep one descriptor spare.
	 * NOTE(review): buffers mapped above are leaked on this early
	 * bailout only via the unmap path below, which is taken — OK.
	 */
	if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
		goto unmap;

	return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);

unmap:
	ret = -ENOMEM;
	/* unmap everything except buf[0]: the TXWI mapping is persistent
	 * and owned by the cache entry
	 */
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
				 DMA_TO_DEVICE);

free:
	e.skb = skb;
	e.txwi = t;
	dev->drv->tx_complete_skb(dev, q, &e, true);
	mt76_put_txwi(dev, t);
	return ret;
}
EXPORT_SYMBOL_GPL(mt76_tx_queue_skb);
+
/* Core TX entry: pick the data hardware queue from the skb's mapping
 * (PS/beacon queues are rejected), fill in default rates when the WCID
 * has none fixed, queue the frame and kick the hardware.  Stops the
 * mac80211 queue when the hardware ring runs low on descriptors.
 */
void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	q = &dev->q_tx[qid];

	spin_lock_bh(&q->lock);
	mt76_tx_queue_skb(dev, q, skb, wcid, sta);
	dev->queue_ops->kick(dev, q);

	/* back-pressure: fewer than 8 free descriptors left */
	if (q->queued > q->ndesc - 8)
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);
+
+static struct sk_buff *
+mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
+{
+       struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+       struct sk_buff *skb;
+
+       skb = skb_dequeue(&mtxq->retry_q);
+       if (skb) {
+               u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+
+               if (ps && skb_queue_empty(&mtxq->retry_q))
+                       ieee80211_sta_set_buffered(txq->sta, tid, false);
+
+               return skb;
+       }
+
+       skb = ieee80211_tx_dequeue(dev->hw, txq);
+       if (!skb)
+               return NULL;
+
+       return skb;
+}
+
/* Track the expected aggregation starting sequence number: after sending
 * a QoS data frame, the next SSN is its sequence number + 1 (sequence
 * numbers occupy the high 12 bits of seq_ctrl, hence the 0x10 step).
 */
static void
mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
+
/* Queue one power-save response frame on the PS hardware queue.  The
 * last frame of a service period gets EOSP and clears the more-data bit;
 * earlier frames keep more-data set.
 */
static void
mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP;

	mt76_skb_set_moredata(skb, !last);
	mt76_tx_queue_skb(dev, hwq, skb, wcid, sta);
}
+
/* mac80211 .release_buffered_frames: release up to @nframes buffered
 * frames for the TIDs in @tids as a power-save service period.  Frames
 * are queued one behind so the final frame can be flagged as the last
 * (EOSP, no more-data) before the queue is kicked.
 */
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_dev *dev = hw->priv;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(dev, mtxq, true);
			if (!skb)
				break;

			if (mtxq->aggr)
				mt76_check_agg_ssn(mtxq, skb);

			nframes--;
			/* queue the previous frame now that we know it is
			 * not the last one
			 */
			if (last_skb)
				mt76_queue_ps_skb(dev, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(dev, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	}
	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
+
/* Send a burst of frames from one txq: up to 16 frames when aggregating,
 * 3 otherwise, all locked to the first frame's rate.  A rate-probe frame
 * is always sent alone.  Stops early (requeueing to retry_q) when the
 * aggregation state or probe flag changes mid-burst.  Sets *empty when
 * the txq ran dry.  Returns the number of frames sent, or a negative
 * error/-EBUSY during scan/reset.
 */
static int
mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
		    struct mt76_txq *mtxq, bool *empty)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	struct mt76_wcid *wcid = mtxq->wcid;
	struct sk_buff *skb;
	int n_frames = 1, limit;
	struct ieee80211_tx_rate tx_rate;
	bool ampdu;
	bool probe;
	int idx;

	skb = mt76_txq_dequeue(dev, mtxq, false);
	if (!skb) {
		*empty = true;
		return 0;
	}

	info = IEEE80211_SKB_CB(skb);
	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);
	/* remember the first frame's rate; the rest of the burst reuses it */
	tx_rate = info->control.rates[0];

	probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
	ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
	limit = ampdu ? 16 : 3;

	if (ampdu)
		mt76_check_agg_ssn(mtxq, skb);

	idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);

	if (idx < 0)
		return idx;

	do {
		bool cur_ampdu;

		if (probe)
			break;

		/* NOTE(review): returning here after frames were already
		 * queued skips the swq_queued/schedule bookkeeping below —
		 * confirm this is intended during scan/reset
		 */
		if (test_bit(MT76_SCANNING, &dev->state) ||
		    test_bit(MT76_RESET, &dev->state))
			return -EBUSY;

		skb = mt76_txq_dequeue(dev, mtxq, false);
		if (!skb) {
			*empty = true;
			break;
		}

		info = IEEE80211_SKB_CB(skb);
		cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;

		/* aggregation state flipped or a probe frame appeared:
		 * push it back and end the burst
		 */
		if (ampdu != cur_ampdu ||
		    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
			skb_queue_tail(&mtxq->retry_q, skb);
			break;
		}

		info->control.rates[0] = tx_rate;

		if (cur_ampdu)
			mt76_check_agg_ssn(mtxq, skb);

		idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
		if (idx < 0)
			return idx;

		n_frames++;
	} while (n_frames < limit);

	/* mark the last queued entry so completion re-triggers scheduling */
	if (!probe) {
		hwq->swq_queued++;
		hwq->entry[idx].schedule = true;
	}

	dev->queue_ops->kick(dev, hwq);

	return n_frames;
}
+
/* One round-robin pass over the software queue list of @hwq, sending one
 * burst per txq.  A pending BAR is sent with the queue lock dropped, and
 * the walk restarts afterwards since the list may have changed.  Called
 * with hwq->lock held.  Returns total frames sent or a negative error.
 */
static int
mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_queue *hwq)
{
	struct mt76_txq *mtxq, *mtxq_last;
	int len = 0;

restart:
	mtxq_last = list_last_entry(&hwq->swq, struct mt76_txq, list);
	while (!list_empty(&hwq->swq)) {
		bool empty = false;
		int cur;

		mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list);
		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			/* ieee80211_send_bar() may sleep/lock; drop the
			 * queue lock and restart the walk afterwards
			 */
			spin_unlock_bh(&hwq->lock);
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
			spin_lock_bh(&hwq->lock);
			goto restart;
		}

		list_del_init(&mtxq->list);

		cur = mt76_txq_send_burst(dev, hwq, mtxq, &empty);
		/* requeue at the tail for the next round if frames remain */
		if (!empty)
			list_add_tail(&mtxq->list, &hwq->swq);

		if (cur < 0)
			return cur;

		len += cur;

		/* one full pass done once the original tail was serviced */
		if (mtxq == mtxq_last)
			break;
	}

	return len;
}
+
+void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq)
+{
+       int len;
+
+       do {
+               if (hwq->swq_queued >= 4 || list_empty(&hwq->swq))
+                       break;
+
+               len = mt76_txq_schedule_list(dev, hwq);
+       } while (len > 0);
+}
+EXPORT_SYMBOL_GPL(mt76_txq_schedule);
+
/* Schedule all data queues.  Iterates queue indices 0..MT_TXQ_BK —
 * NOTE(review): assumes MT_TXQ_BK is the last data (non-PS) queue in the
 * enum; confirm against the mt76 queue enum ordering.
 */
void mt76_txq_schedule_all(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++) {
		struct mt76_queue *q = &dev->q_tx[i];

		spin_lock_bh(&q->lock);
		mt76_txq_schedule(dev, q);
		spin_unlock_bh(&q->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
+
/* Remove all of @sta's txqs from their hardware queues' scheduling lists,
 * optionally flagging a BAR to be sent for aggregating TIDs the next time
 * the queue is scheduled.
 */
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;

		spin_lock_bh(&mtxq->hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		if (!list_empty(&mtxq->list))
			list_del_init(&mtxq->list);
		spin_unlock_bh(&mtxq->hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
+
/* mac80211 .wake_tx_queue: put the txq on its hardware queue's scheduling
 * list (if not already there) and run the scheduler.
 */
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_dev *dev = hw->priv;
	struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
	struct mt76_queue *hwq = mtxq->hwq;

	spin_lock_bh(&hwq->lock);
	if (list_empty(&mtxq->list))
		list_add_tail(&mtxq->list, &hwq->swq);
	mt76_txq_schedule(dev, hwq);
	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
+
/* Tear down a txq: unlink it from its hardware queue's scheduling list
 * and free any frames still sitting on the driver retry queue.  A NULL
 * @txq is accepted and ignored.
 */
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq;
	struct mt76_queue *hwq;
	struct sk_buff *skb;

	if (!txq)
		return;

	mtxq = (struct mt76_txq *) txq->drv_priv;
	hwq = mtxq->hwq;

	spin_lock_bh(&hwq->lock);
	if (!list_empty(&mtxq->list))
		list_del(&mtxq->list);
	spin_unlock_bh(&hwq->lock);

	while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
		ieee80211_free_txskb(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);
+
/* Initialize the driver-private part of a mac80211 txq and bind it to the
 * hardware queue matching its access category.
 */
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;

	INIT_LIST_HEAD(&mtxq->list);
	skb_queue_head_init(&mtxq->retry_q);

	mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);
diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
new file mode 100644 (file)
index 0000000..0c35b8d
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include "mt76.h"
+
+bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+                int timeout)
+{
+       u32 cur;
+
+       timeout /= 10;
+       do {
+               cur = dev->bus->rr(dev, offset) & mask;
+               if (cur == val)
+                       return true;
+
+               udelay(10);
+       } while (timeout-- > 0);
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(__mt76_poll);
+
/* Sleep-poll a register until (value & mask) == val or @timeout expires.
 * NOTE(review): timeout is divided by 10 while each iteration sleeps
 * ~10-20 ms, so @timeout appears to be in milliseconds with ~10 ms
 * granularity — confirm the intended unit at the call sites.
 */
bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout)
{
	u32 cur;

	timeout /= 10;
	do {
		cur = dev->bus->rr(dev, offset) & mask;
		if (cur == val)
			return true;

		usleep_range(10000, 20000);
	} while (timeout-- > 0);

	return false;
}
EXPORT_SYMBOL_GPL(__mt76_poll_msec);
+
+int mt76_wcid_alloc(unsigned long *mask, int size)
+{
+       int i, idx = 0, cur;
+
+       for (i = 0; i < size / BITS_PER_LONG; i++) {
+               idx = ffs(~mask[i]);
+               if (!idx)
+                       continue;
+
+               idx--;
+               cur = i * BITS_PER_LONG + idx;
+               if (cur >= size)
+                       break;
+
+               mask[i] |= BIT(idx);
+               return cur;
+       }
+
+       return -1;
+}
+EXPORT_SYMBOL_GPL(mt76_wcid_alloc);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/util.h b/drivers/net/wireless/mediatek/mt76/util.h
new file mode 100644 (file)
index 0000000..018d475
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_UTIL_H
+#define __MT76_UTIL_H
+
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
/* Advance _var to the next slot in a ring of _size entries, wrapping to
 * 0.  Fix: _size is now parenthesized so expression arguments (e.g.
 * "n + 1") are evaluated correctly; note _var is still expanded twice.
 */
#define MT76_INCR(_var, _size) \
	((_var) = (((_var) + 1) % (_size)))
+
+int mt76_wcid_alloc(unsigned long *mask, int size);
+
/* Mark WCID index @idx as free in the bitmap @mask.  Non-atomic; the
 * caller is responsible for serializing against mt76_wcid_alloc().
 */
static inline void
mt76_wcid_free(unsigned long *mask, int idx)
{
	mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
}
+
+static inline void
+mt76_skb_set_moredata(struct sk_buff *skb, bool enable)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+
+       if (enable)
+               hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+       else
+               hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+}
+
+#endif
index e6668ffb77e65841c909e3d87d1fa17712b5108f..ff0971f1e2c8b18cdc5ea8eac610ef921ab31306 100644 (file)
@@ -688,10 +688,7 @@ static void rtl8225z2_b_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
        cck_power = priv->channels[channel - 1].hw_value & 0xF;
        ofdm_power = priv->channels[channel - 1].hw_value >> 4;
 
-       if (cck_power > 15)
-               cck_power = (priv->hw_rev == RTL8187BvB) ? 15 : 22;
-       else
-               cck_power += (priv->hw_rev == RTL8187BvB) ? 0 : 7;
+       cck_power += (priv->hw_rev == RTL8187BvB) ? 0 : 7;
        cck_power += priv->txpwr_base & 0xF;
        cck_power = min(cck_power, (u8)35);
 
index cad2272ae21b434f84b5bf36861f874187dc2fd4..704741d6f4955659e4398e98d47b0d0e0cf7c0c9 100644 (file)
@@ -1726,7 +1726,7 @@ int rtl_tx_agg_oper(struct ieee80211_hw *hw,
 void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv)
 {
        struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops;
-       u8 reject_agg, ctrl_agg_size = 0, agg_size;
+       u8 reject_agg = 0, ctrl_agg_size = 0, agg_size = 0;
 
        if (rtlpriv->cfg->ops->get_btc_status())
                btc_ops->btc_get_ampdu_cfg(rtlpriv, &reject_agg,
index c2575b0b94407fa644c1699456b67fb47caa11dd..4013394fac330d84e839e7e1060fb7ead25933d6 100644 (file)
@@ -59,6 +59,7 @@ static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        __le16 fc = rtl_get_fc(skb);
        u8 queue_index = skb_get_queue_mapping(skb);
+       struct ieee80211_hdr *hdr;
 
        if (unlikely(ieee80211_is_beacon(fc)))
                return BEACON_QUEUE;
@@ -67,6 +68,13 @@ static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
        if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
                if (ieee80211_is_nullfunc(fc))
                        return HIGH_QUEUE;
+       if (rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE) {
+               hdr = rtl_get_hdr(skb);
+
+               if (is_multicast_ether_addr(hdr->addr1) ||
+                   is_broadcast_ether_addr(hdr->addr1))
+                       return HIGH_QUEUE;
+       }
 
        return ac_to_hwq[queue_index];
 }
@@ -557,13 +565,6 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
                else
                        entry = (u8 *)(&ring->desc[ring->idx]);
 
-               if (rtlpriv->cfg->ops->get_available_desc &&
-                   rtlpriv->cfg->ops->get_available_desc(hw, prio) <= 1) {
-                       RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_DMESG,
-                                "no available desc!\n");
-                       return;
-               }
-
                if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx))
                        return;
                ring->idx = (ring->idx + 1) % ring->entries;
@@ -747,7 +748,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
        u8 tmp_one;
        bool unicast = false;
        u8 hw_queue = 0;
-       unsigned int rx_remained_cnt;
+       unsigned int rx_remained_cnt = 0;
        struct rtl_stats stats = {
                .signal = 0,
                .rate = 0,
@@ -768,7 +769,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                struct sk_buff *new_skb;
 
                if (rtlpriv->use_new_trx_flow) {
-                       rx_remained_cnt =
+                       if (rx_remained_cnt == 0)
+                               rx_remained_cnt =
                                rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw,
                                                                      hw_queue);
                        if (rx_remained_cnt == 0)
@@ -924,10 +926,8 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        unsigned long flags;
-       u32 inta = 0;
-       u32 intb = 0;
-       u32 intc = 0;
-       u32 intd = 0;
+       struct rtl_int intvec = {0};
+
        irqreturn_t ret = IRQ_HANDLED;
 
        if (rtlpci->irq_enabled == 0)
@@ -937,47 +937,47 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
        rtlpriv->cfg->ops->disable_interrupt(hw);
 
        /*read ISR: 4/8bytes */
-       rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb, &intc, &intd);
+       rtlpriv->cfg->ops->interrupt_recognized(hw, &intvec);
 
        /*Shared IRQ or HW disappeared */
-       if (!inta || inta == 0xffff)
+       if (!intvec.inta || intvec.inta == 0xffff)
                goto done;
 
        /*<1> beacon related */
-       if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK])
+       if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK])
                RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
                         "beacon ok interrupt!\n");
 
-       if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER]))
+       if (unlikely(intvec.inta & rtlpriv->cfg->maps[RTL_IMR_TBDER]))
                RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
                         "beacon err interrupt!\n");
 
-       if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK])
+       if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BDOK])
                RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
 
-       if (inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) {
+       if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) {
                RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
                         "prepare beacon for interrupt!\n");
                tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
        }
 
        /*<2> Tx related */
-       if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
+       if (unlikely(intvec.intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
                RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n");
 
-       if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
+       if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
                RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
                         "Manage ok interrupt!\n");
                _rtl_pci_tx_isr(hw, MGNT_QUEUE);
        }
 
-       if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
+       if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
                RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
                         "HIGH_QUEUE ok interrupt!\n");
                _rtl_pci_tx_isr(hw, HIGH_QUEUE);
        }
 
-       if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
+       if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
                rtlpriv->link_info.num_tx_inperiod++;
 
                RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -985,7 +985,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
                _rtl_pci_tx_isr(hw, BK_QUEUE);
        }
 
-       if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
+       if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
                rtlpriv->link_info.num_tx_inperiod++;
 
                RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -993,7 +993,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
                _rtl_pci_tx_isr(hw, BE_QUEUE);
        }
 
-       if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
+       if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
                rtlpriv->link_info.num_tx_inperiod++;
 
                RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -1001,7 +1001,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
                _rtl_pci_tx_isr(hw, VI_QUEUE);
        }
 
-       if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
+       if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
                rtlpriv->link_info.num_tx_inperiod++;
 
                RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -1010,7 +1010,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
        }
 
        if (rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE) {
-               if (intd & rtlpriv->cfg->maps[RTL_IMR_H2CDOK]) {
+               if (intvec.intd & rtlpriv->cfg->maps[RTL_IMR_H2CDOK]) {
                        rtlpriv->link_info.num_tx_inperiod++;
 
                        RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -1020,7 +1020,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
        }
 
        if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
-               if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
+               if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
                        rtlpriv->link_info.num_tx_inperiod++;
 
                        RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -1030,25 +1030,25 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
        }
 
        /*<3> Rx related */
-       if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
+       if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
                RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n");
                _rtl_pci_rx_interrupt(hw);
        }
 
-       if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
+       if (unlikely(intvec.inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
                         "rx descriptor unavailable!\n");
                _rtl_pci_rx_interrupt(hw);
        }
 
-       if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
+       if (unlikely(intvec.intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n");
                _rtl_pci_rx_interrupt(hw);
        }
 
        /*<4> fw related*/
        if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) {
-               if (inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
+               if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
                        RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
                                 "firmware interrupt!\n");
                        queue_delayed_work(rtlpriv->works.rtl_wq,
@@ -1064,7 +1064,8 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
         */
        if (rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE ||
            rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
-               if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
+               if (unlikely(intvec.inta &
+                   rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
                        RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
                                 "hsisr interrupt!\n");
                        _rtl_pci_hs_interrupt(hw);
@@ -1250,7 +1251,6 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
 
                rtlpci->tx_ring[prio].cur_tx_rp = 0;
                rtlpci->tx_ring[prio].cur_tx_wp = 0;
-               rtlpci->tx_ring[prio].avl_desc = entries;
        }
 
        /* alloc dma for this ring */
@@ -1555,7 +1555,14 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
                                dev_kfree_skb_irq(skb);
                                ring->idx = (ring->idx + 1) % ring->entries;
                        }
+
+                       if (rtlpriv->use_new_trx_flow) {
+                               rtlpci->tx_ring[i].cur_tx_rp = 0;
+                               rtlpci->tx_ring[i].cur_tx_wp = 0;
+                       }
+
                        ring->idx = 0;
+                       ring->entries = rtlpci->txringcount[i];
                }
        }
        spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
index e7d070e8da2d33f628b6776417b38e201e6e4c8d..3fb56c845a615e9843b976436cf55a0be92e37f1 100644 (file)
@@ -173,7 +173,6 @@ struct rtl8192_tx_ring {
        /*add for new trx flow*/
        struct rtl_tx_buffer_desc *buffer_desc; /*tx buffer descriptor*/
        dma_addr_t buffer_desc_dma; /*tx bufferd desc dma memory*/
-       u16 avl_desc; /* available_desc_to_write */
        u16 cur_tx_wp; /* current_tx_write_point */
        u16 cur_tx_rp; /* current_tx_read_point */
 };
@@ -320,10 +319,10 @@ static inline void pci_write32_async(struct rtl_priv *rtlpriv,
        writel(val, (u8 __iomem *)rtlpriv->io.pci_mem_start + addr);
 }
 
-static inline u16 calc_fifo_space(u16 rp, u16 wp)
+static inline u16 calc_fifo_space(u16 rp, u16 wp, u16 size)
 {
        if (rp <= wp)
-               return RTL_PCI_MAX_RX_COUNT - 1 + rp - wp;
+               return size - 1 + rp - wp;
        return rp - wp - 1;
 }
 
index e30a18e64ff5c91e65ddd03c1d98fb7feb09a3a3..988d5ac57d026bb7e4dc6a5f20acd87b42df69a4 100644 (file)
@@ -1472,17 +1472,16 @@ void rtl88ee_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl88ee_interrupt_recognized(struct ieee80211_hw *hw,
-                                 u32 *p_inta, u32 *p_intb,
-                                 u32 *p_intc, u32 *p_intd)
+                                 struct rtl_int *intvec)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-       *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
-       rtl_write_dword(rtlpriv, ISR, *p_inta);
+       intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+       rtl_write_dword(rtlpriv, ISR, intvec->inta);
 
-       *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
-       rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+       intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
+       rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb);
 
 }
 
index cdf49de1e6ed8b9b2e38905fdd0b38c021e82f0d..214cd2a3201816e37dc03a8432e76ae0e2adf717 100644 (file)
@@ -29,8 +29,7 @@
 void rtl88ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl88ee_read_eeprom_info(struct ieee80211_hw *hw);
 void rtl88ee_interrupt_recognized(struct ieee80211_hw *hw,
-                                 u32 *p_inta, u32 *p_intb,
-                                 u32 *p_intc, u32 *p_intd);
+                                 struct rtl_int *int_vec);
 int rtl88ee_hw_init(struct ieee80211_hw *hw);
 void rtl88ee_card_disable(struct ieee80211_hw *hw);
 void rtl88ee_enable_interrupt(struct ieee80211_hw *hw);
index 0f4c86a287161537180250fd2dde86a7b5f9c2da..4a81e0ef4b8e102510baa618bbb8b0e6dc8263a3 100644 (file)
@@ -1375,19 +1375,13 @@ void rtl92ce_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
-                                 u32 *p_inta, u32 *p_intb,
-                                 u32 *p_intc, u32 *p_intd)
+                                 struct rtl_int *intvec)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-       *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
-       rtl_write_dword(rtlpriv, ISR, *p_inta);
-
-       /*
-        * *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
-        * rtl_write_dword(rtlpriv, ISR + 4, *p_intb);
-        */
+       intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+       rtl_write_dword(rtlpriv, ISR, intvec->inta);
 }
 
 void rtl92ce_set_beacon_related_registers(struct ieee80211_hw *hw)
index b5c8e2fc1ba26aeff73b9b489e0484783e027b44..6711ea1a75d9c0fd8793f3707dcf5452a0762335 100644 (file)
@@ -42,8 +42,7 @@ static inline u8 rtl92c_get_chnl_group(u8 chnl)
 void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw);
 void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
-                                 u32 *p_inta, u32 *p_intb,
-                                 u32 *p_intc, u32 *p_intd);
+                                 struct rtl_int *int_vec);
 int rtl92ce_hw_init(struct ieee80211_hw *hw);
 void rtl92ce_card_disable(struct ieee80211_hw *hw);
 void rtl92ce_enable_interrupt(struct ieee80211_hw *hw);
index 0da6c013685756fc56de63dfa6fe46f4ae10684b..80123fd9722198afd5ce68d816ac0283626f2fce 100644 (file)
@@ -1356,19 +1356,13 @@ void rtl92de_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl92de_interrupt_recognized(struct ieee80211_hw *hw,
-                                 u32 *p_inta, u32 *p_intb,
-                                 u32 *p_intc, u32 *p_intd)
+                                 struct rtl_int *intvec)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-       *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
-       rtl_write_dword(rtlpriv, ISR, *p_inta);
-
-       /*
-        * *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
-        * rtl_write_dword(rtlpriv, ISR + 4, *p_intb);
-        */
+       intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+       rtl_write_dword(rtlpriv, ISR, intvec->inta);
 }
 
 void rtl92de_set_beacon_related_registers(struct ieee80211_hw *hw)
index 9236aa91273d9f13a883fbc121518fbd0fcce672..e6c702e69ecfa7c172ad8e3ce3bb0693745bb238 100644 (file)
@@ -29,8 +29,7 @@
 void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl92de_read_eeprom_info(struct ieee80211_hw *hw);
 void rtl92de_interrupt_recognized(struct ieee80211_hw *hw,
-                                 u32 *p_inta, u32 *p_intb,
-                                 u32 *p_intc, u32 *p_intd);
+                                 struct rtl_int *int_vec);
 int rtl92de_hw_init(struct ieee80211_hw *hw);
 void rtl92de_card_disable(struct ieee80211_hw *hw);
 void rtl92de_enable_interrupt(struct ieee80211_hw *hw);
index fe5da637e77afc7c09720e4e73193a19cad2e70f..fd7928fdbd1a9606cd9abafbe255c2ee9f6c4c57 100644 (file)
@@ -1694,17 +1694,16 @@ void rtl92ee_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl92ee_interrupt_recognized(struct ieee80211_hw *hw,
-                                 u32 *p_inta, u32 *p_intb,
-                                 u32 *p_intc, u32 *p_intd)
+                                 struct rtl_int *intvec)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-       *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
-       rtl_write_dword(rtlpriv, ISR, *p_inta);
+       intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+       rtl_write_dword(rtlpriv, ISR, intvec->inta);
 
-       *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
-       rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+       intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
+       rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb);
 }
 
 void rtl92ee_set_beacon_related_registers(struct ieee80211_hw *hw)
index cd6d3322f0330d7a4286a05ce1ff082eee633b67..3a63bec9b0cc200257041582e1c74766121a78c1 100644 (file)
@@ -29,8 +29,7 @@
 void rtl92ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl92ee_read_eeprom_info(struct ieee80211_hw *hw);
 void rtl92ee_interrupt_recognized(struct ieee80211_hw *hw,
-                                 u32 *p_inta, u32 *p_intb,
-                                 u32 *p_intc, u32 *p_intd);
+                                 struct rtl_int *int_vec);
 int rtl92ee_hw_init(struct ieee80211_hw *hw);
 void rtl92ee_card_disable(struct ieee80211_hw *hw);
 void rtl92ee_enable_interrupt(struct ieee80211_hw *hw);
index 12255682e890ccbc593addbc76e00ce71c6b5d95..4f7444331b07b92f3afed64019358d085aacd383 100644 (file)
@@ -498,7 +498,8 @@ u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index)
        if (!start_rx)
                return 0;
 
-       remind_cnt = calc_fifo_space(read_point, write_point);
+       remind_cnt = calc_fifo_space(read_point, write_point,
+                                    RTL_PCI_MAX_RX_COUNT);
 
        if (remind_cnt == 0)
                return 0;
@@ -548,7 +549,6 @@ static u16 get_desc_addr_fr_q_idx(u16 queue_index)
 
 u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
 {
-       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        u16 point_diff = 0;
        u16 current_tx_read_point = 0, current_tx_write_point = 0;
@@ -560,9 +560,9 @@ u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
        current_tx_write_point = (u16)((tmp_4byte) & 0x0fff);
 
        point_diff = calc_fifo_space(current_tx_read_point,
-                                    current_tx_write_point);
+                                    current_tx_write_point,
+                                    TX_DESC_NUM_92E);
 
-       rtlpci->tx_ring[q_idx].avl_desc = point_diff;
        return point_diff;
 }
 
@@ -907,10 +907,6 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
                      u8 desc_name, u8 *val)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u16 cur_tx_rp = 0;
-       u16 cur_tx_wp = 0;
-       static bool over_run;
-       u32 tmp = 0;
        u8 q_idx = *val;
        bool dma64 = rtlpriv->cfg->mod_params->dma64;
 
@@ -931,38 +927,12 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
                                return;
                        }
 
+                       /* make sure tx desc is available by caller */
                        ring->cur_tx_wp = ((ring->cur_tx_wp + 1) % max_tx_desc);
 
-                       if (over_run) {
-                               ring->cur_tx_wp = 0;
-                               over_run = false;
-                       }
-                       if (ring->avl_desc > 1) {
-                               ring->avl_desc--;
-
-                               rtl_write_word(rtlpriv,
-                                              get_desc_addr_fr_q_idx(q_idx),
-                                              ring->cur_tx_wp);
-                       }
-
-                       if (ring->avl_desc < (max_tx_desc - 15)) {
-                               u16 point_diff = 0;
-
-                               tmp =
-                                 rtl_read_dword(rtlpriv,
-                                                get_desc_addr_fr_q_idx(q_idx));
-                               cur_tx_rp = (u16)((tmp >> 16) & 0x0fff);
-                               cur_tx_wp = (u16)(tmp & 0x0fff);
-
-                               ring->cur_tx_wp = cur_tx_wp;
-                               ring->cur_tx_rp = cur_tx_rp;
-                               point_diff = ((cur_tx_rp > cur_tx_wp) ?
-                                             (cur_tx_rp - cur_tx_wp) :
-                                             (TX_DESC_NUM_92E - 1 -
-                                              cur_tx_wp + cur_tx_rp));
-
-                               ring->avl_desc = point_diff;
-                       }
+                       rtl_write_word(rtlpriv,
+                                      get_desc_addr_fr_q_idx(q_idx),
+                                      ring->cur_tx_wp);
                }
                break;
                }
@@ -1044,13 +1014,12 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index)
 {
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u16 read_point, write_point, available_desc_num;
+       u16 read_point, write_point;
        bool ret = false;
        static u8 stop_report_cnt;
        struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
 
        {
-               u16 point_diff = 0;
                u16 cur_tx_rp, cur_tx_wp;
                u32 tmpu32 = 0;
 
@@ -1060,18 +1029,12 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index)
                cur_tx_rp = (u16)((tmpu32 >> 16) & 0x0fff);
                cur_tx_wp = (u16)(tmpu32 & 0x0fff);
 
-               ring->cur_tx_wp = cur_tx_wp;
+               /* don't need to update ring->cur_tx_wp */
                ring->cur_tx_rp = cur_tx_rp;
-               point_diff = ((cur_tx_rp > cur_tx_wp) ?
-                             (cur_tx_rp - cur_tx_wp) :
-                             (TX_DESC_NUM_92E - cur_tx_wp + cur_tx_rp));
-
-               ring->avl_desc = point_diff;
        }
 
        read_point = ring->cur_tx_rp;
        write_point = ring->cur_tx_wp;
-       available_desc_num = ring->avl_desc;
 
        if (write_point > read_point) {
                if (index < write_point && index >= read_point)
index 76bf089cced4828353bf6f56189af393b0afac0f..30dea7b9bc17fff06dee326e2ca47a72a1d026d6 100644 (file)
@@ -1558,17 +1558,17 @@ void rtl92se_card_disable(struct ieee80211_hw *hw)
        udelay(100);
 }
 
-void rtl92se_interrupt_recognized(struct ieee80211_hw *hw, u32 *p_inta,
-                            u32 *p_intb, u32 *p_intc, u32 *p_intd)
+void rtl92se_interrupt_recognized(struct ieee80211_hw *hw,
+                                 struct rtl_int *intvec)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-       *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
-       rtl_write_dword(rtlpriv, ISR, *p_inta);
+       intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+       rtl_write_dword(rtlpriv, ISR, intvec->inta);
 
-       *p_intb = rtl_read_dword(rtlpriv, ISR + 4) & rtlpci->irq_mask[1];
-       rtl_write_dword(rtlpriv, ISR + 4, *p_intb);
+       intvec->intb = rtl_read_dword(rtlpriv, ISR + 4) & rtlpci->irq_mask[1];
+       rtl_write_dword(rtlpriv, ISR + 4, intvec->intb);
 }
 
 void rtl92se_set_beacon_related_registers(struct ieee80211_hw *hw)
index 607056010974bf476b8bbbf5f11e527391004510..fa836ceefc8f8934fa1aecd7b068e2b8ddabdfcb 100644 (file)
@@ -42,8 +42,7 @@ void rtl92se_get_hw_reg(struct ieee80211_hw *hw,
                        u8 variable, u8 *val);
 void rtl92se_read_eeprom_info(struct ieee80211_hw *hw);
 void rtl92se_interrupt_recognized(struct ieee80211_hw *hw,
-                                 u32 *p_inta, u32 *p_intb,
-                                 u32 *p_intc, u32 *p_intd);
+                                 struct rtl_int *int_vec);
 int rtl92se_hw_init(struct ieee80211_hw *hw);
 void rtl92se_card_disable(struct ieee80211_hw *hw);
 void rtl92se_enable_interrupt(struct ieee80211_hw *hw);
index c3f98d58124cd66860dd16babbc7d24bd9b2d257..545115db507e71c9876b66e53ceee5486b76e1a1 100644 (file)
@@ -1340,14 +1340,13 @@ void rtl8723e_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl8723e_interrupt_recognized(struct ieee80211_hw *hw,
-                                  u32 *p_inta, u32 *p_intb,
-                                  u32 *p_intc, u32 *p_intd)
+                                  struct rtl_int *intvec)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-       *p_inta = rtl_read_dword(rtlpriv, 0x3a0) & rtlpci->irq_mask[0];
-       rtl_write_dword(rtlpriv, 0x3a0, *p_inta);
+       intvec->inta = rtl_read_dword(rtlpriv, 0x3a0) & rtlpci->irq_mask[0];
+       rtl_write_dword(rtlpriv, 0x3a0, intvec->inta);
 }
 
 void rtl8723e_set_beacon_related_registers(struct ieee80211_hw *hw)
index 19e467a37c72408e1dc4b7c632dd8f6284653e15..c76e453f4f43793eaf7e17f000d98afa76653582 100644 (file)
@@ -34,8 +34,7 @@ void rtl8723e_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl8723e_read_eeprom_info(struct ieee80211_hw *hw);
 
 void rtl8723e_interrupt_recognized(struct ieee80211_hw *hw,
-                                  u32 *p_inta, u32 *p_intb,
-                                  u32 *p_intc, u32 *p_intd);
+                                  struct rtl_int *int_vec);
 int rtl8723e_hw_init(struct ieee80211_hw *hw);
 void rtl8723e_card_disable(struct ieee80211_hw *hw);
 void rtl8723e_enable_interrupt(struct ieee80211_hw *hw);
index 7cd1ffa7d4a7f16cefd543b883cde43187630489..f9ccd13c79f94e230bfaaa3b40d23e78dc09226c 100644 (file)
@@ -43,6 +43,7 @@
 #include "../pwrseqcmd.h"
 #include "pwrseq.h"
 #include "../btcoexist/rtl_btc.h"
+#include <linux/kernel.h>
 
 #define LLT_CONFIG     5
 
@@ -1682,18 +1683,17 @@ void rtl8723be_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw,
-                                   u32 *p_inta, u32 *p_intb,
-                                   u32 *p_intc, u32 *p_intd)
+                                   struct rtl_int *intvec)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-       *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
-       rtl_write_dword(rtlpriv, ISR, *p_inta);
+       intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+       rtl_write_dword(rtlpriv, ISR, intvec->inta);
 
-       *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) &
-                                       rtlpci->irq_mask[1];
-       rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+       intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) &
+                                     rtlpci->irq_mask[1];
+       rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb);
 }
 
 void rtl8723be_set_beacon_related_registers(struct ieee80211_hw *hw)
@@ -2127,28 +2127,28 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
 
        if (rtlhal->oem_id == RT_CID_DEFAULT) {
                /* Does this one have a Toshiba SMID from group 1? */
-               for (i = 0; i < sizeof(toshiba_smid1) / sizeof(u16); i++) {
+               for (i = 0; i < ARRAY_SIZE(toshiba_smid1); i++) {
                        if (rtlefuse->eeprom_smid == toshiba_smid1[i]) {
                                is_toshiba_smid1 = true;
                                break;
                        }
                }
                /* Does this one have a Toshiba SMID from group 2? */
-               for (i = 0; i < sizeof(toshiba_smid2) / sizeof(u16); i++) {
+               for (i = 0; i < ARRAY_SIZE(toshiba_smid2); i++) {
                        if (rtlefuse->eeprom_smid == toshiba_smid2[i]) {
                                is_toshiba_smid2 = true;
                                break;
                        }
                }
                /* Does this one have a Samsung SMID? */
-               for (i = 0; i < sizeof(samsung_smid) / sizeof(u16); i++) {
+               for (i = 0; i < ARRAY_SIZE(samsung_smid); i++) {
                        if (rtlefuse->eeprom_smid == samsung_smid[i]) {
                                is_samsung_smid = true;
                                break;
                        }
                }
                /* Does this one have a Lenovo SMID? */
-               for (i = 0; i < sizeof(lenovo_smid) / sizeof(u16); i++) {
+               for (i = 0; i < ARRAY_SIZE(lenovo_smid); i++) {
                        if (rtlefuse->eeprom_smid == lenovo_smid[i]) {
                                is_lenovo_smid = true;
                                break;
index 2215a792f6bf0ee5a797c9ce00dbeae35b993220..ae856a19e81a16d7d5a108750f234bb48dd0d495 100644 (file)
@@ -30,8 +30,7 @@ void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw);
 
 void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw,
-                                   u32 *p_inta, u32 *p_intb,
-                                   u32 *p_intc, u32 *p_intd);
+                                   struct rtl_int *int_vec);
 int rtl8723be_hw_init(struct ieee80211_hw *hw);
 void rtl8723be_card_disable(struct ieee80211_hw *hw);
 void rtl8723be_enable_interrupt(struct ieee80211_hw *hw);
index 9606641519e71dcbe84dc85c3bced929a396cfa6..1263b12db5dc9172273766529f7b7ec015719f51 100644 (file)
@@ -35,6 +35,7 @@
 #include "../rtl8723com/dm_common.h"
 #include "table.h"
 #include "trx.h"
+#include <linux/kernel.h>
 
 static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw);
 static bool _rtl8723be_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
@@ -1143,14 +1144,13 @@ void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
                             DESC92C_RATEMCS2, DESC92C_RATEMCS3,
                             DESC92C_RATEMCS4, DESC92C_RATEMCS5,
                             DESC92C_RATEMCS6, DESC92C_RATEMCS7};
-       u8 i, size;
+       u8 i;
        u8 power_index;
 
        if (!rtlefuse->txpwr_fromeprom)
                return;
 
-       size = sizeof(cck_rates) / sizeof(u8);
-       for (i = 0; i < size; i++) {
+       for (i = 0; i < ARRAY_SIZE(cck_rates); i++) {
                power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
                                        cck_rates[i],
                                        rtl_priv(hw)->phy.current_chan_bw,
@@ -1158,8 +1158,7 @@ void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
                _rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A,
                                                 cck_rates[i]);
        }
-       size = sizeof(ofdm_rates) / sizeof(u8);
-       for (i = 0; i < size; i++) {
+       for (i = 0; i < ARRAY_SIZE(ofdm_rates); i++) {
                power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
                                        ofdm_rates[i],
                                        rtl_priv(hw)->phy.current_chan_bw,
@@ -1167,8 +1166,7 @@ void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
                _rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A,
                                                 ofdm_rates[i]);
        }
-       size = sizeof(ht_rates_1t) / sizeof(u8);
-       for (i = 0; i < size; i++) {
+       for (i = 0; i < ARRAY_SIZE(ht_rates_1t); i++) {
                power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
                                        ht_rates_1t[i],
                                        rtl_priv(hw)->phy.current_chan_bw,
index 381c16b9b3a9008250a226d7724558c85df647e9..160fee8333aeff892adf6f1b07bc7cf5e23c8caf 100644 (file)
@@ -25,6 +25,7 @@
  *
  *****************************************************************************/
 
+#include <linux/kernel.h>
 #include "table.h"
 
 u32 RTL8723BEPHY_REG_1TARRAY[] = {
@@ -224,8 +225,7 @@ u32 RTL8723BEPHY_REG_1TARRAY[] = {
 
 };
 
-u32 RTL8723BEPHY_REG_1TARRAYLEN =
-       sizeof(RTL8723BEPHY_REG_1TARRAY) / sizeof(u32);
+u32 RTL8723BEPHY_REG_1TARRAYLEN = ARRAY_SIZE(RTL8723BEPHY_REG_1TARRAY);
 
 u32 RTL8723BEPHY_REG_ARRAY_PG[] = {
        0, 0, 0, 0x00000e08, 0x0000ff00, 0x00003800,
@@ -236,8 +236,7 @@ u32 RTL8723BEPHY_REG_ARRAY_PG[] = {
        0, 0, 0, 0x00000e14, 0xffffffff, 0x26303436
 };
 
-u32 RTL8723BEPHY_REG_ARRAY_PGLEN =
-               sizeof(RTL8723BEPHY_REG_ARRAY_PG) / sizeof(u32);
+u32 RTL8723BEPHY_REG_ARRAY_PGLEN = ARRAY_SIZE(RTL8723BEPHY_REG_ARRAY_PG);
 
 u32 RTL8723BE_RADIOA_1TARRAY[] = {
                0x000, 0x00010000,
@@ -373,8 +372,7 @@ u32 RTL8723BE_RADIOA_1TARRAY[] = {
 
 };
 
-u32 RTL8723BE_RADIOA_1TARRAYLEN =
-       sizeof(RTL8723BE_RADIOA_1TARRAY) / sizeof(u32);
+u32 RTL8723BE_RADIOA_1TARRAYLEN = ARRAY_SIZE(RTL8723BE_RADIOA_1TARRAY);
 
 u32 RTL8723BEMAC_1T_ARRAY[] = {
                0x02F, 0x00000030,
@@ -483,7 +481,7 @@ u32 RTL8723BEMAC_1T_ARRAY[] = {
 
 };
 
-u32 RTL8723BEMAC_1T_ARRAYLEN = sizeof(RTL8723BEMAC_1T_ARRAY) / sizeof(u32);
+u32 RTL8723BEMAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8723BEMAC_1T_ARRAY);
 
 u32 RTL8723BEAGCTAB_1TARRAY[] = {
                0xC78, 0xFD000001,
@@ -620,4 +618,4 @@ u32 RTL8723BEAGCTAB_1TARRAY[] = {
 
 };
 
-u32 RTL8723BEAGCTAB_1TARRAYLEN = sizeof(RTL8723BEAGCTAB_1TARRAY) / sizeof(u32);
+u32 RTL8723BEAGCTAB_1TARRAYLEN = ARRAY_SIZE(RTL8723BEAGCTAB_1TARRAY);
index 43e18c4c1e68e9e17fbae576fda6549e2398768c..f20e77b4bb65fda713fa5445e6963af977c92f3a 100644 (file)
@@ -2483,17 +2483,16 @@ void rtl8821ae_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw,
-                                   u32 *p_inta, u32 *p_intb,
-                                   u32 *p_intc, u32 *p_intd)
+                                   struct rtl_int *intvec)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-       *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
-       rtl_write_dword(rtlpriv, ISR, *p_inta);
+       intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+       rtl_write_dword(rtlpriv, ISR, intvec->inta);
 
-       *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
-       rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+       intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
+       rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb);
 }
 
 void rtl8821ae_set_beacon_related_registers(struct ieee80211_hw *hw)
index 284d259fe55791db0e202c09f45150facf851598..e2ab783a2ad912faf4fb270ba1396447b3252585 100644 (file)
@@ -30,8 +30,7 @@ void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw);
 
 void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw,
-                                   u32 *p_inta, u32 *p_intb,
-                                   u32 *p_intc, u32 *p_intd);
+                                   struct rtl_int *int_vec);
 int rtl8821ae_hw_init(struct ieee80211_hw *hw);
 void rtl8821ae_card_disable(struct ieee80211_hw *hw);
 void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw);
index 408c4611e5dee5798911d67d1915e6afd06f3e82..f87f9d03b9fa2f1a5ec1b193ad0e8c36cccb8521 100644 (file)
@@ -24,7 +24,7 @@
  * Larry Finger <Larry.Finger@lwfinger.net>
  *
  *****************************************************************************/
-
+#include <linux/kernel.h>
 #include "table.h"
 u32 RTL8812AE_PHY_REG_ARRAY[] = {
                0x800, 0x8020D010,
@@ -258,8 +258,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = {
                0xEB8, 0x00508242,
 };
 
-u32 RTL8812AE_PHY_REG_1TARRAYLEN =
-       sizeof(RTL8812AE_PHY_REG_ARRAY) / sizeof(u32);
+u32 RTL8812AE_PHY_REG_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_PHY_REG_ARRAY);
 
 u32 RTL8821AE_PHY_REG_ARRAY[] = {
        0x800, 0x0020D090,
@@ -436,8 +435,7 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
        0xCB8, 0x00508240,
 };
 
-u32 RTL8821AE_PHY_REG_1TARRAYLEN =
-       sizeof(RTL8821AE_PHY_REG_ARRAY) / sizeof(u32);
+u32 RTL8821AE_PHY_REG_1TARRAYLEN = ARRAY_SIZE(RTL8821AE_PHY_REG_ARRAY);
 
 u32 RTL8812AE_PHY_REG_ARRAY_PG[] = {
        0, 0, 0, 0x00000c20, 0xffffffff, 0x34363840,
@@ -488,8 +486,7 @@ u32 RTL8812AE_PHY_REG_ARRAY_PG[] = {
        1, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628
 };
 
-u32 RTL8812AE_PHY_REG_ARRAY_PGLEN =
-               sizeof(RTL8812AE_PHY_REG_ARRAY_PG) / sizeof(u32);
+u32 RTL8812AE_PHY_REG_ARRAY_PGLEN = ARRAY_SIZE(RTL8812AE_PHY_REG_ARRAY_PG);
 
 u32 RTL8821AE_PHY_REG_ARRAY_PG[] = {
        0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638,
@@ -509,8 +506,7 @@ u32 RTL8821AE_PHY_REG_ARRAY_PG[] = {
        1, 0, 0, 0x00000c44, 0x0000ffff, 0x00002022
 };
 
-u32 RTL8821AE_PHY_REG_ARRAY_PGLEN =
-               sizeof(RTL8821AE_PHY_REG_ARRAY_PG) / sizeof(u32);
+u32 RTL8821AE_PHY_REG_ARRAY_PGLEN = ARRAY_SIZE(RTL8821AE_PHY_REG_ARRAY_PG);
 
 u32 RTL8812AE_RADIOA_ARRAY[] = {
                0x000, 0x00010000,
@@ -927,7 +923,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
                0x018, 0x0001712A,
 };
 
-u32 RTL8812AE_RADIOA_1TARRAYLEN = sizeof(RTL8812AE_RADIOA_ARRAY) / sizeof(u32);
+u32 RTL8812AE_RADIOA_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_RADIOA_ARRAY);
 
 u32 RTL8812AE_RADIOB_ARRAY[] = {
                0x056, 0x00051CF2,
@@ -1335,7 +1331,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
                0x008, 0x00008400,
 };
 
-u32 RTL8812AE_RADIOB_1TARRAYLEN = sizeof(RTL8812AE_RADIOB_ARRAY) / sizeof(u32);
+u32 RTL8812AE_RADIOB_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_RADIOB_ARRAY);
 
 u32 RTL8821AE_RADIOA_ARRAY[] = {
                0x018, 0x0001712A,
@@ -1929,7 +1925,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
 
 };
 
-u32 RTL8821AE_RADIOA_1TARRAYLEN = sizeof(RTL8821AE_RADIOA_ARRAY) / sizeof(u32);
+u32 RTL8821AE_RADIOA_1TARRAYLEN = ARRAY_SIZE(RTL8821AE_RADIOA_ARRAY);
 
 u32 RTL8812AE_MAC_REG_ARRAY[] = {
                0x010, 0x0000000C,
@@ -2041,7 +2037,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = {
                0x718, 0x00000040,
 };
 
-u32 RTL8812AE_MAC_1T_ARRAYLEN = sizeof(RTL8812AE_MAC_REG_ARRAY) / sizeof(u32);
+u32 RTL8812AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8812AE_MAC_REG_ARRAY);
 
 u32 RTL8821AE_MAC_REG_ARRAY[] = {
                0x428, 0x0000000A,
@@ -2143,7 +2139,7 @@ u32 RTL8821AE_MAC_REG_ARRAY[] = {
                0x718, 0x00000040,
 };
 
-u32 RTL8821AE_MAC_1T_ARRAYLEN = sizeof(RTL8821AE_MAC_REG_ARRAY) / sizeof(u32);
+u32 RTL8821AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8821AE_MAC_REG_ARRAY);
 
 u32 RTL8812AE_AGC_TAB_ARRAY[] = {
        0x80000001, 0x00000000, 0x40000000, 0x00000000,
@@ -2479,8 +2475,7 @@ u32 RTL8812AE_AGC_TAB_ARRAY[] = {
                0xE50, 0x00000020,
 };
 
-u32 RTL8812AE_AGC_TAB_1TARRAYLEN =
-       sizeof(RTL8812AE_AGC_TAB_ARRAY) / sizeof(u32);
+u32 RTL8812AE_AGC_TAB_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_AGC_TAB_ARRAY);
 
 u32 RTL8821AE_AGC_TAB_ARRAY[] = {
                0x81C, 0xBF000001,
@@ -2676,8 +2671,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
                0xC50, 0x00000020,
 };
 
-u32 RTL8821AE_AGC_TAB_1TARRAYLEN =
-       sizeof(RTL8821AE_AGC_TAB_ARRAY) / sizeof(u32);
+u32 RTL8821AE_AGC_TAB_1TARRAYLEN = ARRAY_SIZE(RTL8821AE_AGC_TAB_ARRAY);
 
 /******************************************************************************
 *                           TXPWR_LMT.TXT
@@ -3250,7 +3244,7 @@ u8 *RTL8812AE_TXPWR_LMT[] = {
        "MKK", "5G", "80M", "VHT", "2T", "155", "63"
 };
 
-u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN = sizeof(RTL8812AE_TXPWR_LMT) / sizeof(u8 *);
+u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN = ARRAY_SIZE(RTL8812AE_TXPWR_LMT);
 
 u8 *RTL8821AE_TXPWR_LMT[] = {
        "FCC", "2.4G", "20M", "CCK", "1T", "01", "32",
@@ -3819,4 +3813,4 @@ u8 *RTL8821AE_TXPWR_LMT[] = {
        "MKK", "5G", "80M", "VHT", "2T", "155", "63"
 };
 
-u32 RTL8821AE_TXPWR_LMT_ARRAY_LEN = sizeof(RTL8821AE_TXPWR_LMT) / sizeof(u8 *);
+u32 RTL8821AE_TXPWR_LMT_ARRAY_LEN = ARRAY_SIZE(RTL8821AE_TXPWR_LMT);
index 92d4859ec9067f708822276777d1c149cdfc1e98..e2b14793b705c4b9490d7946d0aa0b418d12cf2a 100644 (file)
@@ -2093,14 +2093,21 @@ struct rtl_wow_pattern {
        u32 mask[4];
 };
 
+/* struct to store contents of interrupt vectors */
+struct rtl_int {
+       u32 inta;
+       u32 intb;
+       u32 intc;
+       u32 intd;
+};
+
 struct rtl_hal_ops {
        int (*init_sw_vars) (struct ieee80211_hw *hw);
        void (*deinit_sw_vars) (struct ieee80211_hw *hw);
        void (*read_chip_version)(struct ieee80211_hw *hw);
        void (*read_eeprom_info) (struct ieee80211_hw *hw);
        void (*interrupt_recognized) (struct ieee80211_hw *hw,
-                                     u32 *p_inta, u32 *p_intb,
-                                     u32 *p_intc, u32 *p_intd);
+                                     struct rtl_int *intvec);
        int (*hw_init) (struct ieee80211_hw *hw);
        void (*hw_disable) (struct ieee80211_hw *hw);
        void (*hw_suspend) (struct ieee80211_hw *hw);
index 1d799bffaa9f35ab772057beae1ba067d8b406df..e7d77acdf18c33d9b0346a97348c0d0d5b932d33 100644 (file)
@@ -310,10 +310,8 @@ static int wl1251_hw_init_data_path_config(struct wl1251 *wl)
        /* asking for the data path parameters */
        wl->data_path = kzalloc(sizeof(struct acx_data_path_params_resp),
                                GFP_KERNEL);
-       if (!wl->data_path) {
-               wl1251_error("Couldnt allocate data path parameters");
+       if (!wl->data_path)
                return -ENOMEM;
-       }
 
        ret = wl1251_acx_data_path_params(wl, wl->data_path);
        if (ret < 0) {
index a4859993db3c600bb2cd4cadf2e30794da19f338..3ca9167d6146af610c445f358bf45b2bebd52c03 100644 (file)
@@ -146,7 +146,7 @@ int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        ret = wl1271_cmd_configure(wl, ACX_FEATURE_CFG,
                                   feature, sizeof(*feature));
        if (ret < 0) {
-               wl1271_error("Couldnt set HW encryption");
+               wl1271_error("Couldn't set HW encryption");
                goto out;
        }
 
index f46d7fdf9a0008bf1cae968fa8ab36ac09da00d5..7011c5d9599f4ee73884a57ac4b2a1719471ff4e 100644 (file)
@@ -1129,10 +1129,8 @@ int wl12xx_acx_config_hangover(struct wl1271 *wl);
 int wlcore_acx_average_rssi(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                            s8 *avg_rssi);
 
-#ifdef CONFIG_PM
 int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
                                        enum rx_filter_action action);
 int wl1271_acx_set_rx_filter(struct wl1271 *wl, u8 index, bool enable,
                             struct wl12xx_rx_filter *filter);
-#endif /* CONFIG_PM */
 #endif /* __WL1271_ACX_H__ */
index d47921a845098815652a46f7fc16cdf8bccf3488..09714034dbf1cd246f5a398aad30384768fa066c 100644 (file)
@@ -42,6 +42,7 @@
 #include "sysfs.h"
 
 #define WL1271_BOOT_RETRIES 3
+#define WL1271_SUSPEND_SLEEP 100
 
 static char *fwlog_param;
 static int fwlog_mem_blocks = -1;
@@ -388,7 +389,6 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
 {
        struct wl12xx_vif *wlvif;
-       struct timespec ts;
        u32 old_tx_blk_count = wl->tx_blocks_available;
        int avail, freed_blocks;
        int i;
@@ -485,8 +485,7 @@ static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
        }
 
        /* update the host-chipset time offset */
-       getnstimeofday(&ts);
-       wl->time_offset = (timespec_to_ns(&ts) >> 10) -
+       wl->time_offset = (ktime_get_boot_ns() >> 10) -
                (s64)(status->fw_localtime);
 
        wl->fw_fast_lnk_map = status->link_fast_bitmap;
@@ -979,6 +978,24 @@ static int wlcore_fw_wakeup(struct wl1271 *wl)
        return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
 }
 
+static int wlcore_fw_sleep(struct wl1271 *wl)
+{
+       int ret;
+
+       mutex_lock(&wl->mutex);
+       ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
+       if (ret < 0) {
+               wl12xx_queue_recovery_work(wl);
+               goto out;
+       }
+       set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
+out:
+       mutex_unlock(&wl->mutex);
+       mdelay(WL1271_SUSPEND_SLEEP);
+
+       return 0;
+}
+
 static int wl1271_setup(struct wl1271 *wl)
 {
        wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
@@ -1326,7 +1343,6 @@ static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
 }
 
 
-#ifdef CONFIG_PM
 static int
 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
 {
@@ -1698,8 +1714,8 @@ static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        }
 }
 
-static int wl1271_op_suspend(struct ieee80211_hw *hw,
-                           struct cfg80211_wowlan *wow)
+static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
+                                           struct cfg80211_wowlan *wow)
 {
        struct wl1271 *wl = hw->priv;
        struct wl12xx_vif *wlvif;
@@ -1749,7 +1765,6 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
                goto out_sleep;
 
 out_sleep:
-       wl1271_ps_elp_sleep(wl);
        mutex_unlock(&wl->mutex);
 
        if (ret < 0) {
@@ -1782,10 +1797,19 @@ out_sleep:
         */
        cancel_delayed_work(&wl->tx_watchdog_work);
 
+       /*
+        * Use an immediate call for allowing the firmware to go into power
+        * save during suspend.
+        * Using a workqueue for this last write was only happening on resume
+        * leaving the firmware with power save disabled during suspend,
+        * while consuming full power during wowlan suspend.
+        */
+       wlcore_fw_sleep(wl);
+
        return 0;
 }
 
-static int wl1271_op_resume(struct ieee80211_hw *hw)
+static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
 {
        struct wl1271 *wl = hw->priv;
        struct wl12xx_vif *wlvif;
@@ -1869,7 +1893,6 @@ out:
 
        return 0;
 }
-#endif
 
 static int wl1271_op_start(struct ieee80211_hw *hw)
 {
index a3f5e9ca492af5815a63b75f1e486709f4668045..00e9b4624dcf452c960fe6bdf32e00a02db67f73 100644 (file)
@@ -264,7 +264,6 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                               struct sk_buff *skb, u32 extra,
                               struct ieee80211_tx_info *control, u8 hlid)
 {
-       struct timespec ts;
        struct wl1271_tx_hw_descr *desc;
        int ac, rate_idx;
        s64 hosttime;
@@ -287,8 +286,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        }
 
        /* configure packet life time */
-       getnstimeofday(&ts);
-       hosttime = (timespec_to_ns(&ts) >> 10);
+       hosttime = (ktime_get_boot_ns() >> 10);
        desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
 
        is_dummy = wl12xx_is_dummy_packet(wl, skb);
index f837d666cbd499c8e33a1514f55344a1796005a1..1e46e60b8f1080e339ebe81c1710dabb23afef75 100644 (file)
@@ -1287,7 +1287,7 @@ static void nvme_config_discard(struct nvme_ctrl *ctrl,
        BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
                        NVME_DSM_MAX_RANGES);
 
-       queue->limits.discard_alignment = size;
+       queue->limits.discard_alignment = 0;
        queue->limits.discard_granularity = size;
 
        blk_queue_max_discard_sectors(queue, UINT_MAX);
@@ -1705,7 +1705,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
                blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
                blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
        }
-       if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
+       if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
+           is_power_of_2(ctrl->max_hw_sectors))
                blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
        blk_queue_virt_boundary(q, ctrl->page_size - 1);
        if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
@@ -2869,7 +2870,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
        blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
        nvme_set_queue_limits(ctrl, ns->queue);
-       nvme_setup_streams_ns(ctrl, ns);
 
        id = nvme_identify_ns(ctrl, nsid);
        if (!id)
@@ -2880,6 +2880,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
        if (nvme_init_ns_head(ns, nsid, id, &new))
                goto out_free_id;
+       nvme_setup_streams_ns(ctrl, ns);
        
 #ifdef CONFIG_NVME_MULTIPATH
        /*
@@ -2965,8 +2966,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
                return;
 
        if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
-               if (blk_get_integrity(ns->disk))
-                       blk_integrity_unregister(ns->disk);
                nvme_mpath_remove_disk_links(ns);
                sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvme_ns_id_attr_group);
@@ -2974,6 +2973,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
                        nvme_nvm_unregister_sysfs(ns);
                del_gendisk(ns->disk);
                blk_cleanup_queue(ns->queue);
+               if (blk_get_integrity(ns->disk))
+                       blk_integrity_unregister(ns->disk);
        }
 
        mutex_lock(&ns->ctrl->subsys->lock);
index 0a8af4daef8903f8ba983d345f1044498c57a975..794e66e4aa20115f4dc3a6b5fc12f706b2040bf4 100644 (file)
@@ -3221,7 +3221,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
                /* initiate nvme ctrl ref counting teardown */
                nvme_uninit_ctrl(&ctrl->ctrl);
-               nvme_put_ctrl(&ctrl->ctrl);
 
                /* Remove core ctrl ref. */
                nvme_put_ctrl(&ctrl->ctrl);
index f4c73292b304720417524fac4c17361eb0c976e4..1b9ef35cf0d9612b8e7e62bd6d717acc10bdef91 100644 (file)
@@ -77,9 +77,10 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
        if (of_property_read_bool(child, "broken-turn-around"))
                mdio->phy_ignore_ta_mask |= 1 << addr;
 
-       of_property_read_u32(child, "reset-delay-us", &phy->mdio.reset_delay);
-       of_property_read_u32(child, "reset-post-delay-us",
-                            &phy->mdio.reset_post_delay);
+       of_property_read_u32(child, "reset-assert-us",
+                            &phy->mdio.reset_assert_delay);
+       of_property_read_u32(child, "reset-deassert-us",
+                            &phy->mdio.reset_deassert_delay);
 
        /* Associate the OF node with the device structure so it
         * can be looked up later */
index a25fed52f7e94de4bd3dd5cb8b0922e1df8e81bf..41b740aed3a346e4bbc610959281649447f83bd4 100644 (file)
@@ -1692,3 +1692,36 @@ void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
        iounmap(base_addr);
 }
 
+
+/*
+ * The design of the Diva management card in rp34x0 machines (rp3410, rp3440)
+ * seems rushed, so that many built-in components simply don't work.
+ * The following quirks disable the serial AUX port and the built-in ATI RV100
+ * Radeon 7000 graphics card which both don't have any external connectors and
+ * thus are useless, and even worse, e.g. the AUX port occupies ttyS0 and as
+ * such makes those machines the only PARISC machines on which we can't use
+ * ttyS0 as boot console.
+ */
+static void quirk_diva_ati_card(struct pci_dev *dev)
+{
+       if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+           dev->subsystem_device != 0x1292)
+               return;
+
+       dev_info(&dev->dev, "Hiding Diva built-in ATI card");
+       dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY,
+       quirk_diva_ati_card);
+
+static void quirk_diva_aux_disable(struct pci_dev *dev)
+{
+       if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+           dev->subsystem_device != 0x1291)
+               return;
+
+       dev_info(&dev->dev, "Hiding Diva built-in AUX serial device");
+       dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
+       quirk_diva_aux_disable);
index 945099d49f8f9b08c2b159d2cf4899b1fdc4cea4..14fd865a512096393149fd63a3707305648f276f 100644 (file)
@@ -1012,7 +1012,12 @@ static int pci_pm_thaw_noirq(struct device *dev)
        if (pci_has_legacy_pm_support(pci_dev))
                return pci_legacy_resume_early(dev);
 
-       pci_update_current_state(pci_dev, PCI_D0);
+       /*
+        * pci_restore_state() requires the device to be in D0 (because of MSI
+        * restoration among other things), so force it into D0 in case the
+        * driver's "freeze" callbacks put it into a low-power state directly.
+        */
+       pci_set_power_state(pci_dev, PCI_D0);
        pci_restore_state(pci_dev);
 
        if (drv && drv->pm && drv->pm->thaw_noirq)
index a782a207ad31c31f996833ed3d0ef3aa8ff04238..c7e484f706543ebdc2ce63084cf1d45a2146869e 100644 (file)
@@ -91,9 +91,6 @@ config QETH_L3
          To compile as a module choose M. The module name is qeth_l3.
          If unsure, choose Y.
 
-config QETH_IPV6
-       def_bool y if (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
-
 config CCWGROUP
        tristate
        default (LCS || CTCM || QETH)
index 92ae84a927fcf391abebaccbd907d5d962ed9bed..0ee8f33efb544877c37af645d348918078d88957 100644 (file)
@@ -756,18 +756,14 @@ lcs_get_lancmd(struct lcs_card *card, int count)
 static void
 lcs_get_reply(struct lcs_reply *reply)
 {
-       WARN_ON(atomic_read(&reply->refcnt) <= 0);
-       atomic_inc(&reply->refcnt);
+       refcount_inc(&reply->refcnt);
 }
 
 static void
 lcs_put_reply(struct lcs_reply *reply)
 {
-        WARN_ON(atomic_read(&reply->refcnt) <= 0);
-        if (atomic_dec_and_test(&reply->refcnt)) {
+       if (refcount_dec_and_test(&reply->refcnt))
                kfree(reply);
-       }
-
 }
 
 static struct lcs_reply *
@@ -780,7 +776,7 @@ lcs_alloc_reply(struct lcs_cmd *cmd)
        reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC);
        if (!reply)
                return NULL;
-       atomic_set(&reply->refcnt,1);
+       refcount_set(&reply->refcnt, 1);
        reply->sequence_no = cmd->sequence_no;
        reply->received = 0;
        reply->rc = 0;
index fbc8b90b1f85a098b8761602454e1d1d0506f916..bd52caa3b11b54b5880ba739fc0108dafd48d374 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/workqueue.h>
+#include <linux/refcount.h>
 #include <asm/ccwdev.h>
 
 #define LCS_DBF_TEXT(level, name, text) \
@@ -271,7 +272,7 @@ struct lcs_buffer {
 struct lcs_reply {
        struct list_head list;
        __u16 sequence_no;
-       atomic_t refcnt;
+       refcount_t refcnt;
        /* Callback for completion notification. */
        void (*callback)(struct lcs_card *, struct lcs_cmd *);
        wait_queue_head_t wait_q;
index badf42acbf95b8104d6167dfc811640b1ec311df..db42107bf2f5f040734ebbda0ea8abaf7453edbc 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/ethtool.h>
 #include <linux/hashtable.h>
 #include <linux/ip.h>
+#include <linux/refcount.h>
 
 #include <net/ipv6.h>
 #include <net/if_inet6.h>
@@ -296,8 +297,23 @@ struct qeth_hdr_layer3 {
        __u8  ext_flags;
        __u16 vlan_id;
        __u16 frame_offset;
-       __u8  dest_addr[16];
-} __attribute__ ((packed));
+       union {
+               /* TX: */
+               u8 ipv6_addr[16];
+               struct ipv4 {
+                       u8 res[12];
+                       u32 addr;
+               } ipv4;
+               /* RX: */
+               struct rx {
+                       u8 res1[2];
+                       u8 src_mac[6];
+                       u8 res2[4];
+                       u16 vlan_id;
+                       u8 res3[2];
+               } rx;
+       } next_hop;
+};
 
 struct qeth_hdr_layer2 {
        __u8 id;
@@ -504,12 +520,6 @@ struct qeth_qdio_info {
        int default_out_queue;
 };
 
-#define QETH_ETH_MAC_V4      0x0100 /* like v4 */
-#define QETH_ETH_MAC_V6      0x3333 /* like v6 */
-/* tr mc mac is longer, but that will be enough to detect mc frames */
-#define QETH_TR_MAC_NC       0xc000 /* non-canonical */
-#define QETH_TR_MAC_C        0x0300 /* canonical */
-
 /**
  * buffer stuff for read channel
  */
@@ -632,7 +642,7 @@ struct qeth_reply {
        int rc;
        void *param;
        struct qeth_card *card;
-       atomic_t refcnt;
+       refcount_t refcnt;
 };
 
 struct qeth_card_blkt {
@@ -846,14 +856,16 @@ static inline int qeth_get_micros(void)
 
 static inline int qeth_get_ip_version(struct sk_buff *skb)
 {
-       __be16 *p = &((struct ethhdr *)skb->data)->h_proto;
+       struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
+       __be16 prot = veth->h_vlan_proto;
+
+       if (prot == htons(ETH_P_8021Q))
+               prot = veth->h_vlan_encapsulated_proto;
 
-       if (be16_to_cpu(*p) == ETH_P_8021Q)
-               p += 2;
-       switch (be16_to_cpu(*p)) {
-       case ETH_P_IPV6:
+       switch (prot) {
+       case htons(ETH_P_IPV6):
                return 6;
-       case ETH_P_IP:
+       case htons(ETH_P_IP):
                return 4;
        default:
                return 0;
index 6c815207f4f50470c1a0fea8992183aa67f69e37..bdc28330800ec5d403c43198df69c7f311f8379f 100644 (file)
@@ -564,7 +564,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
 
        reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
        if (reply) {
-               atomic_set(&reply->refcnt, 1);
+               refcount_set(&reply->refcnt, 1);
                atomic_set(&reply->received, 0);
                reply->card = card;
        }
@@ -573,14 +573,12 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
 
 static void qeth_get_reply(struct qeth_reply *reply)
 {
-       WARN_ON(atomic_read(&reply->refcnt) <= 0);
-       atomic_inc(&reply->refcnt);
+       refcount_inc(&reply->refcnt);
 }
 
 static void qeth_put_reply(struct qeth_reply *reply)
 {
-       WARN_ON(atomic_read(&reply->refcnt) <= 0);
-       if (atomic_dec_and_test(&reply->refcnt))
+       if (refcount_dec_and_test(&reply->refcnt))
                kfree(reply);
 }
 
@@ -4218,9 +4216,8 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
        cmd = (struct qeth_ipa_cmd *) data;
        if (!card->options.layer2 ||
            !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
-               memcpy(card->dev->dev_addr,
-                      &cmd->data.setadapterparms.data.change_addr.addr,
-                      OSA_ADDR_LEN);
+               ether_addr_copy(card->dev->dev_addr,
+                               cmd->data.setadapterparms.data.change_addr.addr);
                card->info.mac_bits |= QETH_LAYER2_MAC_READ;
        }
        qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
@@ -4242,9 +4239,9 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
                return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
-       cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
-       memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
-              card->dev->dev_addr, OSA_ADDR_LEN);
+       cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
+       ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
+                       card->dev->dev_addr);
        rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
                               NULL);
        return rc;
@@ -5386,6 +5383,13 @@ out:
 }
 EXPORT_SYMBOL_GPL(qeth_poll);
 
+static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
+{
+       if (!cmd->hdr.return_code)
+               cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
+       return cmd->hdr.return_code;
+}
+
 int qeth_setassparms_cb(struct qeth_card *card,
                        struct qeth_reply *reply, unsigned long data)
 {
@@ -6242,7 +6246,7 @@ static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
                                (struct qeth_checksum_cmd *)reply->param;
 
        QETH_CARD_TEXT(card, 4, "chkdoccb");
-       if (cmd->hdr.return_code)
+       if (qeth_setassparms_inspect_rc(cmd))
                return 0;
 
        memset(chksum_cb, 0, sizeof(*chksum_cb));
index ff6877f7b6f80ef43864b5623dbb0dfebc0c2d8e..619f897b4bb0cef7be641f6e45c8f31a1502409c 100644 (file)
@@ -10,6 +10,7 @@
 #define __QETH_CORE_MPC_H__
 
 #include <asm/qeth.h>
+#include <uapi/linux/if_ether.h>
 
 #define IPA_PDU_HEADER_SIZE    0x40
 #define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e)
@@ -25,7 +26,6 @@ extern unsigned char IPA_PDU_HEADER[];
 #define QETH_SEQ_NO_LENGTH     4
 #define QETH_MPC_TOKEN_LENGTH  4
 #define QETH_MCL_LENGTH                4
-#define OSA_ADDR_LEN           6
 
 #define QETH_TIMEOUT           (10 * HZ)
 #define QETH_IPA_TIMEOUT       (45 * HZ)
@@ -416,12 +416,11 @@ struct qeth_query_cmds_supp {
 } __attribute__ ((packed));
 
 struct qeth_change_addr {
-       __u32 cmd;
-       __u32 addr_size;
-       __u32 no_macs;
-       __u8 addr[OSA_ADDR_LEN];
-} __attribute__ ((packed));
-
+       u32 cmd;
+       u32 addr_size;
+       u32 no_macs;
+       u8 addr[ETH_ALEN];
+};
 
 struct qeth_snmp_cmd {
        __u8  token[16];
index 09b1c4ef3dc98ea1bd1e6c9a65ab7e43d630922e..f2130051ca11a9c609110661170fd13424f0ab37 100644 (file)
@@ -22,8 +22,7 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout);
 bool qeth_l2_vnicc_is_in_use(struct qeth_card *card);
 
 struct qeth_mac {
-       u8 mac_addr[OSA_ADDR_LEN];
-       u8 is_uc:1;
+       u8 mac_addr[ETH_ALEN];
        u8 disp_flag:2;
        struct hlist_node hnode;
 };
index 5863ea170ff26447630ed22acd5174db23bf0ee2..7f236440483f2ca6fdc82caf5d21b649f8434471 100644 (file)
@@ -109,8 +109,8 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
        if (!iob)
                return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-       cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
-       memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
+       cmd->data.setdelmac.mac_length = ETH_ALEN;
+       ether_addr_copy(cmd->data.setdelmac.mac, mac);
        return qeth_setdelmac_makerc(card, qeth_send_ipa_cmd(card, iob,
                                           NULL, NULL));
 }
@@ -123,7 +123,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
        rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
        if (rc == 0) {
                card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
-               memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
+               ether_addr_copy(card->dev->dev_addr, mac);
                dev_info(&card->gdev->dev,
                        "MAC address %pM successfully registered on device %s\n",
                        card->dev->dev_addr, card->dev->name);
@@ -156,54 +156,37 @@ static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
        return rc;
 }
 
-static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
+static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
 {
+       enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+                                       IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
        int rc;
 
-       QETH_CARD_TEXT(card, 2, "L2Sgmac");
-       rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC);
+       QETH_CARD_TEXT(card, 2, "L2Wmac");
+       rc = qeth_l2_send_setdelmac(card, mac, cmd);
        if (rc == -EEXIST)
-               QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n",
-                       mac, QETH_CARD_IFNAME(card));
+               QETH_DBF_MESSAGE(2, "MAC %pM already registered on %s\n",
+                                mac, QETH_CARD_IFNAME(card));
        else if (rc)
-               QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %d\n",
-                       mac, QETH_CARD_IFNAME(card), rc);
+               QETH_DBF_MESSAGE(2, "Failed to register MAC %pM on %s: %d\n",
+                                mac, QETH_CARD_IFNAME(card), rc);
        return rc;
 }
 
-static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
+static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
 {
+       enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+                                       IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
        int rc;
 
-       QETH_CARD_TEXT(card, 2, "L2Dgmac");
-       rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC);
+       QETH_CARD_TEXT(card, 2, "L2Rmac");
+       rc = qeth_l2_send_setdelmac(card, mac, cmd);
        if (rc)
-               QETH_DBF_MESSAGE(2,
-                       "Could not delete group MAC %pM on %s: %d\n",
-                       mac, QETH_CARD_IFNAME(card), rc);
+               QETH_DBF_MESSAGE(2, "Failed to delete MAC %pM on %s: %d\n",
+                                mac, QETH_CARD_IFNAME(card), rc);
        return rc;
 }
 
-static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac)
-{
-       if (mac->is_uc) {
-               return qeth_l2_send_setdelmac(card, mac->mac_addr,
-                                               IPA_CMD_SETVMAC);
-       } else {
-               return qeth_l2_send_setgroupmac(card, mac->mac_addr);
-       }
-}
-
-static int qeth_l2_remove_mac(struct qeth_card *card, struct qeth_mac *mac)
-{
-       if (mac->is_uc) {
-               return qeth_l2_send_setdelmac(card, mac->mac_addr,
-                                               IPA_CMD_DELVMAC);
-       } else {
-               return qeth_l2_send_delgroupmac(card, mac->mac_addr);
-       }
-}
-
 static void qeth_l2_del_all_macs(struct qeth_card *card)
 {
        struct qeth_mac *mac;
@@ -549,7 +532,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
                QETH_CARD_TEXT(card, 3, "setmcTYP");
                return -EOPNOTSUPP;
        }
-       QETH_CARD_HEX(card, 3, addr->sa_data, OSA_ADDR_LEN);
+       QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN);
        if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
                QETH_CARD_TEXT(card, 3, "setmcREC");
                return -ERESTARTSYS;
@@ -597,27 +580,23 @@ static void qeth_promisc_to_bridge(struct qeth_card *card)
  * only if there is not in the hash table storage already
  *
 */
-static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha,
-                           u8 is_uc)
+static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha)
 {
        u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2]));
        struct qeth_mac *mac;
 
        hash_for_each_possible(card->mac_htable, mac, hnode, mac_hash) {
-               if (is_uc == mac->is_uc &&
-                   !memcmp(ha->addr, mac->mac_addr, OSA_ADDR_LEN)) {
+               if (ether_addr_equal_64bits(ha->addr, mac->mac_addr)) {
                        mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
                        return;
                }
        }
 
        mac = kzalloc(sizeof(struct qeth_mac), GFP_ATOMIC);
-
        if (!mac)
                return;
 
-       memcpy(mac->mac_addr, ha->addr, OSA_ADDR_LEN);
-       mac->is_uc = is_uc;
+       ether_addr_copy(mac->mac_addr, ha->addr);
        mac->disp_flag = QETH_DISP_ADDR_ADD;
 
        hash_add(card->mac_htable, &mac->hnode, mac_hash);
@@ -643,26 +622,29 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
        spin_lock_bh(&card->mclock);
 
        netdev_for_each_mc_addr(ha, dev)
-               qeth_l2_add_mac(card, ha, 0);
-
+               qeth_l2_add_mac(card, ha);
        netdev_for_each_uc_addr(ha, dev)
-               qeth_l2_add_mac(card, ha, 1);
+               qeth_l2_add_mac(card, ha);
 
        hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
-               if (mac->disp_flag == QETH_DISP_ADDR_DELETE) {
-                       qeth_l2_remove_mac(card, mac);
+               switch (mac->disp_flag) {
+               case QETH_DISP_ADDR_DELETE:
+                       qeth_l2_remove_mac(card, mac->mac_addr);
                        hash_del(&mac->hnode);
                        kfree(mac);
-
-               } else if (mac->disp_flag == QETH_DISP_ADDR_ADD) {
-                       rc = qeth_l2_write_mac(card, mac);
+                       break;
+               case QETH_DISP_ADDR_ADD:
+                       rc = qeth_l2_write_mac(card, mac->mac_addr);
                        if (rc) {
                                hash_del(&mac->hnode);
                                kfree(mac);
-                       } else
-                               mac->disp_flag = QETH_DISP_ADDR_DELETE;
-               } else
+                               break;
+                       }
+                       /* fall through */
+               default:
+                       /* for next call to set_rx_mode(): */
                        mac->disp_flag = QETH_DISP_ADDR_DELETE;
+               }
        }
 
        spin_unlock_bh(&card->mclock);
index e5833837b799eceb069e708fe88394731eecdda4..49f92ebbc5ad77753f5773206e26869edbb2aef7 100644 (file)
@@ -29,7 +29,7 @@ struct qeth_ipaddr {
         */
        int  ref_counter;
        enum qeth_prot_versions proto;
-       unsigned char mac[OSA_ADDR_LEN];
+       unsigned char mac[ETH_ALEN];
        union {
                struct {
                        unsigned int addr;
@@ -69,7 +69,6 @@ struct qeth_ipato_entry {
 extern const struct attribute_group *qeth_l3_attr_groups[];
 
 void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *);
-int qeth_l3_string_to_ipaddr(const char *, enum qeth_prot_versions, __u8 *);
 int qeth_l3_create_device_attributes(struct device *);
 void qeth_l3_remove_device_attributes(struct device *);
 int qeth_l3_setrouting_v4(struct qeth_card *);
index ef0961e186869dd6b8ab06c0ebd901524bc41ffc..92bcb02671bc420f73d6f587a85a8ba019bdcb5e 100644 (file)
 #include <linux/kernel.h>
 #include <linux/etherdevice.h>
 #include <linux/ip.h>
+#include <linux/in.h>
 #include <linux/ipv6.h>
 #include <linux/inetdevice.h>
 #include <linux/igmp.h>
 #include <linux/slab.h>
+#include <linux/if_ether.h>
 #include <linux/if_vlan.h>
+#include <linux/skbuff.h>
 
 #include <net/ip.h>
 #include <net/arp.h>
 #include <net/route.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
 #include <net/ip6_fib.h>
 #include <net/ip6_checksum.h>
 #include <net/iucv/af_iucv.h>
 
 static int qeth_l3_set_offline(struct ccwgroup_device *);
 static int qeth_l3_stop(struct net_device *);
-static void qeth_l3_set_multicast_list(struct net_device *);
+static void qeth_l3_set_rx_mode(struct net_device *dev);
 static int qeth_l3_register_addr_entry(struct qeth_card *,
                struct qeth_ipaddr *);
 static int qeth_l3_deregister_addr_entry(struct qeth_card *,
                struct qeth_ipaddr *);
 
-static int qeth_l3_isxdigit(char *buf)
-{
-       while (*buf) {
-               if (!isxdigit(*buf++))
-                       return 0;
-       }
-       return 1;
-}
-
 static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
 {
        sprintf(buf, "%pI4", addr);
 }
 
-static int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
-{
-       int count = 0, rc = 0;
-       unsigned int in[4];
-       char c;
-
-       rc = sscanf(buf, "%u.%u.%u.%u%c",
-                   &in[0], &in[1], &in[2], &in[3], &c);
-       if (rc != 4 && (rc != 5 || c != '\n'))
-               return -EINVAL;
-       for (count = 0; count < 4; count++) {
-               if (in[count] > 255)
-                       return -EINVAL;
-               addr[count] = in[count];
-       }
-       return 0;
-}
-
 static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
 {
        sprintf(buf, "%pI6", addr);
 }
 
-static int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr)
-{
-       const char *end, *end_tmp, *start;
-       __u16 *in;
-       char num[5];
-       int num2, cnt, out, found, save_cnt;
-       unsigned short in_tmp[8] = {0, };
-
-       cnt = out = found = save_cnt = num2 = 0;
-       end = start = buf;
-       in = (__u16 *) addr;
-       memset(in, 0, 16);
-       while (*end) {
-               end = strchr(start, ':');
-               if (end == NULL) {
-                       end = buf + strlen(buf);
-                       end_tmp = strchr(start, '\n');
-                       if (end_tmp != NULL)
-                               end = end_tmp;
-                       out = 1;
-               }
-               if ((end - start)) {
-                       memset(num, 0, 5);
-                       if ((end - start) > 4)
-                               return -EINVAL;
-                       memcpy(num, start, end - start);
-                       if (!qeth_l3_isxdigit(num))
-                               return -EINVAL;
-                       sscanf(start, "%x", &num2);
-                       if (found)
-                               in_tmp[save_cnt++] = num2;
-                       else
-                               in[cnt++] = num2;
-                       if (out)
-                               break;
-               } else {
-                       if (found)
-                               return -EINVAL;
-                       found = 1;
-               }
-               start = ++end;
-       }
-       if (cnt + save_cnt > 8)
-               return -EINVAL;
-       cnt = 7;
-       while (save_cnt)
-               in[cnt--] = in_tmp[--save_cnt];
-       return 0;
-}
-
 void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
                                char *buf)
 {
@@ -139,17 +67,6 @@ void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
                qeth_l3_ipaddr6_to_string(addr, buf);
 }
 
-int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
-                               __u8 *addr)
-{
-       if (proto == QETH_PROT_IPV4)
-               return qeth_l3_string_to_ipaddr4(buf, addr);
-       else if (proto == QETH_PROT_IPV6)
-               return qeth_l3_string_to_ipaddr6(buf, addr);
-       else
-               return -EINVAL;
-}
-
 static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
 {
        int i, j;
@@ -207,8 +124,8 @@ inline int
 qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2)
 {
        return addr1->proto == addr2->proto &&
-               !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u))  &&
-               !memcmp(&addr1->mac, &addr2->mac, sizeof(addr1->mac));
+              !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
+              ether_addr_equal_64bits(addr1->mac, addr2->mac);
 }
 
 static struct qeth_ipaddr *
@@ -446,7 +363,7 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
        if (!iob)
                return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-       memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);
+       ether_addr_copy(cmd->data.setdelipm.mac, addr->mac);
        if (addr->proto == QETH_PROT_IPV6)
                memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
                       sizeof(struct in6_addr));
@@ -582,7 +499,6 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
        int rc = 0;
 
        QETH_CARD_TEXT(card, 3, "setrtg6");
-#ifdef CONFIG_QETH_IPV6
 
        if (!qeth_is_supported(card, IPA_IPV6))
                return 0;
@@ -599,7 +515,6 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
                        " on %s. Type set to 'no router'.\n", rc,
                        QETH_CARD_IFNAME(card));
        }
-#endif
        return rc;
 }
 
@@ -896,27 +811,6 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
        return rc;
 }
 
-static u8 qeth_l3_get_qeth_hdr_flags4(int cast_type)
-{
-       if (cast_type == RTN_MULTICAST)
-               return QETH_CAST_MULTICAST;
-       if (cast_type == RTN_BROADCAST)
-               return QETH_CAST_BROADCAST;
-       return QETH_CAST_UNICAST;
-}
-
-static u8 qeth_l3_get_qeth_hdr_flags6(int cast_type)
-{
-       u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
-       if (cast_type == RTN_MULTICAST)
-               return ct | QETH_CAST_MULTICAST;
-       if (cast_type == RTN_ANYCAST)
-               return ct | QETH_CAST_ANYCAST;
-       if (cast_type == RTN_BROADCAST)
-               return ct | QETH_CAST_BROADCAST;
-       return ct | QETH_CAST_UNICAST;
-}
-
 static int qeth_l3_setadapter_parms(struct qeth_card *card)
 {
        int rc = 0;
@@ -933,7 +827,6 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card)
        return rc;
 }
 
-#ifdef CONFIG_QETH_IPV6
 static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
                enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
 {
@@ -949,7 +842,6 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
                                   qeth_setassparms_cb, NULL);
        return rc;
 }
-#endif
 
 static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
 {
@@ -1045,7 +937,6 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
        return rc;
 }
 
-#ifdef CONFIG_QETH_IPV6
 static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
 {
        int rc;
@@ -1091,12 +982,9 @@ out:
        dev_info(&card->gdev->dev, "IPV6 enabled\n");
        return 0;
 }
-#endif
 
 static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
 {
-       int rc = 0;
-
        QETH_CARD_TEXT(card, 3, "strtipv6");
 
        if (!qeth_is_supported(card, IPA_IPV6)) {
@@ -1104,10 +992,7 @@ static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
                        "IPv6 not supported on %s\n", QETH_CARD_IFNAME(card));
                return 0;
        }
-#ifdef CONFIG_QETH_IPV6
-       rc = qeth_l3_softsetup_ipv6(card);
-#endif
-       return rc ;
+       return qeth_l3_softsetup_ipv6(card);
 }
 
 static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
@@ -1179,8 +1064,8 @@ static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
 
        cmd = (struct qeth_ipa_cmd *) data;
        if (cmd->hdr.return_code == 0)
-               memcpy(card->dev->dev_addr,
-                       cmd->data.create_destroy_addr.unique_id, ETH_ALEN);
+               ether_addr_copy(card->dev->dev_addr,
+                               cmd->data.create_destroy_addr.unique_id);
        else
                eth_random_addr(card->dev->dev_addr);
 
@@ -1328,81 +1213,22 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
        return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
 }
 
-static void qeth_l3_get_mac_for_ipm(__be32 ipm, char *mac)
-{
-       ip_eth_mc_map(ipm, mac);
-}
-
-static void qeth_l3_mark_all_mc_to_be_deleted(struct qeth_card *card)
-{
-       struct qeth_ipaddr *addr;
-       int i;
-
-       hash_for_each(card->ip_mc_htable, i, addr, hnode)
-               addr->disp_flag = QETH_DISP_ADDR_DELETE;
-
-}
-
-static void qeth_l3_add_all_new_mc(struct qeth_card *card)
-{
-       struct qeth_ipaddr *addr;
-       struct hlist_node *tmp;
-       int i;
-       int rc;
-
-       hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
-               if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
-                       rc = qeth_l3_register_addr_entry(card, addr);
-                       if (!rc || (rc == IPA_RC_LAN_OFFLINE))
-                               addr->ref_counter = 1;
-                       else {
-                               hash_del(&addr->hnode);
-                               kfree(addr);
-                       }
-               }
-       }
-
-}
-
-static void qeth_l3_delete_nonused_mc(struct qeth_card *card)
-{
-       struct qeth_ipaddr *addr;
-       struct hlist_node *tmp;
-       int i;
-       int rc;
-
-       hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
-               if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
-                       rc = qeth_l3_deregister_addr_entry(card, addr);
-                       if (!rc || (rc == IPA_RC_MC_ADDR_NOT_FOUND)) {
-                               hash_del(&addr->hnode);
-                               kfree(addr);
-                       }
-               }
-       }
-
-}
-
-
 static void
 qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
 {
        struct ip_mc_list *im4;
        struct qeth_ipaddr *tmp, *ipm;
-       char buf[MAX_ADDR_LEN];
 
        QETH_CARD_TEXT(card, 4, "addmc");
 
        tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
-               if (!tmp)
-                       return;
+       if (!tmp)
+               return;
 
        for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
             im4 = rcu_dereference(im4->next_rcu)) {
-               qeth_l3_get_mac_for_ipm(im4->multiaddr, buf);
-
+               ip_eth_mc_map(im4->multiaddr, tmp->mac);
                tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
-               memcpy(tmp->mac, buf, sizeof(tmp->mac));
                tmp->is_multicast = 1;
 
                ipm = qeth_l3_ip_from_hash(card, tmp);
@@ -1412,7 +1238,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
                        ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
                        if (!ipm)
                                continue;
-                       memcpy(ipm->mac, buf, sizeof(tmp->mac));
+                       ether_addr_copy(ipm->mac, tmp->mac);
                        ipm->u.a4.addr = be32_to_cpu(im4->multiaddr);
                        ipm->is_multicast = 1;
                        ipm->disp_flag = QETH_DISP_ADDR_ADD;
@@ -1466,25 +1292,21 @@ unlock:
        rcu_read_unlock();
 }
 
-#ifdef CONFIG_QETH_IPV6
-static void
-qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
+static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,
+                                   struct inet6_dev *in6_dev)
 {
        struct qeth_ipaddr *ipm;
        struct ifmcaddr6 *im6;
        struct qeth_ipaddr *tmp;
-       char buf[MAX_ADDR_LEN];
 
        QETH_CARD_TEXT(card, 4, "addmc6");
 
        tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
-               if (!tmp)
-                       return;
+       if (!tmp)
+               return;
 
        for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
-               ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
-
-               memcpy(tmp->mac, buf, sizeof(tmp->mac));
+               ipv6_eth_mc_map(&im6->mca_addr, tmp->mac);
                memcpy(&tmp->u.a6.addr, &im6->mca_addr.s6_addr,
                       sizeof(struct in6_addr));
                tmp->is_multicast = 1;
@@ -1499,7 +1321,7 @@ qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
                if (!ipm)
                        continue;
 
-               memcpy(ipm->mac, buf, OSA_ADDR_LEN);
+               ether_addr_copy(ipm->mac, tmp->mac);
                memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
                       sizeof(struct in6_addr));
                ipm->is_multicast = 1;
@@ -1560,7 +1382,6 @@ static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
        rcu_read_unlock();
        in6_dev_put(in6_dev);
 }
-#endif /* CONFIG_QETH_IPV6 */
 
 static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
                        unsigned short vid)
@@ -1600,9 +1421,8 @@ out:
 }
 
 static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
-                       unsigned short vid)
+                                        unsigned short vid)
 {
-#ifdef CONFIG_QETH_IPV6
        struct inet6_dev *in6_dev;
        struct inet6_ifaddr *ifa;
        struct qeth_ipaddr *addr;
@@ -1637,7 +1457,6 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
        kfree(addr);
 out:
        in6_dev_put(in6_dev);
-#endif /* CONFIG_QETH_IPV6 */
 }
 
 static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
@@ -1672,44 +1491,31 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
        /* unregister IP addresses of vlan device */
        qeth_l3_free_vlan_addresses(card, vid);
        clear_bit(vid, card->active_vlans);
-       qeth_l3_set_multicast_list(card->dev);
+       qeth_l3_set_rx_mode(dev);
        return 0;
 }
 
 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
                                struct qeth_hdr *hdr)
 {
-       __u16 prot;
-       struct iphdr *ip_hdr;
-       unsigned char tg_addr[MAX_ADDR_LEN];
-
        if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
-               prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
-                             ETH_P_IP;
+               u16 prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
+                                                                ETH_P_IP;
+               unsigned char tg_addr[ETH_ALEN];
+
+               skb_reset_network_header(skb);
                switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) {
                case QETH_CAST_MULTICAST:
-                       switch (prot) {
-#ifdef CONFIG_QETH_IPV6
-                       case ETH_P_IPV6:
-                               ndisc_mc_map((struct in6_addr *)
-                                    skb->data + 24,
-                                    tg_addr, card->dev, 0);
-                               break;
-#endif
-                       case ETH_P_IP:
-                               ip_hdr = (struct iphdr *)skb->data;
-                               ip_eth_mc_map(ip_hdr->daddr, tg_addr);
-                               break;
-                       default:
-                               memcpy(tg_addr, card->dev->broadcast,
-                                       card->dev->addr_len);
-                       }
+                       if (prot == ETH_P_IP)
+                               ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
+                       else
+                               ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
+
                        card->stats.multicast++;
                        skb->pkt_type = PACKET_MULTICAST;
                        break;
                case QETH_CAST_BROADCAST:
-                       memcpy(tg_addr, card->dev->broadcast,
-                               card->dev->addr_len);
+                       ether_addr_copy(tg_addr, card->dev->broadcast);
                        card->stats.multicast++;
                        skb->pkt_type = PACKET_BROADCAST;
                        break;
@@ -1721,12 +1527,11 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
                                skb->pkt_type = PACKET_OTHERHOST;
                        else
                                skb->pkt_type = PACKET_HOST;
-                       memcpy(tg_addr, card->dev->dev_addr,
-                               card->dev->addr_len);
+                       ether_addr_copy(tg_addr, card->dev->dev_addr);
                }
                if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
                        card->dev->header_ops->create(skb, card->dev, prot,
-                               tg_addr, &hdr->hdr.l3.dest_addr[2],
+                               tg_addr, &hdr->hdr.l3.next_hop.rx.src_mac,
                                card->dev->addr_len);
                else
                        card->dev->header_ops->create(skb, card->dev, prot,
@@ -1741,7 +1546,7 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
                                      QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
                u16 tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
                                hdr->hdr.l3.vlan_id :
-                               *((u16 *)&hdr->hdr.l3.dest_addr[12]);
+                               hdr->hdr.l3.next_hop.rx.vlan_id;
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
        }
 
@@ -1949,26 +1754,46 @@ qeth_l3_handle_promisc_mode(struct qeth_card *card)
        }
 }
 
-static void qeth_l3_set_multicast_list(struct net_device *dev)
+static void qeth_l3_set_rx_mode(struct net_device *dev)
 {
        struct qeth_card *card = dev->ml_priv;
+       struct qeth_ipaddr *addr;
+       struct hlist_node *tmp;
+       int i, rc;
 
        QETH_CARD_TEXT(card, 3, "setmulti");
        if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
            (card->state != CARD_STATE_UP))
                return;
        if (!card->options.sniffer) {
-
                spin_lock_bh(&card->mclock);
 
-               qeth_l3_mark_all_mc_to_be_deleted(card);
-
                qeth_l3_add_multicast_ipv4(card);
-#ifdef CONFIG_QETH_IPV6
                qeth_l3_add_multicast_ipv6(card);
-#endif
-               qeth_l3_delete_nonused_mc(card);
-               qeth_l3_add_all_new_mc(card);
+
+               hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
+                       switch (addr->disp_flag) {
+                       case QETH_DISP_ADDR_DELETE:
+                               rc = qeth_l3_deregister_addr_entry(card, addr);
+                               if (!rc || rc == IPA_RC_MC_ADDR_NOT_FOUND) {
+                                       hash_del(&addr->hnode);
+                                       kfree(addr);
+                               }
+                               break;
+                       case QETH_DISP_ADDR_ADD:
+                               rc = qeth_l3_register_addr_entry(card, addr);
+                               if (rc && rc != IPA_RC_LAN_OFFLINE) {
+                                       hash_del(&addr->hnode);
+                                       kfree(addr);
+                                       break;
+                               }
+                               addr->ref_counter = 1;
+                               /* fall through */
+                       default:
+                               /* for next call to set_rx_mode(): */
+                               addr->disp_flag = QETH_DISP_ADDR_DELETE;
+                       }
+               }
 
                spin_unlock_bh(&card->mclock);
 
@@ -2237,12 +2062,10 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
                        rc = -EFAULT;
                goto free_and_out;
        }
-#ifdef CONFIG_QETH_IPV6
        if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) {
                /* fails in case of GuestLAN QDIO mode */
                qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, &qinfo);
        }
-#endif
        if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) {
                QETH_CARD_TEXT(card, 4, "qactf");
                rc = -EFAULT;
@@ -2422,9 +2245,8 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        return rc;
 }
 
-static int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
+static int qeth_l3_get_cast_type(struct sk_buff *skb)
 {
-       int cast_type = RTN_UNSPEC;
        struct neighbour *n = NULL;
        struct dst_entry *dst;
 
@@ -2433,48 +2255,34 @@ static int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
        if (dst)
                n = dst_neigh_lookup_skb(dst, skb);
        if (n) {
-               cast_type = n->type;
+               int cast_type = n->type;
+
                rcu_read_unlock();
                neigh_release(n);
                if ((cast_type == RTN_BROADCAST) ||
                    (cast_type == RTN_MULTICAST) ||
                    (cast_type == RTN_ANYCAST))
                        return cast_type;
-               else
-                       return RTN_UNSPEC;
+               return RTN_UNSPEC;
        }
        rcu_read_unlock();
 
-       /* try something else */
+       /* no neighbour (eg AF_PACKET), fall back to target's IP address ... */
        if (be16_to_cpu(skb->protocol) == ETH_P_IPV6)
-               return (skb_network_header(skb)[24] == 0xff) ?
-                               RTN_MULTICAST : 0;
+               return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
+                               RTN_MULTICAST : RTN_UNSPEC;
        else if (be16_to_cpu(skb->protocol) == ETH_P_IP)
-               return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ?
-                               RTN_MULTICAST : 0;
-       /* ... */
-       if (!memcmp(skb->data, skb->dev->broadcast, 6))
+               return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
+                               RTN_MULTICAST : RTN_UNSPEC;
+
+       /* ... and MAC address */
+       if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, skb->dev->broadcast))
                return RTN_BROADCAST;
-       else {
-               u16 hdr_mac;
-
-               hdr_mac = *((u16 *)skb->data);
-               /* tr multicast? */
-               switch (card->info.link_type) {
-               case QETH_LINK_TYPE_HSTR:
-               case QETH_LINK_TYPE_LANE_TR:
-                       if ((hdr_mac == QETH_TR_MAC_NC) ||
-                           (hdr_mac == QETH_TR_MAC_C))
-                               return RTN_MULTICAST;
-                       break;
-               /* eth or so multicast? */
-               default:
-               if ((hdr_mac == QETH_ETH_MAC_V4) ||
-                           (hdr_mac == QETH_ETH_MAC_V6))
-                               return RTN_MULTICAST;
-               }
-       }
-       return cast_type;
+       if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
+               return RTN_MULTICAST;
+
+       /* default to unicast */
+       return RTN_UNSPEC;
 }
 
 static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
@@ -2494,17 +2302,27 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
        daddr[0] = 0xfe;
        daddr[1] = 0x80;
        memcpy(&daddr[8], iucv_hdr->destUserID, 8);
-       memcpy(hdr->hdr.l3.dest_addr, daddr, 16);
+       memcpy(hdr->hdr.l3.next_hop.ipv6_addr, daddr, 16);
 }
 
-static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
-               struct sk_buff *skb, int ipv, int cast_type)
+static u8 qeth_l3_cast_type_to_flag(int cast_type)
 {
-       struct dst_entry *dst;
+       if (cast_type == RTN_MULTICAST)
+               return QETH_CAST_MULTICAST;
+       if (cast_type == RTN_ANYCAST)
+               return QETH_CAST_ANYCAST;
+       if (cast_type == RTN_BROADCAST)
+               return QETH_CAST_BROADCAST;
+       return QETH_CAST_UNICAST;
+}
 
+static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
+                               struct sk_buff *skb, int ipv, int cast_type,
+                               unsigned int data_len)
+{
        memset(hdr, 0, sizeof(struct qeth_hdr));
        hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
-       hdr->hdr.l3.ext_flags = 0;
+       hdr->hdr.l3.length = data_len;
 
        /*
         * before we're going to overwrite this location with next hop ip.
@@ -2518,44 +2336,40 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
                hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
        }
 
-       hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
+       /* OSA only: */
+       if (!ipv) {
+               hdr->hdr.l3.flags = QETH_HDR_PASSTHRU;
+               if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
+                                           skb->dev->broadcast))
+                       hdr->hdr.l3.flags |= QETH_CAST_BROADCAST;
+               else
+                       hdr->hdr.l3.flags |= (cast_type == RTN_MULTICAST) ?
+                               QETH_CAST_MULTICAST : QETH_CAST_UNICAST;
+               return;
+       }
 
+       hdr->hdr.l3.flags = qeth_l3_cast_type_to_flag(cast_type);
        rcu_read_lock();
-       dst = skb_dst(skb);
        if (ipv == 4) {
-               struct rtable *rt = (struct rtable *) dst;
-               __be32 *pkey = &ip_hdr(skb)->daddr;
+               struct rtable *rt = skb_rtable(skb);
 
-               if (rt && rt->rt_gateway)
-                       pkey = &rt->rt_gateway;
-
-               /* IPv4 */
-               hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
-               memset(hdr->hdr.l3.dest_addr, 0, 12);
-               *((__be32 *) (&hdr->hdr.l3.dest_addr[12])) = *pkey;
-       } else if (ipv == 6) {
-               struct rt6_info *rt = (struct rt6_info *) dst;
-               struct in6_addr *pkey = &ipv6_hdr(skb)->daddr;
+               *((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ?
+                               rt_nexthop(rt, ip_hdr(skb)->daddr) :
+                               ip_hdr(skb)->daddr;
+       } else {
+               /* IPv6 */
+               const struct rt6_info *rt = skb_rt6_info(skb);
+               const struct in6_addr *next_hop;
 
                if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
-                       pkey = &rt->rt6i_gateway;
+                       next_hop = &rt->rt6i_gateway;
+               else
+                       next_hop = &ipv6_hdr(skb)->daddr;
+               memcpy(hdr->hdr.l3.next_hop.ipv6_addr, next_hop, 16);
 
-               /* IPv6 */
-               hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags6(cast_type);
-               if (card->info.type == QETH_CARD_TYPE_IQD)
-                       hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU;
-               memcpy(hdr->hdr.l3.dest_addr, pkey, 16);
-       } else {
-               if (!memcmp(skb->data + sizeof(struct qeth_hdr),
-                           skb->dev->broadcast, 6)) {
-                       /* broadcast? */
-                       hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
-                                               QETH_HDR_PASSTHRU;
-               } else {
-                       hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
-                               QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
-                               QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
-               }
+               hdr->hdr.l3.flags |= QETH_HDR_IPV6;
+               if (card->info.type != QETH_CARD_TYPE_IQD)
+                       hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU;
        }
        rcu_read_unlock();
 }
@@ -2587,7 +2401,6 @@ static void qeth_tso_fill_header(struct qeth_card *card,
 
        /*fix header to TSO values ...*/
        hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
-       hdr->hdr.hdr.l3.length = skb->len - sizeof(struct qeth_hdr_tso);
        /*set values which are fix for the first approach ...*/
        hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
        hdr->ext.imb_hdr_no  = 1;
@@ -2655,7 +2468,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
        struct qeth_card *card = dev->ml_priv;
        struct sk_buff *new_skb = NULL;
        int ipv = qeth_get_ip_version(skb);
-       int cast_type = qeth_l3_get_cast_type(card, skb);
+       int cast_type = qeth_l3_get_cast_type(skb);
        struct qeth_qdio_out_q *queue =
                card->qdio.out_qs[card->qdio.do_prio_queueing
                        || (cast_type && card->info.is_multicast_different) ?
@@ -2748,21 +2561,23 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
        if (use_tso) {
                hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso));
                memset(hdr, 0, sizeof(struct qeth_hdr_tso));
-               qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
+               qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
+                                   new_skb->len - sizeof(struct qeth_hdr_tso));
                qeth_tso_fill_header(card, hdr, new_skb);
                hdr_elements++;
        } else {
                if (data_offset < 0) {
                        hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
-                       qeth_l3_fill_header(card, hdr, new_skb, ipv,
-                                               cast_type);
+                       qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
+                                           new_skb->len -
+                                           sizeof(struct qeth_hdr));
                } else {
                        if (be16_to_cpu(new_skb->protocol) == ETH_P_AF_IUCV)
                                qeth_l3_fill_af_iucv_hdr(card, hdr, new_skb);
                        else {
                                qeth_l3_fill_header(card, hdr, new_skb, ipv,
-                                                       cast_type);
-                               hdr->hdr.l3.length = new_skb->len - data_offset;
+                                                   cast_type,
+                                                   new_skb->len - data_offset);
                        }
                }
 
@@ -2930,7 +2745,7 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
        .ndo_get_stats          = qeth_get_stats,
        .ndo_start_xmit         = qeth_l3_hard_start_xmit,
        .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_rx_mode        = qeth_l3_set_multicast_list,
+       .ndo_set_rx_mode        = qeth_l3_set_rx_mode,
        .ndo_do_ioctl           = qeth_do_ioctl,
        .ndo_change_mtu         = qeth_change_mtu,
        .ndo_fix_features       = qeth_fix_features,
@@ -2947,7 +2762,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
        .ndo_start_xmit         = qeth_l3_hard_start_xmit,
        .ndo_features_check     = qeth_features_check,
        .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_rx_mode        = qeth_l3_set_multicast_list,
+       .ndo_set_rx_mode        = qeth_l3_set_rx_mode,
        .ndo_do_ioctl           = qeth_do_ioctl,
        .ndo_change_mtu         = qeth_change_mtu,
        .ndo_fix_features       = qeth_fix_features,
@@ -3145,7 +2960,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                        __qeth_l3_open(card->dev);
                else
                        dev_open(card->dev);
-               qeth_l3_set_multicast_list(card->dev);
+               qeth_l3_set_rx_mode(card->dev);
                qeth_recover_features(card->dev);
                rtnl_unlock();
        }
@@ -3371,10 +3186,6 @@ static struct notifier_block qeth_l3_ip_notifier = {
        NULL,
 };
 
-#ifdef CONFIG_QETH_IPV6
-/**
- * IPv6 event handler
- */
 static int qeth_l3_ip6_event(struct notifier_block *this,
                             unsigned long event, void *ptr)
 {
@@ -3419,7 +3230,6 @@ static struct notifier_block qeth_l3_ip6_notifier = {
        qeth_l3_ip6_event,
        NULL,
 };
-#endif
 
 static int qeth_l3_register_notifiers(void)
 {
@@ -3429,35 +3239,25 @@ static int qeth_l3_register_notifiers(void)
        rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
        if (rc)
                return rc;
-#ifdef CONFIG_QETH_IPV6
        rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier);
        if (rc) {
                unregister_inetaddr_notifier(&qeth_l3_ip_notifier);
                return rc;
        }
-#else
-       pr_warn("There is no IPv6 support for the layer 3 discipline\n");
-#endif
        return 0;
 }
 
 static void qeth_l3_unregister_notifiers(void)
 {
-
        QETH_DBF_TEXT(SETUP, 5, "unregnot");
        WARN_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
-#ifdef CONFIG_QETH_IPV6
        WARN_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
-#endif /* QETH_IPV6 */
 }
 
 static int __init qeth_l3_init(void)
 {
-       int rc = 0;
-
        pr_info("register layer 3 discipline\n");
-       rc = qeth_l3_register_notifiers();
-       return rc;
+       return qeth_l3_register_notifiers();
 }
 
 static void __exit qeth_l3_exit(void)
index 6ea2b528a64efbabee5782da7bf8c5d1bce3ff4e..00a10b66c01f6316c934f1ec31311d601b8a418c 100644 (file)
 #include <linux/slab.h>
 #include <asm/ebcdic.h>
 #include <linux/hashtable.h>
+#include <linux/inet.h>
 #include "qeth_l3.h"
 
 #define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
 struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
 
+static int qeth_l3_string_to_ipaddr(const char *buf,
+                                   enum qeth_prot_versions proto, u8 *addr)
+{
+       const char *end;
+
+       if ((proto == QETH_PROT_IPV4 && !in4_pton(buf, -1, addr, -1, &end)) ||
+           (proto == QETH_PROT_IPV6 && !in6_pton(buf, -1, addr, -1, &end)))
+               return -EINVAL;
+       return 0;
+}
+
 static ssize_t qeth_l3_dev_route_show(struct qeth_card *card,
                        struct qeth_routing_info *route, char *buf)
 {
index 6e3d81969a77cc895580f79fa8e3aaa3b8bb4fee..d52265416da2af0da11cca770304f33ab203ad20 100644 (file)
@@ -1725,6 +1725,7 @@ struct aac_dev
 #define FIB_CONTEXT_FLAG_NATIVE_HBA            (0x00000010)
 #define FIB_CONTEXT_FLAG_NATIVE_HBA_TMF        (0x00000020)
 #define FIB_CONTEXT_FLAG_SCSI_CMD      (0x00000040)
+#define FIB_CONTEXT_FLAG_EH_RESET      (0x00000080)
 
 /*
  *     Define the command values
index bdf127aaab41d814e2337d2944166a0498bf1a66..d55332de08f91ad8e54e1296867569a8fa109a34 100644 (file)
@@ -1037,7 +1037,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
                        info = &aac->hba_map[bus][cid];
                        if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
                            info->devtype != AAC_DEVTYPE_NATIVE_RAW) {
-                               fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
+                               fib->flags |= FIB_CONTEXT_FLAG_EH_RESET;
                                cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
                        }
                }
index a4f28b7e4c65df81ef583eab878a3aa9fc45e0e4..e18877177f1b52d9c43ad3b991b858c80a6cc079 100644 (file)
@@ -1576,7 +1576,9 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
                return req;
 
        for_each_bio(bio) {
-               ret = blk_rq_append_bio(req, bio);
+               struct bio *bounce_bio = bio;
+
+               ret = blk_rq_append_bio(req, &bounce_bio);
                if (ret)
                        return ERR_PTR(ret);
        }
index 449ef5adbb2bc3719b3ba72953ebbecef9e4d086..dfb8da83fa504c979e9ba0639b32a4cae2c8969c 100644 (file)
@@ -374,10 +374,8 @@ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
                            model, compatible);
 
        if (strflags)
-               devinfo->flags = simple_strtoul(strflags, NULL, 0);
-       else
-               devinfo->flags = flags;
-
+               flags = (__force blist_flags_t)simple_strtoul(strflags, NULL, 0);
+       devinfo->flags = flags;
        devinfo->compatible = compatible;
 
        if (compatible)
index be5e919db0e8cd9e713727a91bc46923673ea556..0880d975eed3a56c58d27172bfd18c1a59da5d4b 100644 (file)
@@ -770,7 +770,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
  *     SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
  **/
 static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
-               int *bflags, int async)
+               blist_flags_t *bflags, int async)
 {
        int ret;
 
@@ -1049,14 +1049,15 @@ static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
  *   - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
  **/
 static int scsi_probe_and_add_lun(struct scsi_target *starget,
-                                 u64 lun, int *bflagsp,
+                                 u64 lun, blist_flags_t *bflagsp,
                                  struct scsi_device **sdevp,
                                  enum scsi_scan_mode rescan,
                                  void *hostdata)
 {
        struct scsi_device *sdev;
        unsigned char *result;
-       int bflags, res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
+       blist_flags_t bflags;
+       int res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
 
        /*
@@ -1201,7 +1202,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
  *     Modifies sdevscan->lun.
  **/
 static void scsi_sequential_lun_scan(struct scsi_target *starget,
-                                    int bflags, int scsi_level,
+                                    blist_flags_t bflags, int scsi_level,
                                     enum scsi_scan_mode rescan)
 {
        uint max_dev_lun;
@@ -1292,7 +1293,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
  *     0: scan completed (or no memory, so further scanning is futile)
  *     1: could not scan with REPORT LUN
  **/
-static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
+static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags,
                                enum scsi_scan_mode rescan)
 {
        unsigned char scsi_cmd[MAX_COMMAND_SIZE];
@@ -1538,7 +1539,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
                unsigned int id, u64 lun, enum scsi_scan_mode rescan)
 {
        struct Scsi_Host *shost = dev_to_shost(parent);
-       int bflags = 0;
+       blist_flags_t bflags = 0;
        int res;
        struct scsi_target *starget;
 
index 50e7d7e4a86179b9a47d18569bb759be0b674b88..a9996c16f4ae63fc820065c75b68fb51c5a01ab4 100644 (file)
@@ -967,7 +967,8 @@ sdev_show_wwid(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL);
 
-#define BLIST_FLAG_NAME(name) [ilog2(BLIST_##name)] = #name
+#define BLIST_FLAG_NAME(name)                                  \
+       [ilog2((__force unsigned int)BLIST_##name)] = #name
 static const char *const sdev_bflags_name[] = {
 #include "scsi_devinfo_tbl.c"
 };
@@ -984,7 +985,7 @@ sdev_show_blacklist(struct device *dev, struct device_attribute *attr,
        for (i = 0; i < sizeof(sdev->sdev_bflags) * BITS_PER_BYTE; i++) {
                const char *name = NULL;
 
-               if (!(sdev->sdev_bflags & BIT(i)))
+               if (!(sdev->sdev_bflags & (__force blist_flags_t)BIT(i)))
                        continue;
                if (i < ARRAY_SIZE(sdev_bflags_name) && sdev_bflags_name[i])
                        name = sdev_bflags_name[i];
index d0219e36080c3b79109ac405eb0cd726545585fc..10ebb213ddb33e2920e2fe83e60cc712a50c3002 100644 (file)
 
 /* Our blacklist flags */
 enum {
-       SPI_BLIST_NOIUS = 0x1,
+       SPI_BLIST_NOIUS = (__force blist_flags_t)0x1,
 };
 
 /* blacklist table, modelled on scsi_devinfo.c */
 static struct {
        char *vendor;
        char *model;
-       unsigned flags;
+       blist_flags_t flags;
 } spi_static_device_list[] __initdata = {
        {"HP", "Ultrium 3-SCSI", SPI_BLIST_NOIUS },
        {"IBM", "ULTRIUM-TD3", SPI_BLIST_NOIUS },
@@ -221,9 +221,11 @@ static int spi_device_configure(struct transport_container *tc,
 {
        struct scsi_device *sdev = to_scsi_device(dev);
        struct scsi_target *starget = sdev->sdev_target;
-       unsigned bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
-                                                     &sdev->inquiry[16],
-                                                     SCSI_DEVINFO_SPI);
+       blist_flags_t bflags;
+
+       bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
+                                            &sdev->inquiry[16],
+                                            SCSI_DEVINFO_SPI);
 
        /* Populate the target capability fields with the values
         * gleaned from the device inquiry */
index 77fe55ce790c61a8835c4e2338a36be43dafcbac..d65345312527ce450b539964aa0465e1e6787b44 100644 (file)
@@ -79,6 +79,7 @@
 #define A3700_SPI_BYTE_LEN             BIT(5)
 #define A3700_SPI_CLK_PRESCALE         BIT(0)
 #define A3700_SPI_CLK_PRESCALE_MASK    (0x1f)
+#define A3700_SPI_CLK_EVEN_OFFS                (0x10)
 
 #define A3700_SPI_WFIFO_THRS_BIT       28
 #define A3700_SPI_RFIFO_THRS_BIT       24
@@ -220,6 +221,13 @@ static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
 
        prescale = DIV_ROUND_UP(clk_get_rate(a3700_spi->clk), speed_hz);
 
+       /* For prescaler values over 15, we can only set it by steps of 2.
+        * Starting from A3700_SPI_CLK_EVEN_OFFS, we set values from 0 up to
+        * 30. We only use this range from 16 to 30.
+        */
+       if (prescale > 15)
+               prescale = A3700_SPI_CLK_EVEN_OFFS + DIV_ROUND_UP(prescale, 2);
+
        val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
        val = val & ~A3700_SPI_CLK_PRESCALE_MASK;
 
index f95da364c2832b0142158e12c97648aab81b7165..66947097102370d0f54ba2559abddab1ac812a5d 100644 (file)
@@ -1661,12 +1661,12 @@ static int atmel_spi_remove(struct platform_device *pdev)
        pm_runtime_get_sync(&pdev->dev);
 
        /* reset the hardware and block queue progress */
-       spin_lock_irq(&as->lock);
        if (as->use_dma) {
                atmel_spi_stop_dma(master);
                atmel_spi_release_dma(master);
        }
 
+       spin_lock_irq(&as->lock);
        spi_writel(as, CR, SPI_BIT(SWRST));
        spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
        spi_readl(as, SR);
index 2ce875764ca646a2bdfb803cae33465ab8fa1786..0835a8d88fb8f85ab5ae44a4aa74d94121d19d87 100644 (file)
@@ -377,8 +377,8 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
        /* Sets SPCMD */
        rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
 
-       /* Enables SPI function in master mode */
-       rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR);
+       /* Sets RSPI mode */
+       rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
 
        return 0;
 }
index c5cd635c28f388bec2cfd47b9a6c6c9dcec9e046..41410031f8e99e6a1d54b8f94990df0133356ced 100644 (file)
@@ -525,7 +525,7 @@ err_free_master:
 
 static int sun4i_spi_remove(struct platform_device *pdev)
 {
-       pm_runtime_disable(&pdev->dev);
+       pm_runtime_force_suspend(&pdev->dev);
 
        return 0;
 }
index bc7100b93dfcf0c24213f479f9a5fffc41666315..e0b9fe1d0e37d98a7243ca35a56b1d62e024e8b3 100644 (file)
@@ -271,6 +271,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
        while (remaining_words) {
                int n_words, tx_words, rx_words;
                u32 sr;
+               int stalled;
 
                n_words = min(remaining_words, xspi->buffer_size);
 
@@ -299,7 +300,17 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
 
                /* Read out all the data from the Rx FIFO */
                rx_words = n_words;
+               stalled = 10;
                while (rx_words) {
+                       if (rx_words == n_words && !(stalled--) &&
+                           !(sr & XSPI_SR_TX_EMPTY_MASK) &&
+                           (sr & XSPI_SR_RX_EMPTY_MASK)) {
+                               dev_err(&spi->dev,
+                                       "Detected stall. Check C_SPI_MODE and C_SPI_MEMORY\n");
+                               xspi_init_hw(xspi);
+                               return -EIO;
+                       }
+
                        if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) {
                                xilinx_spi_rx(xspi);
                                rx_words--;
index d8e4219c2324900c583245407a7c78f20e20325e..f48a2ee587a4ff8426643642d73061ea960a5820 100644 (file)
@@ -3,10 +3,7 @@ config SSB_POSSIBLE
        depends on HAS_IOMEM && HAS_DMA
        default y
 
-menu "Sonics Silicon Backplane"
-       depends on SSB_POSSIBLE
-
-config SSB
+menuconfig SSB
        tristate "Sonics Silicon Backplane support"
        depends on SSB_POSSIBLE
        help
@@ -21,6 +18,8 @@ config SSB
 
          If unsure, say N.
 
+if SSB
+
 # Common SPROM support routines
 config SSB_SPROM
        bool
@@ -185,4 +184,4 @@ config SSB_DRIVER_GPIO
 
          If unsure, say N
 
-endmenu
+endif # SSB
index 7c69b4a9694d2016a8aac3b63a4b7d4399146688..0d99b242e82e3f84da25a47564f96db60be4b5f5 100644 (file)
@@ -920,7 +920,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                                        " %d i: %d bio: %p, allocating another"
                                        " bio\n", bio->bi_vcnt, i, bio);
 
-                               rc = blk_rq_append_bio(req, bio);
+                               rc = blk_rq_append_bio(req, &bio);
                                if (rc) {
                                        pr_err("pSCSI: failed to append bio\n");
                                        goto fail;
@@ -938,7 +938,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
        }
 
        if (bio) {
-               rc = blk_rq_append_bio(req, bio);
+               rc = blk_rq_append_bio(req, &bio);
                if (rc) {
                        pr_err("pSCSI: failed to append bio\n");
                        goto fail;
index f937082f32449a9f3316e581675c47e51f7f6d30..58e2fe40b2a04423de26729613bae16c233c8920 100644 (file)
@@ -34,6 +34,7 @@ config CRAMFS_BLOCKDEV
 config CRAMFS_MTD
        bool "Support CramFs image directly mapped in physical memory"
        depends on CRAMFS && MTD
+       depends on CRAMFS=m || MTD=y
        default y if !CRAMFS_BLOCKDEV
        help
          This option allows the CramFs driver to load data directly from
index 156f56acfe8e7cce22cac60a0cf35635d274a1c3..5688b5e1b9378107597a6117c8c3732889f951d2 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1339,15 +1339,10 @@ void setup_new_exec(struct linux_binprm * bprm)
                 * avoid bad behavior from the prior rlimits. This has to
                 * happen before arch_pick_mmap_layout(), which examines
                 * RLIMIT_STACK, but after the point of no return to avoid
-                * races from other threads changing the limits. This also
-                * must be protected from races with prlimit() calls.
+                * needing to clean up the change on failure.
                 */
-               task_lock(current->group_leader);
                if (current->signal->rlim[RLIMIT_STACK].rlim_cur > _STK_LIM)
                        current->signal->rlim[RLIMIT_STACK].rlim_cur = _STK_LIM;
-               if (current->signal->rlim[RLIMIT_STACK].rlim_max > _STK_LIM)
-                       current->signal->rlim[RLIMIT_STACK].rlim_max = _STK_LIM;
-               task_unlock(current->group_leader);
        }
 
        arch_pick_mmap_layout(current->mm);
index 07bca11749d406fd4e3130e31026f1db9ac0d879..c941251ac0c008587b1aaee10fb52e27a50e684a 100644 (file)
@@ -4722,6 +4722,7 @@ retry:
                                                    EXT4_INODE_EOFBLOCKS);
                }
                ext4_mark_inode_dirty(handle, inode);
+               ext4_update_inode_fsync_trans(handle, inode, 1);
                ret2 = ext4_journal_stop(handle);
                if (ret2)
                        break;
index b4267d72f24955c314d78f350c4247e6c7373cc2..b32cf263750d1d3b2024847e78bf1c6181e8a44f 100644 (file)
@@ -816,6 +816,8 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
                struct posix_acl *p = get_acl(dir, ACL_TYPE_DEFAULT);
 
+               if (IS_ERR(p))
+                       return ERR_CAST(p);
                if (p) {
                        int acl_size = p->a_count * sizeof(ext4_acl_entry);
 
index 7df2c5644e59c9678e9379985338f972a69d42d1..534a9130f62578931a24477f317c17b42c71ffc3 100644 (file)
@@ -149,6 +149,15 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
  */
 int ext4_inode_is_fast_symlink(struct inode *inode)
 {
+       if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
+               int ea_blocks = EXT4_I(inode)->i_file_acl ?
+                               EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
+
+               if (ext4_has_inline_data(inode))
+                       return 0;
+
+               return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
+       }
        return S_ISLNK(inode->i_mode) && inode->i_size &&
               (inode->i_size < EXT4_N_BLOCKS * 4);
 }
index 798b3ac680db1b4f8c4510a0bd66d0510d11216a..e750d68fbcb50c0447e13556905da8401f5f6b03 100644 (file)
@@ -1399,6 +1399,10 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
                               "falling back\n"));
        }
        nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
+       if (!nblocks) {
+               ret = NULL;
+               goto cleanup_and_exit;
+       }
        start = EXT4_I(dir)->i_dir_start_lookup;
        if (start >= nblocks)
                start = 0;
index e158ec6b527b2d72341e096f76e628b5e61ea4cf..9d1374ab6e06f2cd7b57aedf196c53a745a1a683 100644 (file)
@@ -2826,6 +2826,7 @@ long do_mount(const char *dev_name, const char __user *dir_name,
                            SB_DIRSYNC |
                            SB_SILENT |
                            SB_POSIXACL |
+                           SB_LAZYTIME |
                            SB_I_VERSION);
 
        if (flags & MS_REMOUNT)
index d4e33e8f1e6fee3172e0e07e9d358587eea34bc4..7ff1349609e4874a35268876065490d77f5e01ab 100644 (file)
@@ -191,6 +191,24 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
 
        INIT_LIST_HEAD(&s->s_mounts);
        s->s_user_ns = get_user_ns(user_ns);
+       init_rwsem(&s->s_umount);
+       lockdep_set_class(&s->s_umount, &type->s_umount_key);
+       /*
+        * sget() can have s_umount recursion.
+        *
+        * When it cannot find a suitable sb, it allocates a new
+        * one (this one), and tries again to find a suitable old
+        * one.
+        *
+        * In case that succeeds, it will acquire the s_umount
+        * lock of the old one. Since these are clearly distrinct
+        * locks, and this object isn't exposed yet, there's no
+        * risk of deadlocks.
+        *
+        * Annotate this by putting this lock in a different
+        * subclass.
+        */
+       down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
 
        if (security_sb_alloc(s))
                goto fail;
@@ -218,25 +236,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
                goto fail;
        if (list_lru_init_memcg(&s->s_inode_lru))
                goto fail;
-
-       init_rwsem(&s->s_umount);
-       lockdep_set_class(&s->s_umount, &type->s_umount_key);
-       /*
-        * sget() can have s_umount recursion.
-        *
-        * When it cannot find a suitable sb, it allocates a new
-        * one (this one), and tries again to find a suitable old
-        * one.
-        *
-        * In case that succeeds, it will acquire the s_umount
-        * lock of the old one. Since these are clearly distrinct
-        * locks, and this object isn't exposed yet, there's no
-        * risk of deadlocks.
-        *
-        * Annotate this by putting this lock in a different
-        * subclass.
-        */
-       down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
        s->s_count = 1;
        atomic_set(&s->s_active, 1);
        mutex_init(&s->s_vfs_rename_mutex);
index 6e45608b2399813329e2280e601c2465940def53..9da6ce22803f03fc318a7fdd33af380eae67d4e6 100644 (file)
@@ -62,7 +62,7 @@ struct arch_timer_cpu {
        bool                    enabled;
 };
 
-int kvm_timer_hyp_init(void);
+int kvm_timer_hyp_init(bool);
 int kvm_timer_enable(struct kvm_vcpu *vcpu);
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
index 82f0c8fd7be8fd20951af806319f64f615f4ffc6..23d29b39f71e83e8a6a25540adc2e3f28702aec7 100644 (file)
@@ -492,6 +492,8 @@ extern unsigned int bvec_nr_vecs(unsigned short idx);
 
 #define bio_set_dev(bio, bdev)                         \
 do {                                           \
+       if ((bio)->bi_disk != (bdev)->bd_disk)  \
+               bio_clear_flag(bio, BIO_THROTTLED);\
        (bio)->bi_disk = (bdev)->bd_disk;       \
        (bio)->bi_partno = (bdev)->bd_partno;   \
 } while (0)
index a1e628e032dad75bf1837a25e45b55a7f54ca2df..9e7d8bd776d227d2ba92b137af7230300f5b1d4a 100644 (file)
@@ -50,8 +50,6 @@ struct blk_issue_stat {
 struct bio {
        struct bio              *bi_next;       /* request queue link */
        struct gendisk          *bi_disk;
-       u8                      bi_partno;
-       blk_status_t            bi_status;
        unsigned int            bi_opf;         /* bottom bits req flags,
                                                 * top bits REQ_OP. Use
                                                 * accessors.
@@ -59,8 +57,8 @@ struct bio {
        unsigned short          bi_flags;       /* status, etc and bvec pool number */
        unsigned short          bi_ioprio;
        unsigned short          bi_write_hint;
-
-       struct bvec_iter        bi_iter;
+       blk_status_t            bi_status;
+       u8                      bi_partno;
 
        /* Number of segments in this BIO after
         * physical address coalescing is performed.
@@ -74,8 +72,9 @@ struct bio {
        unsigned int            bi_seg_front_size;
        unsigned int            bi_seg_back_size;
 
-       atomic_t                __bi_remaining;
+       struct bvec_iter        bi_iter;
 
+       atomic_t                __bi_remaining;
        bio_end_io_t            *bi_end_io;
 
        void                    *bi_private;
index 8089ca17db9ac65998ec9cf82f65743bb5c5abb9..0ce8a372d5069a7aca7810429a968d20e923d3d1 100644 (file)
@@ -135,7 +135,7 @@ typedef __u32 __bitwise req_flags_t;
 struct request {
        struct list_head queuelist;
        union {
-               call_single_data_t csd;
+               struct __call_single_data csd;
                u64 fifo_time;
        };
 
@@ -241,14 +241,24 @@ struct request {
        struct request *next_rq;
 };
 
+static inline bool blk_op_is_scsi(unsigned int op)
+{
+       return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
+}
+
+static inline bool blk_op_is_private(unsigned int op)
+{
+       return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
+}
+
 static inline bool blk_rq_is_scsi(struct request *rq)
 {
-       return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT;
+       return blk_op_is_scsi(req_op(rq));
 }
 
 static inline bool blk_rq_is_private(struct request *rq)
 {
-       return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT;
+       return blk_op_is_private(req_op(rq));
 }
 
 static inline bool blk_rq_is_passthrough(struct request *rq)
@@ -256,6 +266,13 @@ static inline bool blk_rq_is_passthrough(struct request *rq)
        return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
 }
 
+static inline bool bio_is_passthrough(struct bio *bio)
+{
+       unsigned op = bio_op(bio);
+
+       return blk_op_is_scsi(op) || blk_op_is_private(op);
+}
+
 static inline unsigned short req_get_ioprio(struct request *req)
 {
        return req->ioprio;
@@ -948,7 +965,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 extern void blk_rq_unprep_clone(struct request *rq);
 extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
                                     struct request *rq);
-extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
+extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
 extern void blk_delay_queue(struct request_queue *, unsigned long);
 extern void blk_queue_split(struct request_queue *, struct bio **);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
index 94a02ceb1246a2dca1f684fcd1305d418c7b27d5..883a35d50cd538b1d14becbb58f3d88e7c1718ce 100644 (file)
  * In practice this is far bigger than any realistic pointer offset; this limit
  * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
  */
-#define BPF_MAX_VAR_OFF        (1ULL << 31)
+#define BPF_MAX_VAR_OFF        (1 << 29)
 /* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO].  This ensures
  * that converting umax_value to int cannot overflow.
  */
-#define BPF_MAX_VAR_SIZ        INT_MAX
+#define BPF_MAX_VAR_SIZ        (1 << 29)
 
 /* Liveness marks, used for registers and spilled-regs (in stack slots).
  * Read marks propagate upwards until they find a write mark; they record that
diff --git a/include/linux/intel-pti.h b/include/linux/intel-pti.h
new file mode 100644 (file)
index 0000000..2710d72
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ *  Copyright (C) Intel 2011
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The PTI (Parallel Trace Interface) driver directs trace data routed from
+ * various parts in the system out through the Intel Penwell PTI port and
+ * out of the mobile device for analysis with a debugging tool
+ * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
+ * compact JTAG, standard.
+ *
+ * This header file will allow other parts of the OS to use the
+ * interface to write out it's contents for debugging a mobile system.
+ */
+
+#ifndef LINUX_INTEL_PTI_H_
+#define LINUX_INTEL_PTI_H_
+
+/* offset for last dword of any PTI message. Part of MIPI P1149.7 */
+#define PTI_LASTDWORD_DTS      0x30
+
+/* basic structure used as a write address to the PTI HW */
+struct pti_masterchannel {
+       u8 master;
+       u8 channel;
+};
+
+/* the following functions are defined in misc/pti.c */
+void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count);
+struct pti_masterchannel *pti_request_masterchannel(u8 type,
+                                                   const char *thread_name);
+void pti_release_masterchannel(struct pti_masterchannel *mc);
+
+#endif /* LINUX_INTEL_PTI_H_ */
index cb18c6290ca87290996e636f3eda14eb03d26316..8415bf1a9776245b810c8f92fa16c98eb038a45f 100644 (file)
@@ -273,7 +273,8 @@ struct ipv6_pinfo {
                                                 * 100: prefer care-of address
                                                 */
                                dontfrag:1,
-                               autoflowlabel:1;
+                               autoflowlabel:1,
+                               autoflowlabel_set:1;
        __u8                    min_hopcount;
        __u8                    tclass;
        __be32                  rcv_flowinfo;
index e37c21d8eb19da5f375d0e4d41b70e659dc827b6..268aad47ecd36aca76b7803774e718304f7fff01 100644 (file)
@@ -41,8 +41,8 @@ struct mdio_device {
        int addr;
        int flags;
        struct gpio_desc *reset;
-       unsigned int reset_delay;
-       unsigned int reset_post_delay;
+       unsigned int reset_assert_delay;
+       unsigned int reset_deassert_delay;
 };
 #define to_mdio_device(d) container_of(d, struct mdio_device, dev)
 
index a2a1318a3d0c8be0a1fb3d1a08fcf671ff9d8bee..c3d3f04d8cc689eddf217c0626e71d8c16530db5 100644 (file)
@@ -915,10 +915,10 @@ enum PDEV_STAT  {PDEV_STAT_IDLE, PDEV_STAT_RUN};
 #define LTR_L1SS_PWR_GATE_CHECK_CARD_EN        BIT(6)
 
 enum dev_aspm_mode {
-       DEV_ASPM_DISABLE = 0,
        DEV_ASPM_DYNAMIC,
        DEV_ASPM_BACKDOOR,
        DEV_ASPM_STATIC,
+       DEV_ASPM_DISABLE,
 };
 
 /*
index a886b51511abbf146c4c76309aa313e4bbf77dda..57b109c6e422784dd0498c46f9a1a6e96f3ef0ca 100644 (file)
@@ -556,6 +556,7 @@ struct mlx5_core_sriov {
 };
 
 struct mlx5_irq_info {
+       cpumask_var_t mask;
        char name[MLX5_MAX_IRQ_NAME];
 };
 
@@ -1048,7 +1049,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                       enum mlx5_eq_type type);
 int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 int mlx5_start_eqs(struct mlx5_core_dev *dev);
-int mlx5_stop_eqs(struct mlx5_core_dev *dev);
+void mlx5_stop_eqs(struct mlx5_core_dev *dev);
 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
                    unsigned int *irqn);
 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
index 38a7577a9ce71fbcf63c21e2911364795842daa8..d44ec5f41d4a04c72b25b4db1d6fb0217f8f1fa1 100644 (file)
@@ -147,7 +147,7 @@ enum {
        MLX5_CMD_OP_ALLOC_Q_COUNTER               = 0x771,
        MLX5_CMD_OP_DEALLOC_Q_COUNTER             = 0x772,
        MLX5_CMD_OP_QUERY_Q_COUNTER               = 0x773,
-       MLX5_CMD_OP_SET_RATE_LIMIT                = 0x780,
+       MLX5_CMD_OP_SET_PP_RATE_LIMIT             = 0x780,
        MLX5_CMD_OP_QUERY_RATE_LIMIT              = 0x781,
        MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT      = 0x782,
        MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT     = 0x783,
@@ -7239,7 +7239,7 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
        u8         vxlan_udp_port[0x10];
 };
 
-struct mlx5_ifc_set_rate_limit_out_bits {
+struct mlx5_ifc_set_pp_rate_limit_out_bits {
        u8         status[0x8];
        u8         reserved_at_8[0x18];
 
@@ -7248,7 +7248,7 @@ struct mlx5_ifc_set_rate_limit_out_bits {
        u8         reserved_at_40[0x40];
 };
 
-struct mlx5_ifc_set_rate_limit_in_bits {
+struct mlx5_ifc_set_pp_rate_limit_in_bits {
        u8         opcode[0x10];
        u8         reserved_at_10[0x10];
 
@@ -7261,6 +7261,8 @@ struct mlx5_ifc_set_rate_limit_in_bits {
        u8         reserved_at_60[0x20];
 
        u8         rate_limit[0x20];
+
+       u8         reserved_at_a0[0x160];
 };
 
 struct mlx5_ifc_access_register_out_bits {
index b1b0ca7ccb2bacac5d997f97f86848e928bc9da7..db84c516bcfbf797744bdbc8d276381e4d69ea6c 100644 (file)
@@ -78,6 +78,8 @@ enum {
        NETIF_F_HW_ESP_TX_CSUM_BIT,     /* ESP with TX checksum offload */
        NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */
 
+       NETIF_F_GRO_HW_BIT,             /* Hardware Generic receive offload */
+
        /*
         * Add your fresh new feature above and remember to update
         * netdev_features_strings[] in net/core/ethtool.c and maybe
@@ -97,6 +99,7 @@ enum {
 #define NETIF_F_FRAGLIST       __NETIF_F(FRAGLIST)
 #define NETIF_F_FSO            __NETIF_F(FSO)
 #define NETIF_F_GRO            __NETIF_F(GRO)
+#define NETIF_F_GRO_HW         __NETIF_F(GRO_HW)
 #define NETIF_F_GSO            __NETIF_F(GSO)
 #define NETIF_F_GSO_ROBUST     __NETIF_F(GSO_ROBUST)
 #define NETIF_F_HIGHDMA                __NETIF_F(HIGHDMA)
index cc4ce7456e38198f53f555fd82b67ef604957618..352066e4eeef5b1ddfbf92f22f920e2d004ea99e 100644 (file)
@@ -1726,7 +1726,7 @@ struct net_device {
        const struct ndisc_ops *ndisc_ops;
 #endif
 
-#ifdef CONFIG_XFRM
+#ifdef CONFIG_XFRM_OFFLOAD
        const struct xfrmdev_ops *xfrmdev_ops;
 #endif
 
@@ -2793,7 +2793,9 @@ struct softnet_data {
        struct Qdisc            *output_queue;
        struct Qdisc            **output_queue_tailp;
        struct sk_buff          *completion_queue;
-
+#ifdef CONFIG_XFRM_OFFLOAD
+       struct sk_buff_head     xfrm_backlog;
+#endif
 #ifdef CONFIG_RPS
        /* input_queue_head should be written by cpu owning this struct,
         * and only read by other cpus. Worth using a cache line.
@@ -3325,7 +3327,7 @@ int dev_get_phys_port_id(struct net_device *dev,
 int dev_get_phys_port_name(struct net_device *dev,
                           char *name, size_t len);
 int dev_change_proto_down(struct net_device *dev, bool proto_down);
-struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                                    struct netdev_queue *txq, int *ret);
 
diff --git a/include/linux/pti.h b/include/linux/pti.h
deleted file mode 100644 (file)
index b3ea01a..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- *  Copyright (C) Intel 2011
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * The PTI (Parallel Trace Interface) driver directs trace data routed from
- * various parts in the system out through the Intel Penwell PTI port and
- * out of the mobile device for analysis with a debugging tool
- * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
- * compact JTAG, standard.
- *
- * This header file will allow other parts of the OS to use the
- * interface to write out it's contents for debugging a mobile system.
- */
-
-#ifndef PTI_H_
-#define PTI_H_
-
-/* offset for last dword of any PTI message. Part of MIPI P1149.7 */
-#define PTI_LASTDWORD_DTS      0x30
-
-/* basic structure used as a write address to the PTI HW */
-struct pti_masterchannel {
-       u8 master;
-       u8 channel;
-};
-
-/* the following functions are defined in misc/pti.c */
-void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count);
-struct pti_masterchannel *pti_request_masterchannel(u8 type,
-                                                   const char *thread_name);
-void pti_release_masterchannel(struct pti_masterchannel *mc);
-
-#endif /*PTI_H_*/
index 2032ce2eb20bff492698a1309aa043470de0991f..62d508b31f563f397d8324036c9af2b60c82f6d9 100644 (file)
@@ -97,13 +97,9 @@ void rtnetlink_init(void);
 void __rtnl_unlock(void);
 void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail);
 
-#define ASSERT_RTNL() do { \
-       if (unlikely(!rtnl_is_locked())) { \
-               printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \
-                      __FILE__,  __LINE__); \
-               dump_stack(); \
-       } \
-} while(0)
+#define ASSERT_RTNL() \
+       WARN_ONCE(!rtnl_is_locked(), \
+                 "RTNL: assertion failed at %s (%d)\n", __FILE__,  __LINE__)
 
 extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
                             struct netlink_callback *cb,
index 7b2170bfd6e7dae432478fffdbc70e1408740394..bc6bb325d1bf7c03db223c568b891e5d33dc93ca 100644 (file)
@@ -126,7 +126,7 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
  *     for that name.  This appears in the sysfs "modalias" attribute
  *     for driver coldplugging, and in uevents used for hotplugging
  * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when
- *     when not using a GPIO line)
+ *     not using a GPIO line)
  *
  * @statistics: statistics for the spi_device
  *
index 8b8118a7fadbc74a9aa879dc934de0442bb3a013..cb4d92b79cd932eda4e178861d8345683b329bdb 100644 (file)
@@ -3226,7 +3226,6 @@ struct cfg80211_ops {
  * @WIPHY_FLAG_IBSS_RSN: The device supports IBSS RSN.
  * @WIPHY_FLAG_MESH_AUTH: The device supports mesh authentication by routing
  *     auth frames to userspace. See @NL80211_MESH_SETUP_USERSPACE_AUTH.
- * @WIPHY_FLAG_SUPPORTS_SCHED_SCAN: The device supports scheduled scans.
  * @WIPHY_FLAG_SUPPORTS_FW_ROAM: The device supports roaming feature in the
  *     firmware.
  * @WIPHY_FLAG_AP_UAPSD: The device supports uapsd on AP.
index 39efb968b7a4cc9f5ea06ce5e5670ef60c5f7505..0a671c32d6b96008be5432cb94fa088bac210ae4 100644 (file)
@@ -291,6 +291,31 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
 
 int inet_sk_rebuild_header(struct sock *sk);
 
+/**
+ * inet_sk_state_load - read sk->sk_state for lockless contexts
+ * @sk: socket pointer
+ *
+ * Paired with inet_sk_state_store(). Used in places we don't hold socket lock:
+ * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
+ */
+static inline int inet_sk_state_load(const struct sock *sk)
+{
+       /* state change might impact lockless readers. */
+       return smp_load_acquire(&sk->sk_state);
+}
+
+/**
+ * inet_sk_state_store - update sk->sk_state
+ * @sk: socket pointer
+ * @newstate: new state
+ *
+ * Paired with inet_sk_state_load(). Should be used in contexts where
+ * state change might impact lockless readers.
+ */
+void inet_sk_state_store(struct sock *sk, int newstate);
+
+void inet_sk_set_state(struct sock *sk, int state);
+
 static inline unsigned int __inet_ehashfn(const __be32 laddr,
                                          const __u16 lport,
                                          const __be32 faddr,
index 0ad4d0c71228aeabab489fff48c782bb58aca246..36c2d998a43c015c4b917428ec33f5839cde89b1 100644 (file)
@@ -11,7 +11,10 @@ struct netns_core {
 
        int     sysctl_somaxconn;
 
-       struct prot_inuse __percpu *inuse;
+#ifdef CONFIG_PROC_FS
+       int __percpu *sock_inuse;
+       struct prot_inuse __percpu *prot_inuse;
+#endif
 };
 
 #endif
index 0105445cab83d32008b3526794c077f4bfbd9816..5cd3cf51cb35906e98a2deb37e3a33111ccf6c55 100644 (file)
@@ -39,9 +39,11 @@ struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
                                bool create);
 void tcf_chain_put(struct tcf_chain *chain);
 int tcf_block_get(struct tcf_block **p_block,
-                 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q);
+                 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+                 struct netlink_ext_ack *extack);
 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
-                     struct tcf_block_ext_info *ei);
+                     struct tcf_block_ext_info *ei,
+                     struct netlink_ext_ack *extack);
 void tcf_block_put(struct tcf_block *block);
 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
                       struct tcf_block_ext_info *ei);
@@ -77,7 +79,8 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 #else
 static inline
 int tcf_block_get(struct tcf_block **p_block,
-                 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q)
+                 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+                 struct netlink_ext_ack *extack)
 {
        return 0;
 }
@@ -694,9 +697,7 @@ struct tc_cls_matchall_offload {
 };
 
 enum tc_clsbpf_command {
-       TC_CLSBPF_ADD,
-       TC_CLSBPF_REPLACE,
-       TC_CLSBPF_DESTROY,
+       TC_CLSBPF_OFFLOAD,
        TC_CLSBPF_STATS,
 };
 
@@ -705,6 +706,7 @@ struct tc_cls_bpf_offload {
        enum tc_clsbpf_command command;
        struct tcf_exts *exts;
        struct bpf_prog *prog;
+       struct bpf_prog *oldprog;
        const char *name;
        bool exts_integrated;
        u32 gen_flags;
index 2404692288519514fcbf81621925b300b64cdba7..e2c75f52557bc977d4b9d75d9c8bc44ad8201b48 100644 (file)
@@ -89,7 +89,8 @@ extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;
 
 int fifo_set_limit(struct Qdisc *q, unsigned int limit);
 struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
-                              unsigned int limit);
+                              unsigned int limit,
+                              struct netlink_ext_ack *extack);
 
 int register_qdisc(struct Qdisc_ops *qops);
 int unregister_qdisc(struct Qdisc_ops *qops);
@@ -101,7 +102,8 @@ void qdisc_hash_del(struct Qdisc *q);
 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
 struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
-                                       struct nlattr *tab);
+                                       struct nlattr *tab,
+                                       struct netlink_ext_ack *extack);
 void qdisc_put_rtab(struct qdisc_rate_table *tab);
 void qdisc_put_stab(struct qdisc_size_table *tab);
 void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
index bc6b25faba9963b17dbe3123b6eac8ddea4ca72f..ac029d5d88e412526a195844f762c6c363266635 100644 (file)
@@ -151,20 +151,23 @@ struct Qdisc_class_ops {
        /* Child qdisc manipulation */
        struct netdev_queue *   (*select_queue)(struct Qdisc *, struct tcmsg *);
        int                     (*graft)(struct Qdisc *, unsigned long cl,
-                                       struct Qdisc *, struct Qdisc **);
+                                       struct Qdisc *, struct Qdisc **,
+                                       struct netlink_ext_ack *extack);
        struct Qdisc *          (*leaf)(struct Qdisc *, unsigned long cl);
        void                    (*qlen_notify)(struct Qdisc *, unsigned long);
 
        /* Class manipulation routines */
        unsigned long           (*find)(struct Qdisc *, u32 classid);
        int                     (*change)(struct Qdisc *, u32, u32,
-                                       struct nlattr **, unsigned long *);
+                                       struct nlattr **, unsigned long *,
+                                       struct netlink_ext_ack *);
        int                     (*delete)(struct Qdisc *, unsigned long);
        void                    (*walk)(struct Qdisc *, struct qdisc_walker * arg);
 
        /* Filter manipulation */
        struct tcf_block *      (*tcf_block)(struct Qdisc *sch,
-                                            unsigned long arg);
+                                            unsigned long arg,
+                                            struct netlink_ext_ack *extack);
        unsigned long           (*bind_tcf)(struct Qdisc *, unsigned long,
                                        u32 classid);
        void                    (*unbind_tcf)(struct Qdisc *, unsigned long);
@@ -189,11 +192,13 @@ struct Qdisc_ops {
        struct sk_buff *        (*dequeue)(struct Qdisc *);
        struct sk_buff *        (*peek)(struct Qdisc *);
 
-       int                     (*init)(struct Qdisc *sch, struct nlattr *arg);
+       int                     (*init)(struct Qdisc *sch, struct nlattr *arg,
+                                       struct netlink_ext_ack *extack);
        void                    (*reset)(struct Qdisc *);
        void                    (*destroy)(struct Qdisc *);
        int                     (*change)(struct Qdisc *sch,
-                                         struct nlattr *arg);
+                                         struct nlattr *arg,
+                                         struct netlink_ext_ack *extack);
        void                    (*attach)(struct Qdisc *sch);
 
        int                     (*dump)(struct Qdisc *, struct sk_buff *);
@@ -466,9 +471,11 @@ void qdisc_destroy(struct Qdisc *qdisc);
 void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
                               unsigned int len);
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
-                         const struct Qdisc_ops *ops);
+                         const struct Qdisc_ops *ops,
+                         struct netlink_ext_ack *extack);
 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
-                               const struct Qdisc_ops *ops, u32 parentid);
+                               const struct Qdisc_ops *ops, u32 parentid,
+                               struct netlink_ext_ack *extack);
 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                               const struct qdisc_size_table *stab);
 int skb_do_redirect(struct sk_buff *);
index 9a9047268d375496bccc954d03ec20baf4177fd3..6c1db823f8b90e63629ab2c105bdf825afba0547 100644 (file)
@@ -1262,6 +1262,7 @@ proto_memory_pressure(struct proto *prot)
 /* Called with local bh disabled */
 void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
 int sock_prot_inuse_get(struct net *net, struct proto *proto);
+int sock_inuse_get(struct net *net);
 #else
 static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
                int inc)
@@ -2332,31 +2333,6 @@ static inline bool sk_listener(const struct sock *sk)
        return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
 }
 
-/**
- * sk_state_load - read sk->sk_state for lockless contexts
- * @sk: socket pointer
- *
- * Paired with sk_state_store(). Used in places we do not hold socket lock :
- * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
- */
-static inline int sk_state_load(const struct sock *sk)
-{
-       return smp_load_acquire(&sk->sk_state);
-}
-
-/**
- * sk_state_store - update sk->sk_state
- * @sk: socket pointer
- * @newstate: new state
- *
- * Paired with sk_state_load(). Should be used in contexts where
- * state change might impact lockless readers.
- */
-static inline void sk_state_store(struct sock *sk, int newstate)
-{
-       smp_store_release(&sk->sk_state, newstate);
-}
-
 void sock_enable_timestamp(struct sock *sk, int flag);
 int sock_get_timestamp(struct sock *, struct timeval __user *);
 int sock_get_timestampns(struct sock *, struct timespec __user *);
index 1ec0c4760646d6242a93e4a13024ffb33a11ff06..079ea9455bcd78cbd78a695452b59d03b42887cd 100644 (file)
@@ -1051,6 +1051,7 @@ struct xfrm_offload {
 #define        XFRM_GSO_SEGMENT        16
 #define        XFRM_GRO                32
 #define        XFRM_ESP_NO_TRAILER     64
+#define        XFRM_DEV_RESUME         128
 
        __u32                   status;
 #define CRYPTO_SUCCESS                         1
@@ -1874,21 +1875,28 @@ static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
 {
        return skb->sp->xvec[skb->sp->len - 1];
 }
+#endif
+
 static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
 {
+#ifdef CONFIG_XFRM
        struct sec_path *sp = skb->sp;
 
        if (!sp || !sp->olen || sp->len != sp->olen)
                return NULL;
 
        return &sp->ovec[sp->olen - 1];
-}
+#else
+       return NULL;
 #endif
+}
 
 void __net_init xfrm_dev_init(void);
 
 #ifdef CONFIG_XFRM_OFFLOAD
-int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
+void xfrm_dev_resume(struct sk_buff *skb);
+void xfrm_dev_backlog(struct softnet_data *sd);
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
                       struct xfrm_user_offload *xuo);
 bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
@@ -1902,6 +1910,8 @@ static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
                return false;
 
        xdst = (struct xfrm_dst *) dst;
+       if (!x->xso.offload_handle && !xdst->child->xfrm)
+               return true;
        if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
            !xdst->child->xfrm)
                return true;
@@ -1923,15 +1933,24 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
         struct net_device *dev = xso->dev;
 
        if (dev && dev->xfrmdev_ops) {
-               dev->xfrmdev_ops->xdo_dev_state_free(x);
+               if (dev->xfrmdev_ops->xdo_dev_state_free)
+                       dev->xfrmdev_ops->xdo_dev_state_free(x);
                xso->dev = NULL;
                dev_put(dev);
        }
 }
 #else
-static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+static inline void xfrm_dev_resume(struct sk_buff *skb)
 {
-       return 0;
+}
+
+static inline void xfrm_dev_backlog(struct softnet_data *sd)
+{
+}
+
+static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
+{
+       return skb;
 }
 
 static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)
index e4b0b8e099325f2801e4f3af168004c603c23794..2c735a3e66133fc08740b4df6d64919c491c9d1e 100644 (file)
@@ -211,7 +211,7 @@ TRACE_EVENT(kvm_ack_irq,
        { KVM_TRACE_MMIO_WRITE, "write" }
 
 TRACE_EVENT(kvm_mmio,
-       TP_PROTO(int type, int len, u64 gpa, u64 val),
+       TP_PROTO(int type, int len, u64 gpa, void *val),
        TP_ARGS(type, len, gpa, val),
 
        TP_STRUCT__entry(
@@ -225,7 +225,10 @@ TRACE_EVENT(kvm_mmio,
                __entry->type           = type;
                __entry->len            = len;
                __entry->gpa            = gpa;
-               __entry->val            = val;
+               __entry->val            = 0;
+               if (val)
+                       memcpy(&__entry->val, val,
+                              min_t(u32, sizeof(__entry->val), len));
        ),
 
        TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
index ec4dade2446607bd124133f22366c63e5caba852..3537c5f34d77215867f55752490ff7624e587cc8 100644 (file)
@@ -6,7 +6,50 @@
 #define _TRACE_SOCK_H
 
 #include <net/sock.h>
+#include <net/ipv6.h>
 #include <linux/tracepoint.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+
+/* The protocol traced by sock_set_state */
+#define inet_protocol_names            \
+               EM(IPPROTO_TCP)                 \
+               EM(IPPROTO_DCCP)                \
+               EMe(IPPROTO_SCTP)
+
+#define tcp_state_names                        \
+               EM(TCP_ESTABLISHED)             \
+               EM(TCP_SYN_SENT)                \
+               EM(TCP_SYN_RECV)                \
+               EM(TCP_FIN_WAIT1)               \
+               EM(TCP_FIN_WAIT2)               \
+               EM(TCP_TIME_WAIT)               \
+               EM(TCP_CLOSE)                   \
+               EM(TCP_CLOSE_WAIT)              \
+               EM(TCP_LAST_ACK)                \
+               EM(TCP_LISTEN)                  \
+               EM(TCP_CLOSING)                 \
+               EMe(TCP_NEW_SYN_RECV)
+
+/* enums need to be exported to user space */
+#undef EM
+#undef EMe
+#define EM(a)       TRACE_DEFINE_ENUM(a);
+#define EMe(a)      TRACE_DEFINE_ENUM(a);
+
+inet_protocol_names
+tcp_state_names
+
+#undef EM
+#undef EMe
+#define EM(a)       { a, #a },
+#define EMe(a)      { a, #a }
+
+#define show_inet_protocol_name(val)    \
+       __print_symbolic(val, inet_protocol_names)
+
+#define show_tcp_state_name(val)        \
+       __print_symbolic(val, tcp_state_names)
 
 TRACE_EVENT(sock_rcvqueue_full,
 
@@ -63,6 +106,69 @@ TRACE_EVENT(sock_exceed_buf_limit,
                __entry->rmem_alloc)
 );
 
+TRACE_EVENT(inet_sock_set_state,
+
+       TP_PROTO(const struct sock *sk, const int oldstate, const int newstate),
+
+       TP_ARGS(sk, oldstate, newstate),
+
+       TP_STRUCT__entry(
+               __field(const void *, skaddr)
+               __field(int, oldstate)
+               __field(int, newstate)
+               __field(__u16, sport)
+               __field(__u16, dport)
+               __field(__u8, protocol)
+               __array(__u8, saddr, 4)
+               __array(__u8, daddr, 4)
+               __array(__u8, saddr_v6, 16)
+               __array(__u8, daddr_v6, 16)
+       ),
+
+       TP_fast_assign(
+               struct inet_sock *inet = inet_sk(sk);
+               struct in6_addr *pin6;
+               __be32 *p32;
+
+               __entry->skaddr = sk;
+               __entry->oldstate = oldstate;
+               __entry->newstate = newstate;
+
+               __entry->protocol = sk->sk_protocol;
+               __entry->sport = ntohs(inet->inet_sport);
+               __entry->dport = ntohs(inet->inet_dport);
+
+               p32 = (__be32 *) __entry->saddr;
+               *p32 = inet->inet_saddr;
+
+               p32 = (__be32 *) __entry->daddr;
+               *p32 =  inet->inet_daddr;
+
+#if IS_ENABLED(CONFIG_IPV6)
+               if (sk->sk_family == AF_INET6) {
+                       pin6 = (struct in6_addr *)__entry->saddr_v6;
+                       *pin6 = sk->sk_v6_rcv_saddr;
+                       pin6 = (struct in6_addr *)__entry->daddr_v6;
+                       *pin6 = sk->sk_v6_daddr;
+               } else
+#endif
+               {
+                       pin6 = (struct in6_addr *)__entry->saddr_v6;
+                       ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
+                       pin6 = (struct in6_addr *)__entry->daddr_v6;
+                       ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
+               }
+       ),
+
+       TP_printk("protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s",
+                       show_inet_protocol_name(__entry->protocol),
+                       __entry->sport, __entry->dport,
+                       __entry->saddr, __entry->daddr,
+                       __entry->saddr_v6, __entry->daddr_v6,
+                       show_tcp_state_name(__entry->oldstate),
+                       show_tcp_state_name(__entry->newstate))
+);
+
 #endif /* _TRACE_SOCK_H */
 
 /* This part must be outside protection */
index 07cccca6cbf1762684152146372e35e1cd758338..8e88a16715380749f2c764d369d17f15ebb2fcc3 100644 (file)
@@ -9,22 +9,6 @@
 #include <linux/tracepoint.h>
 #include <net/ipv6.h>
 
-#define tcp_state_name(state)  { state, #state }
-#define show_tcp_state_name(val)                       \
-       __print_symbolic(val,                           \
-               tcp_state_name(TCP_ESTABLISHED),        \
-               tcp_state_name(TCP_SYN_SENT),           \
-               tcp_state_name(TCP_SYN_RECV),           \
-               tcp_state_name(TCP_FIN_WAIT1),          \
-               tcp_state_name(TCP_FIN_WAIT2),          \
-               tcp_state_name(TCP_TIME_WAIT),          \
-               tcp_state_name(TCP_CLOSE),              \
-               tcp_state_name(TCP_CLOSE_WAIT),         \
-               tcp_state_name(TCP_LAST_ACK),           \
-               tcp_state_name(TCP_LISTEN),             \
-               tcp_state_name(TCP_CLOSING),            \
-               tcp_state_name(TCP_NEW_SYN_RECV))
-
 /*
  * tcp event with arguments sk and skb
  *
diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h
new file mode 100644 (file)
index 0000000..5cb360b
--- /dev/null
@@ -0,0 +1,644 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) */
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _UAPI_LINUX_BATADV_PACKET_H_
+#define _UAPI_LINUX_BATADV_PACKET_H_
+
+#include <asm/byteorder.h>
+#include <linux/if_ether.h>
+#include <linux/types.h>
+
+/**
+ * batadv_tp_is_error() - Check throughput meter return code for error
+ * @n: throughput meter return code
+ *
+ * Return: 0 when no error was detected, != 0 otherwise
+ */
+#define batadv_tp_is_error(n) ((__u8)(n) > 127 ? 1 : 0)
+
+/**
+ * enum batadv_packettype - types for batman-adv encapsulated packets
+ * @BATADV_IV_OGM: originator messages for B.A.T.M.A.N. IV
+ * @BATADV_BCAST: broadcast packets carrying broadcast payload
+ * @BATADV_CODED: network coded packets
+ * @BATADV_ELP: echo location packets for B.A.T.M.A.N. V
+ * @BATADV_OGM2: originator messages for B.A.T.M.A.N. V
+ *
+ * @BATADV_UNICAST: unicast packets carrying unicast payload traffic
+ * @BATADV_UNICAST_FRAG: unicast packets carrying a fragment of the original
+ *     payload packet
+ * @BATADV_UNICAST_4ADDR: unicast packet including the originator address of
+ *     the sender
+ * @BATADV_ICMP: unicast packet like IP ICMP used for ping or traceroute
+ * @BATADV_UNICAST_TVLV: unicast packet carrying TVLV containers
+ */
+enum batadv_packettype {
+       /* 0x00 - 0x3f: local packets or special rules for handling */
+       BATADV_IV_OGM           = 0x00,
+       BATADV_BCAST            = 0x01,
+       BATADV_CODED            = 0x02,
+       BATADV_ELP              = 0x03,
+       BATADV_OGM2             = 0x04,
+       /* 0x40 - 0x7f: unicast */
+#define BATADV_UNICAST_MIN     0x40
+       BATADV_UNICAST          = 0x40,
+       BATADV_UNICAST_FRAG     = 0x41,
+       BATADV_UNICAST_4ADDR    = 0x42,
+       BATADV_ICMP             = 0x43,
+       BATADV_UNICAST_TVLV     = 0x44,
+#define BATADV_UNICAST_MAX     0x7f
+       /* 0x80 - 0xff: reserved */
+};
+
+/**
+ * enum batadv_subtype - packet subtype for unicast4addr
+ * @BATADV_P_DATA: user payload
+ * @BATADV_P_DAT_DHT_GET: DHT request message
+ * @BATADV_P_DAT_DHT_PUT: DHT store message
+ * @BATADV_P_DAT_CACHE_REPLY: ARP reply generated by DAT
+ */
+enum batadv_subtype {
+       BATADV_P_DATA                   = 0x01,
+       BATADV_P_DAT_DHT_GET            = 0x02,
+       BATADV_P_DAT_DHT_PUT            = 0x03,
+       BATADV_P_DAT_CACHE_REPLY        = 0x04,
+};
+
+/* this file is included by batctl which needs these defines */
+#define BATADV_COMPAT_VERSION 15
+
+/**
+ * enum batadv_iv_flags - flags used in B.A.T.M.A.N. IV OGM packets
+ * @BATADV_NOT_BEST_NEXT_HOP: flag is set when ogm packet is forwarded and was
+ *     previously received from someone other than the best neighbor.
+ * @BATADV_PRIMARIES_FIRST_HOP: flag unused.
+ * @BATADV_DIRECTLINK: flag is for the first hop or if rebroadcasted from a
+ *     one hop neighbor on the interface where it was originally received.
+ */
+enum batadv_iv_flags {
+       BATADV_NOT_BEST_NEXT_HOP   = 1UL << 0,
+       BATADV_PRIMARIES_FIRST_HOP = 1UL << 1,
+       BATADV_DIRECTLINK          = 1UL << 2,
+};
+
+/**
+ * enum batadv_icmp_packettype - ICMP message types
+ * @BATADV_ECHO_REPLY: success reply to BATADV_ECHO_REQUEST
+ * @BATADV_DESTINATION_UNREACHABLE: failure when route to destination not found
+ * @BATADV_ECHO_REQUEST: request BATADV_ECHO_REPLY from destination
+ * @BATADV_TTL_EXCEEDED: error after BATADV_ECHO_REQUEST traversed too many hops
+ * @BATADV_PARAMETER_PROBLEM: return code for malformed messages
+ * @BATADV_TP: throughput meter packet
+ */
+enum batadv_icmp_packettype {
+       BATADV_ECHO_REPLY              = 0,
+       BATADV_DESTINATION_UNREACHABLE = 3,
+       BATADV_ECHO_REQUEST            = 8,
+       BATADV_TTL_EXCEEDED            = 11,
+       BATADV_PARAMETER_PROBLEM       = 12,
+       BATADV_TP                      = 15,
+};
+
+/**
+ * enum batadv_mcast_flags - flags for multicast capabilities and settings
+ * @BATADV_MCAST_WANT_ALL_UNSNOOPABLES: we want all packets destined for
+ *  224.0.0.0/24 or ff02::1
+ * @BATADV_MCAST_WANT_ALL_IPV4: we want all IPv4 multicast packets
+ * @BATADV_MCAST_WANT_ALL_IPV6: we want all IPv6 multicast packets
+ */
+enum batadv_mcast_flags {
+       BATADV_MCAST_WANT_ALL_UNSNOOPABLES      = 1UL << 0,
+       BATADV_MCAST_WANT_ALL_IPV4              = 1UL << 1,
+       BATADV_MCAST_WANT_ALL_IPV6              = 1UL << 2,
+};
+
+/* tt data subtypes */
+#define BATADV_TT_DATA_TYPE_MASK 0x0F
+
+/**
+ * enum batadv_tt_data_flags - flags for tt data tvlv
+ * @BATADV_TT_OGM_DIFF: TT diff propagated through OGM
+ * @BATADV_TT_REQUEST: TT request message
+ * @BATADV_TT_RESPONSE: TT response message
+ * @BATADV_TT_FULL_TABLE: contains full table to replace existing table
+ */
+enum batadv_tt_data_flags {
+       BATADV_TT_OGM_DIFF   = 1UL << 0,
+       BATADV_TT_REQUEST    = 1UL << 1,
+       BATADV_TT_RESPONSE   = 1UL << 2,
+       BATADV_TT_FULL_TABLE = 1UL << 4,
+};
+
+/**
+ * enum batadv_vlan_flags - flags for the four MSB of any vlan ID field
+ * @BATADV_VLAN_HAS_TAG: whether the field contains a valid vlan tag or not
+ */
+enum batadv_vlan_flags {
+       BATADV_VLAN_HAS_TAG     = 1UL << 15,
+};
+
+/**
+ * enum batadv_bla_claimframe - claim frame types for the bridge loop avoidance
+ * @BATADV_CLAIM_TYPE_CLAIM: claim of a client mac address
+ * @BATADV_CLAIM_TYPE_UNCLAIM: unclaim of a client mac address
+ * @BATADV_CLAIM_TYPE_ANNOUNCE: announcement of backbone with current crc
+ * @BATADV_CLAIM_TYPE_REQUEST: request of full claim table
+ * @BATADV_CLAIM_TYPE_LOOPDETECT: mesh-traversing loop detect packet
+ */
+enum batadv_bla_claimframe {
+       BATADV_CLAIM_TYPE_CLAIM         = 0x00,
+       BATADV_CLAIM_TYPE_UNCLAIM       = 0x01,
+       BATADV_CLAIM_TYPE_ANNOUNCE      = 0x02,
+       BATADV_CLAIM_TYPE_REQUEST       = 0x03,
+       BATADV_CLAIM_TYPE_LOOPDETECT    = 0x04,
+};
+
+/**
+ * enum batadv_tvlv_type - tvlv type definitions
+ * @BATADV_TVLV_GW: gateway tvlv
+ * @BATADV_TVLV_DAT: distributed arp table tvlv
+ * @BATADV_TVLV_NC: network coding tvlv
+ * @BATADV_TVLV_TT: translation table tvlv
+ * @BATADV_TVLV_ROAM: roaming advertisement tvlv
+ * @BATADV_TVLV_MCAST: multicast capability tvlv
+ */
+enum batadv_tvlv_type {
+       BATADV_TVLV_GW          = 0x01,
+       BATADV_TVLV_DAT         = 0x02,
+       BATADV_TVLV_NC          = 0x03,
+       BATADV_TVLV_TT          = 0x04,
+       BATADV_TVLV_ROAM        = 0x05,
+       BATADV_TVLV_MCAST       = 0x06,
+};
+
+#pragma pack(2)
+/* the destination hardware field in the ARP frame is used to
+ * transport the claim type and the group id
+ */
+struct batadv_bla_claim_dst {
+       __u8   magic[3];        /* FF:43:05 */
+       __u8   type;            /* bla_claimframe */
+       __be16 group;           /* group id */
+};
+
+#pragma pack()
+
+/**
+ * struct batadv_ogm_packet - ogm (routing protocol) packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @flags: contains routing relevant flags - see enum batadv_iv_flags
+ * @seqno: sequence identification
+ * @orig: address of the source node
+ * @prev_sender: address of the previous sender
+ * @reserved: reserved byte for alignment
+ * @tq: transmission quality
+ * @tvlv_len: length of tvlv data following the ogm header
+ */
+struct batadv_ogm_packet {
+       __u8   packet_type;
+       __u8   version;
+       __u8   ttl;
+       __u8   flags;
+       __be32 seqno;
+       __u8   orig[ETH_ALEN];
+       __u8   prev_sender[ETH_ALEN];
+       __u8   reserved;
+       __u8   tq;
+       __be16 tvlv_len;
+       /* __packed is not needed as the struct size is divisible by 4,
+        * and the largest data type in this struct has a size of 4.
+        */
+};
+
+#define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
+
+/**
+ * struct batadv_ogm2_packet - ogm2 (routing protocol) packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @flags: reserved for routing relevant flags - currently always 0
+ * @seqno: sequence number
+ * @orig: originator mac address
+ * @tvlv_len: length of the appended tvlv buffer (in bytes)
+ * @throughput: the currently flooded path throughput
+ */
+struct batadv_ogm2_packet {
+       __u8   packet_type;
+       __u8   version;
+       __u8   ttl;
+       __u8   flags;
+       __be32 seqno;
+       __u8   orig[ETH_ALEN];
+       __be16 tvlv_len;
+       __be32 throughput;
+       /* __packed is not needed as the struct size is divisible by 4,
+        * and the largest data type in this struct has a size of 4.
+        */
+};
+
+#define BATADV_OGM2_HLEN sizeof(struct batadv_ogm2_packet)
+
+/**
+ * struct batadv_elp_packet - elp (neighbor discovery) packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @orig: originator mac address
+ * @seqno: sequence number
+ * @elp_interval: currently used ELP sending interval in ms
+ */
+struct batadv_elp_packet {
+       __u8   packet_type;
+       __u8   version;
+       __u8   orig[ETH_ALEN];
+       __be32 seqno;
+       __be32 elp_interval;
+};
+
+#define BATADV_ELP_HLEN sizeof(struct batadv_elp_packet)
+
+/**
+ * struct batadv_icmp_header - common members among all the ICMP packets
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
+ * @align: not used - useful for alignment purposes only
+ *
+ * This structure is used for ICMP packets parsing only and it is never sent
+ * over the wire. The alignment field at the end is there to ensure that
+ * members are padded the same way as they are in real packets.
+ */
+struct batadv_icmp_header {
+       __u8 packet_type;
+       __u8 version;
+       __u8 ttl;
+       __u8 msg_type; /* see ICMP message types above */
+       __u8 dst[ETH_ALEN];
+       __u8 orig[ETH_ALEN];
+       __u8 uid;
+       __u8 align[3];
+};
+
+/**
+ * struct batadv_icmp_packet - ICMP packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
+ * @reserved: not used - useful for alignment
+ * @seqno: ICMP sequence number
+ */
+struct batadv_icmp_packet {
+       __u8   packet_type;
+       __u8   version;
+       __u8   ttl;
+       __u8   msg_type; /* see ICMP message types above */
+       __u8   dst[ETH_ALEN];
+       __u8   orig[ETH_ALEN];
+       __u8   uid;
+       __u8   reserved;
+       __be16 seqno;
+};
+
+/**
+ * struct batadv_icmp_tp_packet - ICMP TP Meter packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
+ * @subtype: TP packet subtype (see batadv_icmp_tp_subtype)
+ * @session: TP session identifier
+ * @seqno: the TP sequence number
+ * @timestamp: time when the packet has been sent. This value is filled in a
+ *  TP_MSG and echoed back in the next TP_ACK so that the sender can compute the
+ *  RTT. Since it is read only by the host which wrote it, there is no need to
+ *  store it using network order
+ */
+struct batadv_icmp_tp_packet {
+       __u8   packet_type;
+       __u8   version;
+       __u8   ttl;
+       __u8   msg_type; /* see ICMP message types above */
+       __u8   dst[ETH_ALEN];
+       __u8   orig[ETH_ALEN];
+       __u8   uid;
+       __u8   subtype;
+       __u8   session[2];
+       __be32 seqno;
+       __be32 timestamp;
+};
+
+/**
+ * enum batadv_icmp_tp_subtype - ICMP TP Meter packet subtypes
+ * @BATADV_TP_MSG: Msg from sender to receiver
+ * @BATADV_TP_ACK: acknowledgment from receiver to sender
+ */
+enum batadv_icmp_tp_subtype {
+       BATADV_TP_MSG   = 0,
+       BATADV_TP_ACK,
+};
+
+#define BATADV_RR_LEN 16
+
+/**
+ * struct batadv_icmp_packet_rr - ICMP RouteRecord packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
+ * @rr_cur: number of entries in the rr array
+ * @seqno: ICMP sequence number
+ * @rr: route record array
+ */
+struct batadv_icmp_packet_rr {
+       __u8   packet_type;
+       __u8   version;
+       __u8   ttl;
+       __u8   msg_type; /* see ICMP message types above */
+       __u8   dst[ETH_ALEN];
+       __u8   orig[ETH_ALEN];
+       __u8   uid;
+       __u8   rr_cur;
+       __be16 seqno;
+       __u8   rr[BATADV_RR_LEN][ETH_ALEN];
+};
+
+#define BATADV_ICMP_MAX_PACKET_SIZE    sizeof(struct batadv_icmp_packet_rr)
+
+/* All packet headers in front of an ethernet header have to be completely
+ * divisible by 2 but not by 4 to make the payload after the ethernet
+ * header again 4 bytes boundary aligned.
+ *
+ * A packing of 2 is necessary to avoid extra padding at the end of the struct
+ * caused by a structure member which is larger than two bytes. Otherwise
+ * the structure would not fulfill the previously mentioned rule to avoid the
+ * misalignment of the payload after the ethernet header. It may also lead to
+ * leakage of information when the padding is not initialized before sending.
+ */
+#pragma pack(2)
+
+/**
+ * struct batadv_unicast_packet - unicast packet for network payload
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @ttvn: translation table version number
+ * @dest: originator destination of the unicast packet
+ */
+struct batadv_unicast_packet {
+       __u8 packet_type;
+       __u8 version;
+       __u8 ttl;
+       __u8 ttvn; /* destination translation table version number */
+       __u8 dest[ETH_ALEN];
+       /* "4 bytes boundary + 2 bytes" long to make the payload after the
+        * following ethernet header again 4 bytes boundary aligned
+        */
+};
+
+/**
+ * struct batadv_unicast_4addr_packet - extended unicast packet
+ * @u: common unicast packet header
+ * @src: address of the source
+ * @subtype: packet subtype
+ * @reserved: reserved byte for alignment
+ */
+struct batadv_unicast_4addr_packet {
+       struct batadv_unicast_packet u;
+       __u8 src[ETH_ALEN];
+       __u8 subtype;
+       __u8 reserved;
+       /* "4 bytes boundary + 2 bytes" long to make the payload after the
+        * following ethernet header again 4 bytes boundary aligned
+        */
+};
+
+/**
+ * struct batadv_frag_packet - fragmented packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @dest: final destination used when routing fragments
+ * @orig: originator of the fragment used when merging the packet
+ * @no: fragment number within this sequence
+ * @priority: priority of frame, from ToS IP precedence or 802.1p
+ * @reserved: reserved byte for alignment
+ * @seqno: sequence identification
+ * @total_size: size of the merged packet
+ */
+struct batadv_frag_packet {
+       __u8   packet_type;
+       __u8   version;  /* batman version field */
+       __u8   ttl;
+#if defined(__BIG_ENDIAN_BITFIELD)
+       __u8   no:4;
+       __u8   priority:3;
+       __u8   reserved:1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       __u8   reserved:1;
+       __u8   priority:3;
+       __u8   no:4;
+#else
+#error "unknown bitfield endianness"
+#endif
+       __u8   dest[ETH_ALEN];
+       __u8   orig[ETH_ALEN];
+       __be16 seqno;
+       __be16 total_size;
+};
+
+/**
+ * struct batadv_bcast_packet - broadcast packet for network payload
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @reserved: reserved byte for alignment
+ * @seqno: sequence identification
+ * @orig: originator of the broadcast packet
+ */
+struct batadv_bcast_packet {
+       __u8   packet_type;
+       __u8   version;  /* batman version field */
+       __u8   ttl;
+       __u8   reserved;
+       __be32 seqno;
+       __u8   orig[ETH_ALEN];
+       /* "4 bytes boundary + 2 bytes" long to make the payload after the
+        * following ethernet header again 4 bytes boundary aligned
+        */
+};
+
+/**
+ * struct batadv_coded_packet - network coded packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @first_source: original source of first included packet
+ * @first_orig_dest: original destination of first included packet
+ * @first_crc: checksum of first included packet
+ * @first_ttvn: tt-version number of first included packet
+ * @second_ttl: ttl of second packet
+ * @second_dest: second receiver of this coded packet
+ * @second_source: original source of second included packet
+ * @second_orig_dest: original destination of second included packet
+ * @second_crc: checksum of second included packet
+ * @second_ttvn: tt version number of second included packet
+ * @coded_len: length of network coded part of the payload
+ */
+struct batadv_coded_packet {
+       __u8   packet_type;
+       __u8   version;  /* batman version field */
+       __u8   ttl;
+       __u8   first_ttvn;
+       /* __u8 first_dest[ETH_ALEN]; - saved in mac header destination */
+       __u8   first_source[ETH_ALEN];
+       __u8   first_orig_dest[ETH_ALEN];
+       __be32 first_crc;
+       __u8   second_ttl;
+       __u8   second_ttvn;
+       __u8   second_dest[ETH_ALEN];
+       __u8   second_source[ETH_ALEN];
+       __u8   second_orig_dest[ETH_ALEN];
+       __be32 second_crc;
+       __be16 coded_len;
+};
+
+#pragma pack()
+
+/**
+ * struct batadv_unicast_tvlv_packet - generic unicast packet with tvlv payload
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @reserved: reserved field (for packet alignment)
+ * @src: address of the source
+ * @dst: address of the destination
+ * @tvlv_len: length of tvlv data following the unicast tvlv header
+ * @align: 2 bytes to align the header to a 4 byte boundary
+ */
+struct batadv_unicast_tvlv_packet {
+       __u8   packet_type;
+       __u8   version;  /* batman version field */
+       __u8   ttl;
+       __u8   reserved;
+       __u8   dst[ETH_ALEN];
+       __u8   src[ETH_ALEN];
+       __be16 tvlv_len;
+       __u16  align;
+};
+
+/**
+ * struct batadv_tvlv_hdr - base tvlv header struct
+ * @type: tvlv container type (see batadv_tvlv_type)
+ * @version: tvlv container version
+ * @len: tvlv container length
+ */
+struct batadv_tvlv_hdr {
+       __u8   type;
+       __u8   version;
+       __be16 len;
+};
+
+/**
+ * struct batadv_tvlv_gateway_data - gateway data propagated through gw tvlv
+ *  container
+ * @bandwidth_down: advertised uplink download bandwidth
+ * @bandwidth_up: advertised uplink upload bandwidth
+ */
+struct batadv_tvlv_gateway_data {
+       __be32 bandwidth_down;
+       __be32 bandwidth_up;
+};
+
+/**
+ * struct batadv_tvlv_tt_data - tt data propagated through the tt tvlv container
+ * @flags: translation table flags (see batadv_tt_data_flags)
+ * @ttvn: translation table version number
+ * @num_vlan: number of announced VLANs. In the TVLV this struct is followed by
+ *  one batadv_tvlv_tt_vlan_data object per announced vlan
+ */
+struct batadv_tvlv_tt_data {
+       __u8   flags;
+       __u8   ttvn;
+       __be16 num_vlan;
+};
+
+/**
+ * struct batadv_tvlv_tt_vlan_data - vlan specific tt data propagated through
+ *  the tt tvlv container
+ * @crc: crc32 checksum of the entries belonging to this vlan
+ * @vid: vlan identifier
+ * @reserved: unused, useful for alignment purposes
+ */
+struct batadv_tvlv_tt_vlan_data {
+       __be32 crc;
+       __be16 vid;
+       __u16  reserved;
+};
+
+/**
+ * struct batadv_tvlv_tt_change - translation table diff data
+ * @flags: status indicators concerning the non-mesh client (see
+ *  batadv_tt_client_flags)
+ * @reserved: reserved field - useful for alignment purposes only
+ * @addr: mac address of non-mesh client that triggered this tt change
+ * @vid: VLAN identifier
+ */
+struct batadv_tvlv_tt_change {
+       __u8   flags;
+       __u8   reserved[3];
+       __u8   addr[ETH_ALEN];
+       __be16 vid;
+};
+
+/**
+ * struct batadv_tvlv_roam_adv - roaming advertisement
+ * @client: mac address of roaming client
+ * @vid: VLAN identifier
+ */
+struct batadv_tvlv_roam_adv {
+       __u8   client[ETH_ALEN];
+       __be16 vid;
+};
+
+/**
+ * struct batadv_tvlv_mcast_data - payload of a multicast tvlv
+ * @flags: multicast flags announced by the orig node
+ * @reserved: reserved field
+ */
+struct batadv_tvlv_mcast_data {
+       __u8 flags;
+       __u8 reserved[3];
+};
+
+#endif /* _UAPI_LINUX_BATADV_PACKET_H_ */
index efd641c8a5d6ef99464349790304a76f967d2054..ae00c99cbed0998d0b5eb1bcae87cf56abd00429 100644 (file)
@@ -1,18 +1,25 @@
+/* SPDX-License-Identifier: MIT */
 /* Copyright (C) 2016-2017  B.A.T.M.A.N. contributors:
  *
  * Matthias Schiffer
  *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
  *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
  */
 
 #ifndef _UAPI_LINUX_BATMAN_ADV_H_
index d84ce5c1c9aad49dd487cb9a2ae93e4e3890ee53..d6fee55dbded2bc17edca5777e18f31cee06dad2 100644 (file)
@@ -127,6 +127,7 @@ enum {
        L2TP_ATTR_UDP_ZERO_CSUM6_TX,    /* flag */
        L2TP_ATTR_UDP_ZERO_CSUM6_RX,    /* flag */
        L2TP_ATTR_PAD,
+       L2TP_ATTR_PEER_OFFSET,          /* u16 */
        __L2TP_ATTR_MAX,
 };
 
index 52ad60b3b8becc8d1f2785d9a37616130086f744..98d8637cf70da5d7b0be57577053cb53f38b2197 100644 (file)
@@ -1418,6 +1418,11 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
                break;
        case PTR_TO_STACK:
                pointer_desc = "stack ";
+               /* The stack spill tracking logic in check_stack_write()
+                * and check_stack_read() relies on stack accesses being
+                * aligned.
+                */
+               strict = true;
                break;
        default:
                break;
@@ -1521,6 +1526,29 @@ static int get_callee_stack_depth(struct bpf_verifier_env *env,
        return env->subprog_stack_depth[subprog];
 }
 
+/* truncate register to smaller size (in bytes)
+ * must be called with size < BPF_REG_SIZE
+ */
+static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
+{
+       u64 mask;
+
+       /* clear high bits in bit representation */
+       reg->var_off = tnum_cast(reg->var_off, size);
+
+       /* fix arithmetic bounds */
+       mask = ((u64)1 << (size * 8)) - 1;
+       if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
+               reg->umin_value &= mask;
+               reg->umax_value &= mask;
+       } else {
+               reg->umin_value = 0;
+               reg->umax_value = mask;
+       }
+       reg->smin_value = reg->umin_value;
+       reg->smax_value = reg->umax_value;
+}
+
 /* check whether memory at (regno + off) is accessible for t = (read | write)
  * if t==write, value_regno is a register which value is stored into memory
  * if t==read, value_regno is a register which will receive the value from memory
@@ -1656,9 +1684,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
        if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
            regs[value_regno].type == SCALAR_VALUE) {
                /* b/h/w load zero-extends, mark upper bits as known 0 */
-               regs[value_regno].var_off =
-                       tnum_cast(regs[value_regno].var_off, size);
-               __update_reg_bounds(&regs[value_regno]);
+               coerce_reg_to_size(&regs[value_regno], size);
        }
        return err;
 }
@@ -1732,6 +1758,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
                tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
                verbose(env, "invalid variable stack read R%d var_off=%s\n",
                        regno, tn_buf);
+               return -EACCES;
        }
        off = reg->off + reg->var_off.value;
        if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
@@ -2254,7 +2281,13 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
                return -EINVAL;
        }
 
+       /* With LD_ABS/IND some JITs save/restore skb from r1. */
        changes_data = bpf_helper_changes_pkt_data(fn->func);
+       if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
+               verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
+                       func_id_name(func_id), func_id);
+               return -EINVAL;
+       }
 
        memset(&meta, 0, sizeof(meta));
        meta.pkt_access = fn->pkt_access;
@@ -2346,14 +2379,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
        return 0;
 }
 
-static void coerce_reg_to_32(struct bpf_reg_state *reg)
-{
-       /* clear high 32 bits */
-       reg->var_off = tnum_cast(reg->var_off, 4);
-       /* Update bounds */
-       __update_reg_bounds(reg);
-}
-
 static bool signed_add_overflows(s64 a, s64 b)
 {
        /* Do the add in u64, where overflow is well-defined */
@@ -2374,6 +2399,41 @@ static bool signed_sub_overflows(s64 a, s64 b)
        return res > a;
 }
 
+static bool check_reg_sane_offset(struct bpf_verifier_env *env,
+                                 const struct bpf_reg_state *reg,
+                                 enum bpf_reg_type type)
+{
+       bool known = tnum_is_const(reg->var_off);
+       s64 val = reg->var_off.value;
+       s64 smin = reg->smin_value;
+
+       if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
+               verbose(env, "math between %s pointer and %lld is not allowed\n",
+                       reg_type_str[type], val);
+               return false;
+       }
+
+       if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
+               verbose(env, "%s pointer offset %d is not allowed\n",
+                       reg_type_str[type], reg->off);
+               return false;
+       }
+
+       if (smin == S64_MIN) {
+               verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
+                       reg_type_str[type]);
+               return false;
+       }
+
+       if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
+               verbose(env, "value %lld makes %s pointer be out of bounds\n",
+                       smin, reg_type_str[type]);
+               return false;
+       }
+
+       return true;
+}
+
 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
  * Caller should also handle BPF_MOV case separately.
  * If we return -EACCES, caller may want to try again treating pointer as a
@@ -2412,29 +2472,25 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 
        if (BPF_CLASS(insn->code) != BPF_ALU64) {
                /* 32-bit ALU ops on pointers produce (meaningless) scalars */
-               if (!env->allow_ptr_leaks)
-                       verbose(env,
-                               "R%d 32-bit pointer arithmetic prohibited\n",
-                               dst);
+               verbose(env,
+                       "R%d 32-bit pointer arithmetic prohibited\n",
+                       dst);
                return -EACCES;
        }
 
        if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
-               if (!env->allow_ptr_leaks)
-                       verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
-                               dst);
+               verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
+                       dst);
                return -EACCES;
        }
        if (ptr_reg->type == CONST_PTR_TO_MAP) {
-               if (!env->allow_ptr_leaks)
-                       verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
-                               dst);
+               verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
+                       dst);
                return -EACCES;
        }
        if (ptr_reg->type == PTR_TO_PACKET_END) {
-               if (!env->allow_ptr_leaks)
-                       verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
-                               dst);
+               verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
+                       dst);
                return -EACCES;
        }
 
@@ -2444,6 +2500,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        dst_reg->type = ptr_reg->type;
        dst_reg->id = ptr_reg->id;
 
+       if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
+           !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
+               return -EINVAL;
+
        switch (opcode) {
        case BPF_ADD:
                /* We can take a fixed offset as long as it doesn't overflow
@@ -2497,9 +2557,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        case BPF_SUB:
                if (dst_reg == off_reg) {
                        /* scalar -= pointer.  Creates an unknown scalar */
-                       if (!env->allow_ptr_leaks)
-                               verbose(env, "R%d tried to subtract pointer from scalar\n",
-                                       dst);
+                       verbose(env, "R%d tried to subtract pointer from scalar\n",
+                               dst);
                        return -EACCES;
                }
                /* We don't allow subtraction from FP, because (according to
@@ -2507,9 +2566,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
                 * be able to deal with it.
                 */
                if (ptr_reg->type == PTR_TO_STACK) {
-                       if (!env->allow_ptr_leaks)
-                               verbose(env, "R%d subtraction from stack pointer prohibited\n",
-                                       dst);
+                       verbose(env, "R%d subtraction from stack pointer prohibited\n",
+                               dst);
                        return -EACCES;
                }
                if (known && (ptr_reg->off - smin_val ==
@@ -2558,28 +2616,30 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        case BPF_AND:
        case BPF_OR:
        case BPF_XOR:
-               /* bitwise ops on pointers are troublesome, prohibit for now.
-                * (However, in principle we could allow some cases, e.g.
-                * ptr &= ~3 which would reduce min_value by 3.)
-                */
-               if (!env->allow_ptr_leaks)
-                       verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
-                               dst, bpf_alu_string[opcode >> 4]);
+               /* bitwise ops on pointers are troublesome, prohibit. */
+               verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
+                       dst, bpf_alu_string[opcode >> 4]);
                return -EACCES;
        default:
                /* other operators (e.g. MUL,LSH) produce non-pointer results */
-               if (!env->allow_ptr_leaks)
-                       verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
-                               dst, bpf_alu_string[opcode >> 4]);
+               verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
+                       dst, bpf_alu_string[opcode >> 4]);
                return -EACCES;
        }
 
+       if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
+               return -EINVAL;
+
        __update_reg_bounds(dst_reg);
        __reg_deduce_bounds(dst_reg);
        __reg_bound_offset(dst_reg);
        return 0;
 }
 
+/* WARNING: This function does calculations on 64-bit values, but the actual
+ * execution may occur on 32-bit values. Therefore, things like bitshifts
+ * need extra checks in the 32-bit case.
+ */
 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                                      struct bpf_insn *insn,
                                      struct bpf_reg_state *dst_reg,
@@ -2590,12 +2650,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
        bool src_known, dst_known;
        s64 smin_val, smax_val;
        u64 umin_val, umax_val;
+       u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
 
-       if (BPF_CLASS(insn->code) != BPF_ALU64) {
-               /* 32-bit ALU ops are (32,32)->64 */
-               coerce_reg_to_32(dst_reg);
-               coerce_reg_to_32(&src_reg);
-       }
        smin_val = src_reg.smin_value;
        smax_val = src_reg.smax_value;
        umin_val = src_reg.umin_value;
@@ -2603,6 +2659,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
        src_known = tnum_is_const(src_reg.var_off);
        dst_known = tnum_is_const(dst_reg->var_off);
 
+       if (!src_known &&
+           opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
+               __mark_reg_unknown(dst_reg);
+               return 0;
+       }
+
        switch (opcode) {
        case BPF_ADD:
                if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
@@ -2731,9 +2793,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                __update_reg_bounds(dst_reg);
                break;
        case BPF_LSH:
-               if (umax_val > 63) {
-                       /* Shifts greater than 63 are undefined.  This includes
-                        * shifts by a negative number.
+               if (umax_val >= insn_bitness) {
+                       /* Shifts greater than 31 or 63 are undefined.
+                        * This includes shifts by a negative number.
                         */
                        mark_reg_unknown(env, regs, insn->dst_reg);
                        break;
@@ -2759,27 +2821,29 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                __update_reg_bounds(dst_reg);
                break;
        case BPF_RSH:
-               if (umax_val > 63) {
-                       /* Shifts greater than 63 are undefined.  This includes
-                        * shifts by a negative number.
+               if (umax_val >= insn_bitness) {
+                       /* Shifts greater than 31 or 63 are undefined.
+                        * This includes shifts by a negative number.
                         */
                        mark_reg_unknown(env, regs, insn->dst_reg);
                        break;
                }
-               /* BPF_RSH is an unsigned shift, so make the appropriate casts */
-               if (dst_reg->smin_value < 0) {
-                       if (umin_val) {
-                               /* Sign bit will be cleared */
-                               dst_reg->smin_value = 0;
-                       } else {
-                               /* Lost sign bit information */
-                               dst_reg->smin_value = S64_MIN;
-                               dst_reg->smax_value = S64_MAX;
-                       }
-               } else {
-                       dst_reg->smin_value =
-                               (u64)(dst_reg->smin_value) >> umax_val;
-               }
+               /* BPF_RSH is an unsigned shift.  If the value in dst_reg might
+                * be negative, then either:
+                * 1) src_reg might be zero, so the sign bit of the result is
+                *    unknown, so we lose our signed bounds
+                * 2) it's known negative, thus the unsigned bounds capture the
+                *    signed bounds
+                * 3) the signed bounds cross zero, so they tell us nothing
+                *    about the result
+                * If the value in dst_reg is known nonnegative, then again the
+                * unsigned bounds capture the signed bounds.
+                * Thus, in all cases it suffices to blow away our signed bounds
+                * and rely on inferring new ones from the unsigned bounds and
+                * var_off of the result.
+                */
+               dst_reg->smin_value = S64_MIN;
+               dst_reg->smax_value = S64_MAX;
                if (src_known)
                        dst_reg->var_off = tnum_rshift(dst_reg->var_off,
                                                       umin_val);
@@ -2795,6 +2859,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                break;
        }
 
+       if (BPF_CLASS(insn->code) != BPF_ALU64) {
+               /* 32-bit ALU ops are (32,32)->32 */
+               coerce_reg_to_size(dst_reg, 4);
+               coerce_reg_to_size(&src_reg, 4);
+       }
+
        __reg_deduce_bounds(dst_reg);
        __reg_bound_offset(dst_reg);
        return 0;
@@ -2811,7 +2881,6 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
        struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
        struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
        u8 opcode = BPF_OP(insn->code);
-       int rc;
 
        dst_reg = &regs[insn->dst_reg];
        src_reg = NULL;
@@ -2822,43 +2891,29 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
                if (src_reg->type != SCALAR_VALUE) {
                        if (dst_reg->type != SCALAR_VALUE) {
                                /* Combining two pointers by any ALU op yields
-                                * an arbitrary scalar.
+                                * an arbitrary scalar. Disallow all math except
+                                * pointer subtraction
                                 */
-                               if (!env->allow_ptr_leaks) {
-                                       verbose(env, "R%d pointer %s pointer prohibited\n",
-                                               insn->dst_reg,
-                                               bpf_alu_string[opcode >> 4]);
-                                       return -EACCES;
+                               if (opcode == BPF_SUB){
+                                       mark_reg_unknown(env, regs, insn->dst_reg);
+                                       return 0;
                                }
-                               mark_reg_unknown(env, regs, insn->dst_reg);
-                               return 0;
+                               verbose(env, "R%d pointer %s pointer prohibited\n",
+                                       insn->dst_reg,
+                                       bpf_alu_string[opcode >> 4]);
+                               return -EACCES;
                        } else {
                                /* scalar += pointer
                                 * This is legal, but we have to reverse our
                                 * src/dest handling in computing the range
                                 */
-                               rc = adjust_ptr_min_max_vals(env, insn,
-                                                            src_reg, dst_reg);
-                               if (rc == -EACCES && env->allow_ptr_leaks) {
-                                       /* scalar += unknown scalar */
-                                       __mark_reg_unknown(&off_reg);
-                                       return adjust_scalar_min_max_vals(
-                                                       env, insn,
-                                                       dst_reg, off_reg);
-                               }
-                               return rc;
+                               return adjust_ptr_min_max_vals(env, insn,
+                                                              src_reg, dst_reg);
                        }
                } else if (ptr_reg) {
                        /* pointer += scalar */
-                       rc = adjust_ptr_min_max_vals(env, insn,
-                                                    dst_reg, src_reg);
-                       if (rc == -EACCES && env->allow_ptr_leaks) {
-                               /* unknown scalar += scalar */
-                               __mark_reg_unknown(dst_reg);
-                               return adjust_scalar_min_max_vals(
-                                               env, insn, dst_reg, *src_reg);
-                       }
-                       return rc;
+                       return adjust_ptr_min_max_vals(env, insn,
+                                                      dst_reg, src_reg);
                }
        } else {
                /* Pretend the src is a reg with a known value, since we only
@@ -2867,17 +2922,9 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
                off_reg.type = SCALAR_VALUE;
                __mark_reg_known(&off_reg, insn->imm);
                src_reg = &off_reg;
-               if (ptr_reg) { /* pointer += K */
-                       rc = adjust_ptr_min_max_vals(env, insn,
-                                                    ptr_reg, src_reg);
-                       if (rc == -EACCES && env->allow_ptr_leaks) {
-                               /* unknown scalar += K */
-                               __mark_reg_unknown(dst_reg);
-                               return adjust_scalar_min_max_vals(
-                                               env, insn, dst_reg, off_reg);
-                       }
-                       return rc;
-               }
+               if (ptr_reg) /* pointer += K */
+                       return adjust_ptr_min_max_vals(env, insn,
+                                                      ptr_reg, src_reg);
        }
 
        /* Got here implies adding two SCALAR_VALUEs */
@@ -2974,17 +3021,20 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                                        return -EACCES;
                                }
                                mark_reg_unknown(env, regs, insn->dst_reg);
-                               /* high 32 bits are known zero. */
-                               regs[insn->dst_reg].var_off = tnum_cast(
-                                               regs[insn->dst_reg].var_off, 4);
-                               __update_reg_bounds(&regs[insn->dst_reg]);
+                               coerce_reg_to_size(&regs[insn->dst_reg], 4);
                        }
                } else {
                        /* case: R = imm
                         * remember the value we stored into this reg
                         */
                        regs[insn->dst_reg].type = SCALAR_VALUE;
-                       __mark_reg_known(regs + insn->dst_reg, insn->imm);
+                       if (BPF_CLASS(insn->code) == BPF_ALU64) {
+                               __mark_reg_known(regs + insn->dst_reg,
+                                                insn->imm);
+                       } else {
+                               __mark_reg_known(regs + insn->dst_reg,
+                                                (u32)insn->imm);
+                       }
                }
 
        } else if (opcode > BPF_END) {
@@ -4061,15 +4111,14 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
                        return range_within(rold, rcur) &&
                               tnum_in(rold->var_off, rcur->var_off);
                } else {
-                       /* if we knew anything about the old value, we're not
-                        * equal, because we can't know anything about the
-                        * scalar value of the pointer in the new value.
+                       /* We're trying to use a pointer in place of a scalar.
+                        * Even if the scalar was unbounded, this could lead to
+                        * pointer leaks because scalars are allowed to leak
+                        * while pointers are not. We could make this safe in
+                        * special cases if root is calling us, but it's
+                        * probably not worth the hassle.
                         */
-                       return rold->umin_value == 0 &&
-                              rold->umax_value == U64_MAX &&
-                              rold->smin_value == S64_MIN &&
-                              rold->smax_value == S64_MAX &&
-                              tnum_is_unknown(rold->var_off);
+                       return false;
                }
        case PTR_TO_MAP_VALUE:
                /* If the new min/max/var_off satisfy the old ones and
index 13d6881f908b7f91a5669264a060f59d7990f801..ec999f32c84058a0624d55ff3ee26a4fac63eb57 100644 (file)
@@ -434,17 +434,22 @@ static struct pid *good_sigevent(sigevent_t * event)
 {
        struct task_struct *rtn = current->group_leader;
 
-       if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
-               (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
-                !same_thread_group(rtn, current) ||
-                (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
+       switch (event->sigev_notify) {
+       case SIGEV_SIGNAL | SIGEV_THREAD_ID:
+               rtn = find_task_by_vpid(event->sigev_notify_thread_id);
+               if (!rtn || !same_thread_group(rtn, current))
+                       return NULL;
+               /* FALLTHRU */
+       case SIGEV_SIGNAL:
+       case SIGEV_THREAD:
+               if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
+                       return NULL;
+               /* FALLTHRU */
+       case SIGEV_NONE:
+               return task_pid(rtn);
+       default:
                return NULL;
-
-       if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
-           ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
-               return NULL;
-
-       return task_pid(rtn);
+       }
 }
 
 static struct k_itimer * alloc_posix_timer(void)
@@ -669,7 +674,7 @@ void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
        struct timespec64 ts64;
        bool sig_none;
 
-       sig_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE;
+       sig_none = timr->it_sigev_notify == SIGEV_NONE;
        iv = timr->it_interval;
 
        /* interval timer ? */
@@ -856,7 +861,7 @@ int common_timer_set(struct k_itimer *timr, int flags,
 
        timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
        expires = timespec64_to_ktime(new_setting->it_value);
-       sigev_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE;
+       sigev_none = timr->it_sigev_notify == SIGEV_NONE;
 
        kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
        timr->it_active = !sigev_none;
index aa8812ae6776ee31712fe88c58da4048ff9c31e4..9e97480892709957e127e9941710ce45f11ff724 100644 (file)
@@ -435,6 +435,41 @@ loop:
        return 0;
 }
 
+static int bpf_fill_ld_abs_vlan_push_pop2(struct bpf_test *self)
+{
+       struct bpf_insn *insn;
+
+       insn = kmalloc_array(16, sizeof(*insn), GFP_KERNEL);
+       if (!insn)
+               return -ENOMEM;
+
+       /* Due to func address being non-const, we need to
+        * assemble this here.
+        */
+       insn[0] = BPF_MOV64_REG(R6, R1);
+       insn[1] = BPF_LD_ABS(BPF_B, 0);
+       insn[2] = BPF_LD_ABS(BPF_H, 0);
+       insn[3] = BPF_LD_ABS(BPF_W, 0);
+       insn[4] = BPF_MOV64_REG(R7, R6);
+       insn[5] = BPF_MOV64_IMM(R6, 0);
+       insn[6] = BPF_MOV64_REG(R1, R7);
+       insn[7] = BPF_MOV64_IMM(R2, 1);
+       insn[8] = BPF_MOV64_IMM(R3, 2);
+       insn[9] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                              bpf_skb_vlan_push_proto.func - __bpf_call_base);
+       insn[10] = BPF_MOV64_REG(R6, R7);
+       insn[11] = BPF_LD_ABS(BPF_B, 0);
+       insn[12] = BPF_LD_ABS(BPF_H, 0);
+       insn[13] = BPF_LD_ABS(BPF_W, 0);
+       insn[14] = BPF_MOV64_IMM(R0, 42);
+       insn[15] = BPF_EXIT_INSN();
+
+       self->u.ptr.insns = insn;
+       self->u.ptr.len = 16;
+
+       return 0;
+}
+
 static int bpf_fill_jump_around_ld_abs(struct bpf_test *self)
 {
        unsigned int len = BPF_MAXINSNS;
@@ -6066,6 +6101,14 @@ static struct bpf_test tests[] = {
                {},
                { {0x1, 0x42 } },
        },
+       {
+               "LD_ABS with helper changing skb data",
+               { },
+               INTERNAL,
+               { 0x34 },
+               { { ETH_HLEN, 42 } },
+               .fill_helper = bpf_fill_ld_abs_vlan_push_pop2,
+       },
 };
 
 static struct net_device dev;
index 84b2dc76f140e922e2ed0d7c4d545b4d4ddf496d..b5f940ce0143ba061a183db0df3ef0dc17f57c72 100644 (file)
@@ -882,13 +882,10 @@ int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
        if (IS_ERR(dev))
                return PTR_ERR(dev);
 
-       if (bdi_debug_register(bdi, dev_name(dev))) {
-               device_destroy(bdi_class, dev->devt);
-               return -ENOMEM;
-       }
        cgwb_bdi_register(bdi);
        bdi->dev = dev;
 
+       bdi_debug_register(bdi, dev_name(dev));
        set_bit(WB_registered, &bdi->wb.state);
 
        spin_lock_bh(&bdi_lock);
index b73b96a2854b152eebf8e9a5c68caa892318c6e6..c44f6515be5ecb20c763d13a652e0f8c006e7ef2 100644 (file)
@@ -1,3 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
+#
+# Marek Lindner, Simon Wunderlich
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of version 2 of the GNU General Public
+# License as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+
 #
 # B.A.T.M.A.N meshing protocol
 #
index 915987bc6d294e5450a5439e6b854267763f3370..022f6e77307b86fa00377dd37af187757b1a0ba9 100644 (file)
@@ -1,4 +1,4 @@
-#
+# SPDX-License-Identifier: GPL-2.0
 # Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
 #
 # Marek Lindner, Simon Wunderlich
index 44fd073b7546c47ceb25238c5686aad2967bc15f..80c72c7d3cad332769056169cd8d1c2aba9bf1bd 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -37,7 +38,8 @@ char batadv_routing_algo[20] = "BATMAN_IV";
 static struct hlist_head batadv_algo_list;
 
 /**
- * batadv_algo_init - Initialize batman-adv algorithm management data structures
+ * batadv_algo_init() - Initialize batman-adv algorithm management data
+ *  structures
  */
 void batadv_algo_init(void)
 {
@@ -59,6 +61,12 @@ static struct batadv_algo_ops *batadv_algo_get(char *name)
        return bat_algo_ops;
 }
 
+/**
+ * batadv_algo_register() - Register callbacks for a mesh algorithm
+ * @bat_algo_ops: mesh algorithm callbacks to add
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
 {
        struct batadv_algo_ops *bat_algo_ops_tmp;
@@ -88,6 +96,19 @@ int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
        return 0;
 }
 
+/**
+ * batadv_algo_select() - Select algorithm of soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ * @name: name of the algorithm to select
+ *
+ * The algorithm callbacks for the soft interface will be set when the algorithm
+ * with the correct name was found. Any previous selected algorithm will not be
+ * deinitialized and the new selected algorithm will also not be initialized.
+ * It is therefore not allowed to call batadv_algo_select outside the creation
+ * function of the soft interface.
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
 {
        struct batadv_algo_ops *bat_algo_ops;
@@ -102,6 +123,14 @@ int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
 }
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
+/**
+ * batadv_algo_seq_print_text() - Print the supported algorithms in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct batadv_algo_ops *bat_algo_ops;
@@ -148,7 +177,7 @@ module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
                0644);
 
 /**
- * batadv_algo_dump_entry - fill in information about one supported routing
+ * batadv_algo_dump_entry() - fill in information about one supported routing
  *  algorithm
  * @msg: netlink message to be sent back
  * @portid: Port to reply to
@@ -179,7 +208,7 @@ static int batadv_algo_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_algo_dump - fill in information about supported routing
+ * batadv_algo_dump() - fill in information about supported routing
  *  algorithms
  * @msg: netlink message to be sent back
  * @cb: Parameters to the netlink request
index 29f6312f9bf13ff0980b965f34465df5ae59265e..029221615ba3d59a6b5d47b78c16c7c8555ba7a6 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Linus Lüssing
index bbe8414b6ee7d21f86e5ab9302ebfc875dbe33d3..79e32638372663314e1f47590b74ec7482d1fb8d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -26,7 +27,7 @@
 #include <linux/cache.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/init.h>
 #include <linux/jiffies.h>
@@ -51,6 +52,7 @@
 #include <linux/workqueue.h>
 #include <net/genetlink.h>
 #include <net/netlink.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "bat_algo.h"
@@ -62,7 +64,6 @@
 #include "netlink.h"
 #include "network-coding.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 #include "translation-table.h"
@@ -72,21 +73,28 @@ static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work);
 
 /**
  * enum batadv_dup_status - duplicate status
- * @BATADV_NO_DUP: the packet is no duplicate
- * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the
- *  neighbor)
- * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor
- * @BATADV_PROTECTED: originator is currently protected (after reboot)
  */
 enum batadv_dup_status {
+       /** @BATADV_NO_DUP: the packet is no duplicate */
        BATADV_NO_DUP = 0,
+
+       /**
+        * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for
+        *  the neighbor)
+        */
        BATADV_ORIG_DUP,
+
+       /** @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor */
        BATADV_NEIGH_DUP,
+
+       /**
+        * @BATADV_PROTECTED: originator is currently protected (after reboot)
+        */
        BATADV_PROTECTED,
 };
 
 /**
- * batadv_ring_buffer_set - update the ring buffer with the given value
+ * batadv_ring_buffer_set() - update the ring buffer with the given value
  * @lq_recv: pointer to the ring buffer
  * @lq_index: index to store the value at
  * @value: value to store in the ring buffer
@@ -98,7 +106,7 @@ static void batadv_ring_buffer_set(u8 lq_recv[], u8 *lq_index, u8 value)
 }
 
 /**
- * batadv_ring_buffer_avg - compute the average of all non-zero values stored
+ * batadv_ring_buffer_avg() - compute the average of all non-zero values stored
  * in the given ring buffer
  * @lq_recv: pointer to the ring buffer
  *
@@ -130,7 +138,7 @@ static u8 batadv_ring_buffer_avg(const u8 lq_recv[])
 }
 
 /**
- * batadv_iv_ogm_orig_free - free the private resources allocated for this
+ * batadv_iv_ogm_orig_free() - free the private resources allocated for this
  *  orig_node
  * @orig_node: the orig_node for which the resources have to be free'd
  */
@@ -141,8 +149,8 @@ static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node)
 }
 
 /**
- * batadv_iv_ogm_orig_add_if - change the private structures of the orig_node to
- *  include the new hard-interface
+ * batadv_iv_ogm_orig_add_if() - change the private structures of the orig_node
+ *  to include the new hard-interface
  * @orig_node: the orig_node that has to be changed
  * @max_if_num: the current amount of interfaces
  *
@@ -186,7 +194,7 @@ unlock:
 }
 
 /**
- * batadv_iv_ogm_drop_bcast_own_entry - drop section of bcast_own
+ * batadv_iv_ogm_drop_bcast_own_entry() - drop section of bcast_own
  * @orig_node: the orig_node that has to be changed
  * @max_if_num: the current amount of interfaces
  * @del_if_num: the index of the interface being removed
@@ -224,7 +232,7 @@ batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_iv_ogm_drop_bcast_own_sum_entry - drop section of bcast_own_sum
+ * batadv_iv_ogm_drop_bcast_own_sum_entry() - drop section of bcast_own_sum
  * @orig_node: the orig_node that has to be changed
  * @max_if_num: the current amount of interfaces
  * @del_if_num: the index of the interface being removed
@@ -259,8 +267,8 @@ batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_iv_ogm_orig_del_if - change the private structures of the orig_node to
- *  exclude the removed interface
+ * batadv_iv_ogm_orig_del_if() - change the private structures of the orig_node
+ *  to exclude the removed interface
  * @orig_node: the orig_node that has to be changed
  * @max_if_num: the current amount of interfaces
  * @del_if_num: the index of the interface being removed
@@ -290,7 +298,8 @@ static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_iv_ogm_orig_get - retrieve or create (if does not exist) an originator
+ * batadv_iv_ogm_orig_get() - retrieve or create (if does not exist) an
+ *  originator
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: mac address of the originator
  *
@@ -447,7 +456,7 @@ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_iv_ogm_aggr_packet - checks if there is another OGM attached
+ * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached
  * @buff_pos: current position in the skb
  * @packet_len: total length of the skb
  * @tvlv_len: tvlv length of the previously considered OGM
@@ -557,7 +566,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
 }
 
 /**
- * batadv_iv_ogm_can_aggregate - find out if an OGM can be aggregated on an
+ * batadv_iv_ogm_can_aggregate() - find out if an OGM can be aggregated on an
  *  existing forward packet
  * @new_bat_ogm_packet: OGM packet to be aggregated
  * @bat_priv: the bat priv with all the soft interface information
@@ -660,7 +669,7 @@ out:
 }
 
 /**
- * batadv_iv_ogm_aggregate_new - create a new aggregated packet and add this
+ * batadv_iv_ogm_aggregate_new() - create a new aggregated packet and add this
  *  packet to it.
  * @packet_buff: pointer to the OGM
  * @packet_len: (total) length of the OGM
@@ -743,7 +752,7 @@ static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
 }
 
 /**
- * batadv_iv_ogm_queue_add - queue up an OGM for transmission
+ * batadv_iv_ogm_queue_add() - queue up an OGM for transmission
  * @bat_priv: the bat priv with all the soft interface information
  * @packet_buff: pointer to the OGM
  * @packet_len: (total) length of the OGM
@@ -869,8 +878,8 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_iv_ogm_slide_own_bcast_window - bitshift own OGM broadcast windows for
- * the given interface
+ * batadv_iv_ogm_slide_own_bcast_window() - bitshift own OGM broadcast windows
+ *  for the given interface
  * @hard_iface: the interface for which the windows have to be shifted
  */
 static void
@@ -987,7 +996,7 @@ out:
 }
 
 /**
- * batadv_iv_ogm_orig_update - use OGM to update corresponding data in an
+ * batadv_iv_ogm_orig_update() - use OGM to update corresponding data in an
  *  originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the orig node who originally emitted the ogm packet
@@ -1152,7 +1161,7 @@ out:
 }
 
 /**
- * batadv_iv_ogm_calc_tq - calculate tq for current received ogm packet
+ * batadv_iv_ogm_calc_tq() - calculate tq for current received ogm packet
  * @orig_node: the orig node who originally emitted the ogm packet
  * @orig_neigh_node: the orig node struct of the neighbor who sent the packet
  * @batadv_ogm_packet: the ogm packet
@@ -1298,7 +1307,7 @@ out:
 }
 
 /**
- * batadv_iv_ogm_update_seqnos -  process a batman packet for all interfaces,
+ * batadv_iv_ogm_update_seqnos() - process a batman packet for all interfaces,
  *  adjust the sequence number and find out whether it is a duplicate
  * @ethhdr: ethernet header of the packet
  * @batadv_ogm_packet: OGM packet to be considered
@@ -1401,7 +1410,8 @@ out:
 }
 
 /**
- * batadv_iv_ogm_process_per_outif - process a batman iv OGM for an outgoing if
+ * batadv_iv_ogm_process_per_outif() - process a batman iv OGM for an outgoing
+ *  interface
  * @skb: the skb containing the OGM
  * @ogm_offset: offset from skb->data to start of ogm header
  * @orig_node: the (cached) orig node for the originator of this OGM
@@ -1608,7 +1618,7 @@ out:
 }
 
 /**
- * batadv_iv_ogm_process - process an incoming batman iv OGM
+ * batadv_iv_ogm_process() - process an incoming batman iv OGM
  * @skb: the skb containing the OGM
  * @ogm_offset: offset to the OGM which should be processed (for aggregates)
 * @if_incoming: the interface where this packet was received
@@ -1861,7 +1871,7 @@ free_skb:
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_iv_ogm_orig_print_neigh - print neighbors for the originator table
+ * batadv_iv_ogm_orig_print_neigh() - print neighbors for the originator table
  * @orig_node: the orig_node for which the neighbors are printed
  * @if_outgoing: outgoing interface for these entries
  * @seq: debugfs table seq_file struct
@@ -1890,7 +1900,7 @@ batadv_iv_ogm_orig_print_neigh(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_iv_ogm_orig_print - print the originator table
+ * batadv_iv_ogm_orig_print() - print the originator table
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: debugfs table seq_file struct
  * @if_outgoing: the outgoing interface for which this should be printed
@@ -1960,7 +1970,7 @@ next:
 #endif
 
 /**
- * batadv_iv_ogm_neigh_get_tq_avg - Get the TQ average for a neighbour on a
+ * batadv_iv_ogm_neigh_get_tq_avg() - Get the TQ average for a neighbour on a
  *  given outgoing interface.
  * @neigh_node: Neighbour of interest
  * @if_outgoing: Outgoing interface of interest
@@ -1986,7 +1996,7 @@ batadv_iv_ogm_neigh_get_tq_avg(struct batadv_neigh_node *neigh_node,
 }
 
 /**
- * batadv_iv_ogm_orig_dump_subentry - Dump an originator subentry into a
+ * batadv_iv_ogm_orig_dump_subentry() - Dump an originator subentry into a
  *  message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
@@ -2048,7 +2058,7 @@ batadv_iv_ogm_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_iv_ogm_orig_dump_entry - Dump an originator entry into a message
+ * batadv_iv_ogm_orig_dump_entry() - Dump an originator entry into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2110,7 +2120,7 @@ batadv_iv_ogm_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_iv_ogm_orig_dump_bucket - Dump an originator bucket into a
+ * batadv_iv_ogm_orig_dump_bucket() - Dump an originator bucket into a
  *  message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
@@ -2153,7 +2163,7 @@ batadv_iv_ogm_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_iv_ogm_orig_dump - Dump the originators into a message
+ * batadv_iv_ogm_orig_dump() - Dump the originators into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
@@ -2190,7 +2200,7 @@ batadv_iv_ogm_orig_dump(struct sk_buff *msg, struct netlink_callback *cb,
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_iv_hardif_neigh_print - print a single hop neighbour node
+ * batadv_iv_hardif_neigh_print() - print a single hop neighbour node
  * @seq: neighbour table seq_file struct
  * @hardif_neigh: hardif neighbour information
  */
@@ -2209,7 +2219,7 @@ batadv_iv_hardif_neigh_print(struct seq_file *seq,
 }
 
 /**
- * batadv_iv_ogm_neigh_print - print the single hop neighbour list
+ * batadv_iv_ogm_neigh_print() - print the single hop neighbour list
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: neighbour table seq_file struct
  */
@@ -2242,7 +2252,7 @@ static void batadv_iv_neigh_print(struct batadv_priv *bat_priv,
 #endif
 
 /**
- * batadv_iv_ogm_neigh_diff - calculate tq difference of two neighbors
+ * batadv_iv_ogm_neigh_diff() - calculate tq difference of two neighbors
  * @neigh1: the first neighbor object of the comparison
  * @if_outgoing1: outgoing interface for the first neighbor
  * @neigh2: the second neighbor object of the comparison
@@ -2287,7 +2297,7 @@ out:
 }
 
 /**
- * batadv_iv_ogm_neigh_dump_neigh - Dump a neighbour into a netlink message
+ * batadv_iv_ogm_neigh_dump_neigh() - Dump a neighbour into a netlink message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2326,7 +2336,7 @@ batadv_iv_ogm_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_iv_ogm_neigh_dump_hardif - Dump the neighbours of a hard interface
+ * batadv_iv_ogm_neigh_dump_hardif() - Dump the neighbours of a hard interface
  *  into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
@@ -2365,7 +2375,7 @@ batadv_iv_ogm_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_iv_ogm_neigh_dump - Dump the neighbours into a message
+ * batadv_iv_ogm_neigh_dump() - Dump the neighbours into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
@@ -2417,7 +2427,7 @@ batadv_iv_ogm_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
 }
 
 /**
- * batadv_iv_ogm_neigh_cmp - compare the metrics of two neighbors
+ * batadv_iv_ogm_neigh_cmp() - compare the metrics of two neighbors
  * @neigh1: the first neighbor object of the comparison
  * @if_outgoing1: outgoing interface for the first neighbor
  * @neigh2: the second neighbor object of the comparison
@@ -2443,7 +2453,7 @@ static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1,
 }
 
 /**
- * batadv_iv_ogm_neigh_is_sob - check if neigh1 is similarly good or better
+ * batadv_iv_ogm_neigh_is_sob() - check if neigh1 is similarly good or better
  *  than neigh2 from the metric prospective
  * @neigh1: the first neighbor object of the comparison
  * @if_outgoing1: outgoing interface for the first neighbor
@@ -2478,7 +2488,7 @@ static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_iv_init_sel_class - initialize GW selection class
+ * batadv_iv_init_sel_class() - initialize GW selection class
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv)
@@ -2703,7 +2713,7 @@ static void batadv_iv_gw_print(struct batadv_priv *bat_priv,
 #endif
 
 /**
- * batadv_iv_gw_dump_entry - Dump a gateway into a message
+ * batadv_iv_gw_dump_entry() - Dump a gateway into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2774,7 +2784,7 @@ out:
 }
 
 /**
- * batadv_iv_gw_dump - Dump gateways into a message
+ * batadv_iv_gw_dump() - Dump gateways into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
@@ -2843,6 +2853,11 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
        },
 };
 
+/**
+ * batadv_iv_init() - B.A.T.M.A.N. IV initialization function
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int __init batadv_iv_init(void)
 {
        int ret;
index ae2ab526bdb1e95ef2ee5fc27fb59e38d3b462d2..9dc0dd5c83df40af56cabad17de86002b8701198 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
index e0e2bfcd6b3efd73f2567d23ce304951c9e2c3f0..27e165ac9302bc365e1f81034a1c1353e991b724 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing, Marek Lindner
@@ -36,6 +37,7 @@
 #include <linux/workqueue.h>
 #include <net/genetlink.h>
 #include <net/netlink.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "bat_algo.h"
@@ -48,7 +50,6 @@
 #include "log.h"
 #include "netlink.h"
 #include "originator.h"
-#include "packet.h"
 
 struct sk_buff;
 
@@ -99,7 +100,7 @@ static void batadv_v_primary_iface_set(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_v_iface_update_mac - react to hard-interface MAC address change
+ * batadv_v_iface_update_mac() - react to hard-interface MAC address change
  * @hard_iface: the modified interface
  *
  * If the modified interface is the primary one, update the originator
@@ -130,7 +131,7 @@ batadv_v_hardif_neigh_init(struct batadv_hardif_neigh_node *hardif_neigh)
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_v_orig_print_neigh - print neighbors for the originator table
+ * batadv_v_orig_print_neigh() - print neighbors for the originator table
  * @orig_node: the orig_node for which the neighbors are printed
  * @if_outgoing: outgoing interface for these entries
  * @seq: debugfs table seq_file struct
@@ -160,7 +161,7 @@ batadv_v_orig_print_neigh(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_v_hardif_neigh_print - print a single ELP neighbour node
+ * batadv_v_hardif_neigh_print() - print a single ELP neighbour node
  * @seq: neighbour table seq_file struct
  * @hardif_neigh: hardif neighbour information
  */
@@ -181,7 +182,7 @@ batadv_v_hardif_neigh_print(struct seq_file *seq,
 }
 
 /**
- * batadv_v_neigh_print - print the single hop neighbour list
+ * batadv_v_neigh_print() - print the single hop neighbour list
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: neighbour table seq_file struct
  */
@@ -215,7 +216,7 @@ static void batadv_v_neigh_print(struct batadv_priv *bat_priv,
 #endif
 
 /**
- * batadv_v_neigh_dump_neigh - Dump a neighbour into a message
+ * batadv_v_neigh_dump_neigh() - Dump a neighbour into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -258,7 +259,7 @@ batadv_v_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_v_neigh_dump_hardif - Dump the  neighbours of a hard interface  into
+ * batadv_v_neigh_dump_hardif() - Dump the neighbours of a hard interface into
  *  a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
@@ -296,7 +297,7 @@ batadv_v_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_v_neigh_dump - Dump the neighbours of a hard interface  into a
+ * batadv_v_neigh_dump() - Dump the neighbours of a hard interface into a
  *  message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
@@ -348,7 +349,7 @@ batadv_v_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_v_orig_print - print the originator table
+ * batadv_v_orig_print() - print the originator table
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: debugfs table seq_file struct
  * @if_outgoing: the outgoing interface for which this should be printed
@@ -416,8 +417,7 @@ next:
 #endif
 
 /**
- * batadv_v_orig_dump_subentry - Dump an originator subentry into a
- *  message
+ * batadv_v_orig_dump_subentry() - Dump an originator subentry into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -483,7 +483,7 @@ batadv_v_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_v_orig_dump_entry - Dump an originator entry into a message
+ * batadv_v_orig_dump_entry() - Dump an originator entry into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -536,8 +536,7 @@ batadv_v_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_v_orig_dump_bucket - Dump an originator bucket into a
- *  message
+ * batadv_v_orig_dump_bucket() - Dump an originator bucket into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -578,7 +577,7 @@ batadv_v_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_v_orig_dump - Dump the originators into a message
+ * batadv_v_orig_dump() - Dump the originators into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
@@ -668,7 +667,7 @@ err_ifinfo1:
 }
 
 /**
- * batadv_v_init_sel_class - initialize GW selection class
+ * batadv_v_init_sel_class() - initialize GW selection class
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_v_init_sel_class(struct batadv_priv *bat_priv)
@@ -704,7 +703,7 @@ static ssize_t batadv_v_show_sel_class(struct batadv_priv *bat_priv, char *buff)
 }
 
 /**
- * batadv_v_gw_throughput_get - retrieve the GW-bandwidth for a given GW
+ * batadv_v_gw_throughput_get() - retrieve the GW-bandwidth for a given GW
  * @gw_node: the GW to retrieve the metric for
  * @bw: the pointer where the metric will be stored. The metric is computed as
  *  the minimum between the GW advertised throughput and the path throughput to
@@ -747,7 +746,7 @@ out:
 }
 
 /**
- * batadv_v_gw_get_best_gw_node - retrieve the best GW node
+ * batadv_v_gw_get_best_gw_node() - retrieve the best GW node
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: the GW node having the best GW-metric, NULL if no GW is known
@@ -785,7 +784,7 @@ next:
 }
 
 /**
- * batadv_v_gw_is_eligible - check if a originator would be selected as GW
+ * batadv_v_gw_is_eligible() - check if an originator would be selected as GW
  * @bat_priv: the bat priv with all the soft interface information
  * @curr_gw_orig: originator representing the currently selected GW
  * @orig_node: the originator representing the new candidate
@@ -884,7 +883,7 @@ out:
 }
 
 /**
- * batadv_v_gw_print - print the gateway list
+ * batadv_v_gw_print() - print the gateway list
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: gateway table seq_file struct
  */
@@ -913,7 +912,7 @@ static void batadv_v_gw_print(struct batadv_priv *bat_priv,
 #endif
 
 /**
- * batadv_v_gw_dump_entry - Dump a gateway into a message
+ * batadv_v_gw_dump_entry() - Dump a gateway into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -1004,7 +1003,7 @@ out:
 }
 
 /**
- * batadv_v_gw_dump - Dump gateways into a message
+ * batadv_v_gw_dump() - Dump gateways into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
@@ -1074,7 +1073,7 @@ static struct batadv_algo_ops batadv_batman_v __read_mostly = {
 };
 
 /**
- * batadv_v_hardif_init - initialize the algorithm specific fields in the
+ * batadv_v_hardif_init() - initialize the algorithm specific fields in the
  *  hard-interface object
  * @hard_iface: the hard-interface to initialize
  */
@@ -1088,7 +1087,7 @@ void batadv_v_hardif_init(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_v_mesh_init - initialize the B.A.T.M.A.N. V private resources for a
+ * batadv_v_mesh_init() - initialize the B.A.T.M.A.N. V private resources for a
  *  mesh
  * @bat_priv: the object representing the mesh interface to initialise
  *
@@ -1106,7 +1105,7 @@ int batadv_v_mesh_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_v_mesh_free - free the B.A.T.M.A.N. V private resources for a mesh
+ * batadv_v_mesh_free() - free the B.A.T.M.A.N. V private resources for a mesh
  * @bat_priv: the object representing the mesh interface to free
  */
 void batadv_v_mesh_free(struct batadv_priv *bat_priv)
@@ -1115,7 +1114,7 @@ void batadv_v_mesh_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_v_init - B.A.T.M.A.N. V initialization function
+ * batadv_v_init() - B.A.T.M.A.N. V initialization function
  *
  * Description: Takes care of initializing all the subcomponents.
  * It is invoked upon module load only.
index dd7c4b647e6b4a1325674d820fb7ca6314f437e2..a17ab68bbce8ec1bda67b68102b0c313fdb1e9b7 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Linus Lüssing
index 1de992c58b351f09fd34df79c3b8b30acb2a9685..a83478c4659701630bce792575f2cb0bc39bba86 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing, Marek Lindner
@@ -24,7 +25,7 @@
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
 #include <net/cfg80211.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bat_algo.h"
 #include "bat_v_ogm.h"
 #include "hard-interface.h"
 #include "log.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 
 /**
- * batadv_v_elp_start_timer - restart timer for ELP periodic work
+ * batadv_v_elp_start_timer() - restart timer for ELP periodic work
  * @hard_iface: the interface for which the timer has to be reset
  */
 static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface)
@@ -67,7 +68,7 @@ static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_v_elp_get_throughput - get the throughput towards a neighbour
+ * batadv_v_elp_get_throughput() - get the throughput towards a neighbour
  * @neigh: the neighbour for which the throughput has to be obtained
  *
  * Return: The throughput towards the given neighbour in multiples of 100kpbs
@@ -153,8 +154,8 @@ default_throughput:
 }
 
 /**
- * batadv_v_elp_throughput_metric_update - worker updating the throughput metric
- *  of a single hop neighbour
+ * batadv_v_elp_throughput_metric_update() - worker updating the throughput
+ *  metric of a single hop neighbour
  * @work: the work queue item
  */
 void batadv_v_elp_throughput_metric_update(struct work_struct *work)
@@ -177,7 +178,7 @@ void batadv_v_elp_throughput_metric_update(struct work_struct *work)
 }
 
 /**
- * batadv_v_elp_wifi_neigh_probe - send link probing packets to a neighbour
+ * batadv_v_elp_wifi_neigh_probe() - send link probing packets to a neighbour
  * @neigh: the neighbour to probe
  *
  * Sends a predefined number of unicast wifi packets to a given neighbour in
@@ -240,7 +241,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
 }
 
 /**
- * batadv_v_elp_periodic_work - ELP periodic task per interface
+ * batadv_v_elp_periodic_work() - ELP periodic task per interface
  * @work: work queue item
  *
  * Emits broadcast ELP message in regular intervals.
@@ -327,7 +328,7 @@ out:
 }
 
 /**
- * batadv_v_elp_iface_enable - setup the ELP interface private resources
+ * batadv_v_elp_iface_enable() - setup the ELP interface private resources
  * @hard_iface: interface for which the data has to be prepared
  *
  * Return: 0 on success or a -ENOMEM in case of failure.
@@ -375,7 +376,7 @@ out:
 }
 
 /**
- * batadv_v_elp_iface_disable - release ELP interface private resources
+ * batadv_v_elp_iface_disable() - release ELP interface private resources
  * @hard_iface: interface for which the resources have to be released
  */
 void batadv_v_elp_iface_disable(struct batadv_hard_iface *hard_iface)
@@ -387,7 +388,7 @@ void batadv_v_elp_iface_disable(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_v_elp_iface_activate - update the ELP buffer belonging to the given
+ * batadv_v_elp_iface_activate() - update the ELP buffer belonging to the given
  *  hard-interface
  * @primary_iface: the new primary interface
  * @hard_iface: interface holding the to-be-updated buffer
@@ -408,7 +409,7 @@ void batadv_v_elp_iface_activate(struct batadv_hard_iface *primary_iface,
 }
 
 /**
- * batadv_v_elp_primary_iface_set - change internal data to reflect the new
+ * batadv_v_elp_primary_iface_set() - change internal data to reflect the new
  *  primary interface
  * @primary_iface: the new primary interface
  */
@@ -428,7 +429,7 @@ void batadv_v_elp_primary_iface_set(struct batadv_hard_iface *primary_iface)
 }
 
 /**
- * batadv_v_elp_neigh_update - update an ELP neighbour node
+ * batadv_v_elp_neigh_update() - update an ELP neighbour node
  * @bat_priv: the bat priv with all the soft interface information
  * @neigh_addr: the neighbour interface address
  * @if_incoming: the interface the packet was received through
@@ -488,7 +489,7 @@ orig_free:
 }
 
 /**
- * batadv_v_elp_packet_recv - main ELP packet handler
+ * batadv_v_elp_packet_recv() - main ELP packet handler
  * @skb: the received packet
  * @if_incoming: the interface this packet was received through
  *
index 376ead280ab9e40e1283fbf4be5dbfe803a45015..5e39d0588a48a4c9c7fd8d2bdf0036f4869fa0ed 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing, Marek Lindner
index c251445a42a076fcdb06a25b2ddbe67bbb1ac052..ba59b77c605d6abd4d2a37fbd2792bd70ee288c4 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
@@ -22,7 +23,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bat_algo.h"
 #include "hard-interface.h"
 #include "hash.h"
 #include "log.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 #include "translation-table.h"
 #include "tvlv.h"
 
 /**
- * batadv_v_ogm_orig_get - retrieve and possibly create an originator node
+ * batadv_v_ogm_orig_get() - retrieve and possibly create an originator node
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the address of the originator
  *
@@ -88,7 +89,7 @@ struct batadv_orig_node *batadv_v_ogm_orig_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_v_ogm_start_timer - restart the OGM sending timer
+ * batadv_v_ogm_start_timer() - restart the OGM sending timer
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_v_ogm_start_timer(struct batadv_priv *bat_priv)
@@ -107,7 +108,7 @@ static void batadv_v_ogm_start_timer(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_v_ogm_send_to_if - send a batman ogm using a given interface
+ * batadv_v_ogm_send_to_if() - send a batman ogm using a given interface
  * @skb: the OGM to send
  * @hard_iface: the interface to use to send the OGM
  */
@@ -127,7 +128,7 @@ static void batadv_v_ogm_send_to_if(struct sk_buff *skb,
 }
 
 /**
- * batadv_v_ogm_send - periodic worker broadcasting the own OGM
+ * batadv_v_ogm_send() - periodic worker broadcasting the own OGM
  * @work: work queue item
  */
 static void batadv_v_ogm_send(struct work_struct *work)
@@ -235,7 +236,7 @@ out:
 }
 
 /**
- * batadv_v_ogm_iface_enable - prepare an interface for B.A.T.M.A.N. V
+ * batadv_v_ogm_iface_enable() - prepare an interface for B.A.T.M.A.N. V
  * @hard_iface: the interface to prepare
  *
  * Takes care of scheduling own OGM sending routine for this interface.
@@ -252,7 +253,7 @@ int batadv_v_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_v_ogm_primary_iface_set - set a new primary interface
+ * batadv_v_ogm_primary_iface_set() - set a new primary interface
  * @primary_iface: the new primary interface
  */
 void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
@@ -268,8 +269,8 @@ void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
 }
 
 /**
- * batadv_v_forward_penalty - apply a penalty to the throughput metric forwarded
- *  with B.A.T.M.A.N. V OGMs
+ * batadv_v_forward_penalty() - apply a penalty to the throughput metric
+ *  forwarded with B.A.T.M.A.N. V OGMs
  * @bat_priv: the bat priv with all the soft interface information
  * @if_incoming: the interface where the OGM has been received
  * @if_outgoing: the interface where the OGM has to be forwarded to
@@ -314,7 +315,7 @@ static u32 batadv_v_forward_penalty(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_v_ogm_forward - check conditions and forward an OGM to the given
+ * batadv_v_ogm_forward() - check conditions and forward an OGM to the given
  *  outgoing interface
  * @bat_priv: the bat priv with all the soft interface information
  * @ogm_received: previously received OGM to be forwarded
@@ -405,7 +406,7 @@ out:
 }
 
 /**
- * batadv_v_ogm_metric_update - update route metric based on OGM
+ * batadv_v_ogm_metric_update() - update route metric based on OGM
  * @bat_priv: the bat priv with all the soft interface information
  * @ogm2: OGM2 structure
  * @orig_node: Originator structure for which the OGM has been received
@@ -490,7 +491,7 @@ out:
 }
 
 /**
- * batadv_v_ogm_route_update - update routes based on OGM
+ * batadv_v_ogm_route_update() - update routes based on OGM
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: the Ethernet header of the OGM2
  * @ogm2: OGM2 structure
@@ -590,7 +591,7 @@ out:
 }
 
 /**
- * batadv_v_ogm_process_per_outif - process a batman v OGM for an outgoing if
+ * batadv_v_ogm_process_per_outif() - process a batman v OGM for an outgoing if
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: the Ethernet header of the OGM2
  * @ogm2: OGM2 structure
@@ -639,7 +640,7 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_v_ogm_aggr_packet - checks if there is another OGM aggregated
+ * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated
  * @buff_pos: current position in the skb
  * @packet_len: total length of the skb
  * @tvlv_len: tvlv length of the previously considered OGM
@@ -659,7 +660,7 @@ static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
 }
 
 /**
- * batadv_v_ogm_process - process an incoming batman v OGM
+ * batadv_v_ogm_process() - process an incoming batman v OGM
  * @skb: the skb containing the OGM
  * @ogm_offset: offset to the OGM which should be processed (for aggregates)
 * @if_incoming: the interface where this packet was received
@@ -787,7 +788,7 @@ out:
 }
 
 /**
- * batadv_v_ogm_packet_recv - OGM2 receiving handler
+ * batadv_v_ogm_packet_recv() - OGM2 receiving handler
  * @skb: the received OGM
  * @if_incoming: the interface where this OGM has been received
  *
@@ -851,7 +852,7 @@ free_skb:
 }
 
 /**
- * batadv_v_ogm_init - initialise the OGM2 engine
+ * batadv_v_ogm_init() - initialise the OGM2 engine
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 on success or a negative error code in case of failure
@@ -884,7 +885,7 @@ int batadv_v_ogm_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_v_ogm_free - free OGM private resources
+ * batadv_v_ogm_free() - free OGM private resources
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_v_ogm_free(struct batadv_priv *bat_priv)
index 2068770b542dd58bb412ec6bc21b09f6fce3e554..6a4c14ccc3c61bbce96726db94baa1ae57505d5d 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
index 2b070c7e31da1862038063512c81f9b55fa98ccd..bdc1ef06e05b56c86c0bc9621ec22a42261719a8 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2006-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
@@ -32,7 +33,7 @@ static void batadv_bitmap_shift_left(unsigned long *seq_bits, s32 n)
 }
 
 /**
- * batadv_bit_get_packet - receive and process one packet within the sequence
+ * batadv_bit_get_packet() - receive and process one packet within the sequence
  *  number window
  * @priv: the bat priv with all the soft interface information
  * @seq_bits: pointer to the sequence number receive packet
index cc262c9d97e0ca606aa666493956e5feedd18bb1..ca9d0753dd6b92bdfc332094b2d1776989270b39 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2006-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
@@ -26,7 +27,7 @@
 #include <linux/types.h>
 
 /**
- * batadv_test_bit - check if bit is set in the current window
+ * batadv_test_bit() - check if bit is set in the current window
  *
  * @seq_bits: pointer to the sequence number receive packet
  * @last_seqno: latest sequence number in seq_bits
@@ -46,7 +47,12 @@ static inline bool batadv_test_bit(const unsigned long *seq_bits,
        return test_bit(diff, seq_bits) != 0;
 }
 
-/* turn corresponding bit on, so we can remember that we got the packet */
+/**
+ * batadv_set_bit() - Turn corresponding bit on, so we can remember that we got
+ *  the packet
+ * @seq_bits: bitmap of the packet receive window
+ * @n: relative sequence number of newly received packet
+ */
 static inline void batadv_set_bit(unsigned long *seq_bits, s32 n)
 {
        /* if too old, just drop it */
index cdd8e8e4df0b382b21ff674b9aafcd19e9a581e7..fad47853ad3c5e0bae2ca2b404badc6797d7f1e9 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
@@ -24,7 +25,7 @@
 #include <linux/crc16.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
@@ -49,6 +50,7 @@
 #include <net/genetlink.h>
 #include <net/netlink.h>
 #include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "hard-interface.h"
@@ -56,7 +58,6 @@
 #include "log.h"
 #include "netlink.h"
 #include "originator.h"
-#include "packet.h"
 #include "soft-interface.h"
 #include "sysfs.h"
 #include "translation-table.h"
@@ -69,7 +70,7 @@ batadv_bla_send_announce(struct batadv_priv *bat_priv,
                         struct batadv_bla_backbone_gw *backbone_gw);
 
 /**
- * batadv_choose_claim - choose the right bucket for a claim.
+ * batadv_choose_claim() - choose the right bucket for a claim.
  * @data: data to hash
  * @size: size of the hash table
  *
@@ -87,7 +88,7 @@ static inline u32 batadv_choose_claim(const void *data, u32 size)
 }
 
 /**
- * batadv_choose_backbone_gw - choose the right bucket for a backbone gateway.
+ * batadv_choose_backbone_gw() - choose the right bucket for a backbone gateway.
  * @data: data to hash
  * @size: size of the hash table
  *
@@ -105,7 +106,7 @@ static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
 }
 
 /**
- * batadv_compare_backbone_gw - compare address and vid of two backbone gws
+ * batadv_compare_backbone_gw() - compare address and vid of two backbone gws
  * @node: list node of the first entry to compare
  * @data2: pointer to the second backbone gateway
  *
@@ -129,7 +130,7 @@ static bool batadv_compare_backbone_gw(const struct hlist_node *node,
 }
 
 /**
- * batadv_compare_claim - compare address and vid of two claims
+ * batadv_compare_claim() - compare address and vid of two claims
  * @node: list node of the first entry to compare
  * @data2: pointer to the second claims
  *
@@ -153,7 +154,7 @@ static bool batadv_compare_claim(const struct hlist_node *node,
 }
 
 /**
- * batadv_backbone_gw_release - release backbone gw from lists and queue for
+ * batadv_backbone_gw_release() - release backbone gw from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the backbone gw
  */
@@ -168,7 +169,7 @@ static void batadv_backbone_gw_release(struct kref *ref)
 }
 
 /**
- * batadv_backbone_gw_put - decrement the backbone gw refcounter and possibly
+ * batadv_backbone_gw_put() - decrement the backbone gw refcounter and possibly
  *  release it
  * @backbone_gw: backbone gateway to be free'd
  */
@@ -178,8 +179,8 @@ static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
 }
 
 /**
- * batadv_claim_release - release claim from lists and queue for free after rcu
- *  grace period
+ * batadv_claim_release() - release claim from lists and queue for free after
+ *  rcu grace period
  * @ref: kref pointer of the claim
  */
 static void batadv_claim_release(struct kref *ref)
@@ -204,8 +205,7 @@ static void batadv_claim_release(struct kref *ref)
 }
 
 /**
- * batadv_claim_put - decrement the claim refcounter and possibly
- *  release it
+ * batadv_claim_put() - decrement the claim refcounter and possibly release it
  * @claim: claim to be free'd
  */
 static void batadv_claim_put(struct batadv_bla_claim *claim)
@@ -214,7 +214,7 @@ static void batadv_claim_put(struct batadv_bla_claim *claim)
 }
 
 /**
- * batadv_claim_hash_find - looks for a claim in the claim hash
+ * batadv_claim_hash_find() - looks for a claim in the claim hash
  * @bat_priv: the bat priv with all the soft interface information
  * @data: search data (may be local/static data)
  *
@@ -253,7 +253,7 @@ batadv_claim_hash_find(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_backbone_hash_find - looks for a backbone gateway in the hash
+ * batadv_backbone_hash_find() - looks for a backbone gateway in the hash
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the address of the originator
  * @vid: the VLAN ID
@@ -297,7 +297,7 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
 }
 
 /**
- * batadv_bla_del_backbone_claims - delete all claims for a backbone
+ * batadv_bla_del_backbone_claims() - delete all claims for a backbone
  * @backbone_gw: backbone gateway where the claims should be removed
  */
 static void
@@ -337,7 +337,7 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
 }
 
 /**
- * batadv_bla_send_claim - sends a claim frame according to the provided info
+ * batadv_bla_send_claim() - sends a claim frame according to the provided info
  * @bat_priv: the bat priv with all the soft interface information
  * @mac: the mac address to be announced within the claim
  * @vid: the VLAN ID
@@ -457,7 +457,7 @@ out:
 }
 
 /**
- * batadv_bla_loopdetect_report - worker for reporting the loop
+ * batadv_bla_loopdetect_report() - worker for reporting the loop
  * @work: work queue item
  *
  * Throws an uevent, as the loopdetect check function can't do that itself
@@ -487,7 +487,7 @@ static void batadv_bla_loopdetect_report(struct work_struct *work)
 }
 
 /**
- * batadv_bla_get_backbone_gw - finds or creates a backbone gateway
+ * batadv_bla_get_backbone_gw() - finds or creates a backbone gateway
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the mac address of the originator
  * @vid: the VLAN ID
@@ -560,7 +560,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
 }
 
 /**
- * batadv_bla_update_own_backbone_gw - updates the own backbone gw for a VLAN
+ * batadv_bla_update_own_backbone_gw() - updates the own backbone gw for a VLAN
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the selected primary interface
  * @vid: VLAN identifier
@@ -586,7 +586,7 @@ batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_answer_request - answer a bla request by sending own claims
+ * batadv_bla_answer_request() - answer a bla request by sending own claims
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: interface where the request came on
  * @vid: the vid where the request came on
@@ -636,7 +636,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_send_request - send a request to repeat claims
+ * batadv_bla_send_request() - send a request to repeat claims
  * @backbone_gw: the backbone gateway from whom we are out of sync
  *
  * When the crc is wrong, ask the backbone gateway for a full table update.
@@ -663,7 +663,7 @@ static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
 }
 
 /**
- * batadv_bla_send_announce - Send an announcement frame
+ * batadv_bla_send_announce() - Send an announcement frame
  * @bat_priv: the bat priv with all the soft interface information
  * @backbone_gw: our backbone gateway which should be announced
  */
@@ -684,7 +684,7 @@ static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_add_claim - Adds a claim in the claim hash
+ * batadv_bla_add_claim() - Adds a claim in the claim hash
  * @bat_priv: the bat priv with all the soft interface information
  * @mac: the mac address of the claim
  * @vid: the VLAN ID of the frame
@@ -774,7 +774,7 @@ claim_free_ref:
 }
 
 /**
- * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of
+ * batadv_bla_claim_get_backbone_gw() - Get valid reference for backbone_gw of
  *  claim
  * @claim: claim whose backbone_gw should be returned
  *
@@ -794,7 +794,7 @@ batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
 }
 
 /**
- * batadv_bla_del_claim - delete a claim from the claim hash
+ * batadv_bla_del_claim() - delete a claim from the claim hash
  * @bat_priv: the bat priv with all the soft interface information
  * @mac: mac address of the claim to be removed
  * @vid: VLAN id for the claim to be removed
@@ -822,7 +822,7 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_handle_announce - check for ANNOUNCE frame
+ * batadv_handle_announce() - check for ANNOUNCE frame
  * @bat_priv: the bat priv with all the soft interface information
  * @an_addr: announcement mac address (ARP Sender HW address)
  * @backbone_addr: originator address of the sender (Ethernet source MAC)
@@ -880,7 +880,7 @@ static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
 }
 
 /**
- * batadv_handle_request - check for REQUEST frame
+ * batadv_handle_request() - check for REQUEST frame
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary hard interface of this batman soft interface
  * @backbone_addr: backbone address to be requested (ARP sender HW MAC)
@@ -913,7 +913,7 @@ static bool batadv_handle_request(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_handle_unclaim - check for UNCLAIM frame
+ * batadv_handle_unclaim() - check for UNCLAIM frame
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary hard interface of this batman soft interface
  * @backbone_addr: originator address of the backbone (Ethernet source)
@@ -951,7 +951,7 @@ static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_handle_claim - check for CLAIM frame
+ * batadv_handle_claim() - check for CLAIM frame
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary hard interface of this batman soft interface
  * @backbone_addr: originator address of the backbone (Ethernet Source)
@@ -988,7 +988,7 @@ static bool batadv_handle_claim(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_check_claim_group - check for claim group membership
+ * batadv_check_claim_group() - check for claim group membership
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary interface of this batman interface
  * @hw_src: the Hardware source in the ARP Header
@@ -1063,7 +1063,7 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_process_claim - Check if this is a claim frame, and process it
+ * batadv_bla_process_claim() - Check if this is a claim frame, and process it
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary hard interface of this batman soft interface
  * @skb: the frame to be checked
@@ -1205,7 +1205,7 @@ static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_purge_backbone_gw - Remove backbone gateways after a timeout or
+ * batadv_bla_purge_backbone_gw() - Remove backbone gateways after a timeout or
  *  immediately
  * @bat_priv: the bat priv with all the soft interface information
  * @now: whether the whole hash shall be wiped now
@@ -1258,7 +1258,7 @@ purge_now:
 }
 
 /**
- * batadv_bla_purge_claims - Remove claims after a timeout or immediately
+ * batadv_bla_purge_claims() - Remove claims after a timeout or immediately
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the selected primary interface, may be NULL if now is set
  * @now: whether the whole hash shall be wiped now
@@ -1316,7 +1316,7 @@ skip:
 }
 
 /**
- * batadv_bla_update_orig_address - Update the backbone gateways when the own
+ * batadv_bla_update_orig_address() - Update the backbone gateways when the own
  *  originator address changes
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the new selected primary_if
@@ -1372,7 +1372,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_send_loopdetect - send a loopdetect frame
+ * batadv_bla_send_loopdetect() - send a loopdetect frame
  * @bat_priv: the bat priv with all the soft interface information
  * @backbone_gw: the backbone gateway for which a loop should be detected
  *
@@ -1392,7 +1392,7 @@ batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_status_update - purge bla interfaces if necessary
+ * batadv_bla_status_update() - purge bla interfaces if necessary
  * @net_dev: the soft interface net device
  */
 void batadv_bla_status_update(struct net_device *net_dev)
@@ -1412,7 +1412,7 @@ void batadv_bla_status_update(struct net_device *net_dev)
 }
 
 /**
- * batadv_bla_periodic_work - performs periodic bla work
+ * batadv_bla_periodic_work() - performs periodic bla work
  * @work: kernel work struct
  *
  * periodic work to do:
@@ -1517,7 +1517,7 @@ static struct lock_class_key batadv_claim_hash_lock_class_key;
 static struct lock_class_key batadv_backbone_hash_lock_class_key;
 
 /**
- * batadv_bla_init - initialize all bla structures
+ * batadv_bla_init() - initialize all bla structures
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 on success, < 0 on error.
@@ -1579,7 +1579,7 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_bla_check_bcast_duplist - Check if a frame is in the broadcast dup.
+ * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: contains the bcast_packet to be checked
  *
@@ -1652,7 +1652,7 @@ out:
 }
 
 /**
- * batadv_bla_is_backbone_gw_orig - Check if the originator is a gateway for
+ * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for
  *  the VLAN identified by vid.
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: originator mac address
@@ -1692,7 +1692,7 @@ bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
 }
 
 /**
- * batadv_bla_is_backbone_gw - check if originator is a backbone gw for a VLAN.
+ * batadv_bla_is_backbone_gw() - check if originator is a backbone gw for a VLAN
  * @skb: the frame to be checked
  * @orig_node: the orig_node of the frame
  * @hdr_size: maximum length of the frame
@@ -1726,7 +1726,7 @@ bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
 }
 
 /**
- * batadv_bla_free - free all bla structures
+ * batadv_bla_free() - free all bla structures
  * @bat_priv: the bat priv with all the soft interface information
  *
  * for softinterface free or module unload
@@ -1753,7 +1753,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_bla_loopdetect_check - check and handle a detected loop
+ * batadv_bla_loopdetect_check() - check and handle a detected loop
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the packet to check
  * @primary_if: interface where the request came on
@@ -1802,7 +1802,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
 }
 
 /**
- * batadv_bla_rx - check packets coming from the mesh.
+ * batadv_bla_rx() - check packets coming from the mesh.
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
@@ -1914,7 +1914,7 @@ out:
 }
 
 /**
- * batadv_bla_tx - check packets going into the mesh
+ * batadv_bla_tx() - check packets going into the mesh
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
@@ -2022,7 +2022,7 @@ out:
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_bla_claim_table_seq_print_text - print the claim table in a seq file
+ * batadv_bla_claim_table_seq_print_text() - print the claim table in a seq file
  * @seq: seq file to print on
  * @offset: not used
  *
@@ -2084,7 +2084,7 @@ out:
 #endif
 
 /**
- * batadv_bla_claim_dump_entry - dump one entry of the claim table
+ * batadv_bla_claim_dump_entry() - dump one entry of the claim table
  * to a netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
@@ -2143,7 +2143,7 @@ out:
 }
 
 /**
- * batadv_bla_claim_dump_bucket - dump one bucket of the claim table
+ * batadv_bla_claim_dump_bucket() - dump one bucket of the claim table
  * to a netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
@@ -2180,7 +2180,7 @@ unlock:
 }
 
 /**
- * batadv_bla_claim_dump - dump claim table to a netlink socket
+ * batadv_bla_claim_dump() - dump claim table to a netlink socket
  * @msg: buffer for the message
  * @cb: callback structure containing arguments
  *
@@ -2247,8 +2247,8 @@ out:
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_bla_backbone_table_seq_print_text - print the backbone table in a seq
- *  file
+ * batadv_bla_backbone_table_seq_print_text() - print the backbone table in a
+ *  seq file
  * @seq: seq file to print on
  * @offset: not used
  *
@@ -2312,8 +2312,8 @@ out:
 #endif
 
 /**
- * batadv_bla_backbone_dump_entry - dump one entry of the backbone table
- * to a netlink socket
+ * batadv_bla_backbone_dump_entry() - dump one entry of the backbone table to a
+ *  netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
  * @seq: Sequence number of netlink message
@@ -2373,8 +2373,8 @@ out:
 }
 
 /**
- * batadv_bla_backbone_dump_bucket - dump one bucket of the backbone table
- * to a netlink socket
+ * batadv_bla_backbone_dump_bucket() - dump one bucket of the backbone table to
+ *  a netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
  * @seq: Sequence number of netlink message
@@ -2410,7 +2410,7 @@ unlock:
 }
 
 /**
- * batadv_bla_backbone_dump - dump backbone table to a netlink socket
+ * batadv_bla_backbone_dump() - dump backbone table to a netlink socket
  * @msg: buffer for the message
  * @cb: callback structure containing arguments
  *
@@ -2477,7 +2477,7 @@ out:
 
 #ifdef CONFIG_BATMAN_ADV_DAT
 /**
- * batadv_bla_check_claim - check if address is claimed
+ * batadv_bla_check_claim() - check if address is claimed
  *
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: mac address of which the claim status is checked
index 234775748b8eae9477f802804f004e50d2a4840c..b27571abcd2ff2a29ae0a28c7aa52a4d8fb22483 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
@@ -30,8 +31,8 @@ struct seq_file;
 struct sk_buff;
 
 /**
- * batadv_bla_is_loopdetect_mac - check if the mac address is from a loop detect
- *  frame sent by bridge loop avoidance
+ * batadv_bla_is_loopdetect_mac() - check if the mac address is from a loop
+ *  detect frame sent by bridge loop avoidance
  * @mac: mac address to check
  *
  * Return: true if the it looks like a loop detect frame
index e32ad47c6efdf17914aad1e89029020d15801150..21d1189957a7f4a91347f0bffcb528c7a0b1fc1b 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -25,7 +26,6 @@
 #include <linux/fs.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
-#include <linux/sched.h> /* for linux/wait.h */
 #include <linux/seq_file.h>
 #include <linux/stat.h>
 #include <linux/stddef.h>
@@ -66,8 +66,8 @@ static int batadv_originators_open(struct inode *inode, struct file *file)
 }
 
 /**
- * batadv_originators_hardif_open - handles debugfs output for the
- *  originator table of an hard interface
+ * batadv_originators_hardif_open() - handles debugfs output for the originator
+ *  table of an hard interface
  * @inode: inode pointer to debugfs file
  * @file: pointer to the seq_file
  *
@@ -117,7 +117,7 @@ static int batadv_bla_backbone_table_open(struct inode *inode,
 
 #ifdef CONFIG_BATMAN_ADV_DAT
 /**
- * batadv_dat_cache_open - Prepare file handler for reads from dat_chache
+ * batadv_dat_cache_open() - Prepare file handler for reads from dat_chache
  * @inode: inode which was opened
  * @file: file handle to be initialized
  *
@@ -154,7 +154,7 @@ static int batadv_nc_nodes_open(struct inode *inode, struct file *file)
 
 #ifdef CONFIG_BATMAN_ADV_MCAST
 /**
- * batadv_mcast_flags_open - prepare file handler for reads from mcast_flags
+ * batadv_mcast_flags_open() - prepare file handler for reads from mcast_flags
  * @inode: inode which was opened
  * @file: file handle to be initialized
  *
@@ -259,6 +259,9 @@ static struct batadv_debuginfo *batadv_hardif_debuginfos[] = {
        NULL,
 };
 
+/**
+ * batadv_debugfs_init() - Initialize soft interface independent debugfs entries
+ */
 void batadv_debugfs_init(void)
 {
        struct batadv_debuginfo **bat_debug;
@@ -289,6 +292,9 @@ err:
        batadv_debugfs = NULL;
 }
 
+/**
+ * batadv_debugfs_destroy() - Remove all debugfs entries
+ */
 void batadv_debugfs_destroy(void)
 {
        debugfs_remove_recursive(batadv_debugfs);
@@ -296,7 +302,7 @@ void batadv_debugfs_destroy(void)
 }
 
 /**
- * batadv_debugfs_add_hardif - creates the base directory for a hard interface
+ * batadv_debugfs_add_hardif() - creates the base directory for a hard interface
  *  in debugfs.
  * @hard_iface: hard interface which should be added.
  *
@@ -338,7 +344,7 @@ out:
 }
 
 /**
- * batadv_debugfs_del_hardif - delete the base directory for a hard interface
+ * batadv_debugfs_del_hardif() - delete the base directory for a hard interface
  *  in debugfs.
  * @hard_iface: hard interface which is deleted.
  */
@@ -355,6 +361,12 @@ void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface)
        }
 }
 
+/**
+ * batadv_debugfs_add_meshif() - Initialize interface dependent debugfs entries
+ * @dev: netdev struct of the soft interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_debugfs_add_meshif(struct net_device *dev)
 {
        struct batadv_priv *bat_priv = netdev_priv(dev);
@@ -401,6 +413,10 @@ out:
        return -ENOMEM;
 }
 
+/**
+ * batadv_debugfs_del_meshif() - Remove interface dependent debugfs entries
+ * @dev: netdev struct of the soft interface
+ */
 void batadv_debugfs_del_meshif(struct net_device *dev)
 {
        struct batadv_priv *bat_priv = netdev_priv(dev);
index 9c5d4a65b98c35239709d9258bb889a61ffef8c2..90a08d35c501862f8cdfb039a13fc50f470a551f 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
index 760c0de725826c752114b86fa442c168cd67c543..9703c791ffc5ac2fd375531073f134e6beb86141 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
@@ -23,7 +24,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
@@ -55,7 +56,7 @@
 static void batadv_dat_purge(struct work_struct *work);
 
 /**
- * batadv_dat_start_timer - initialise the DAT periodic worker
+ * batadv_dat_start_timer() - initialise the DAT periodic worker
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
@@ -66,7 +67,7 @@ static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_dat_entry_release - release dat_entry from lists and queue for free
+ * batadv_dat_entry_release() - release dat_entry from lists and queue for free
  *  after rcu grace period
  * @ref: kref pointer of the dat_entry
  */
@@ -80,7 +81,7 @@ static void batadv_dat_entry_release(struct kref *ref)
 }
 
 /**
- * batadv_dat_entry_put - decrement the dat_entry refcounter and possibly
+ * batadv_dat_entry_put() - decrement the dat_entry refcounter and possibly
  *  release it
  * @dat_entry: dat_entry to be free'd
  */
@@ -90,7 +91,7 @@ static void batadv_dat_entry_put(struct batadv_dat_entry *dat_entry)
 }
 
 /**
- * batadv_dat_to_purge - check whether a dat_entry has to be purged or not
+ * batadv_dat_to_purge() - check whether a dat_entry has to be purged or not
  * @dat_entry: the entry to check
  *
  * Return: true if the entry has to be purged now, false otherwise.
@@ -102,7 +103,7 @@ static bool batadv_dat_to_purge(struct batadv_dat_entry *dat_entry)
 }
 
 /**
- * __batadv_dat_purge - delete entries from the DAT local storage
+ * __batadv_dat_purge() - delete entries from the DAT local storage
  * @bat_priv: the bat priv with all the soft interface information
  * @to_purge: function in charge to decide whether an entry has to be purged or
  *           not. This function takes the dat_entry as argument and has to
@@ -145,8 +146,8 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_dat_purge - periodic task that deletes old entries from the local DAT
- * hash table
+ * batadv_dat_purge() - periodic task that deletes old entries from the local
+ *  DAT hash table
  * @work: kernel work struct
  */
 static void batadv_dat_purge(struct work_struct *work)
@@ -164,7 +165,7 @@ static void batadv_dat_purge(struct work_struct *work)
 }
 
 /**
- * batadv_compare_dat - comparing function used in the local DAT hash table
+ * batadv_compare_dat() - comparing function used in the local DAT hash table
  * @node: node in the local table
  * @data2: second object to compare the node to
  *
@@ -179,7 +180,7 @@ static bool batadv_compare_dat(const struct hlist_node *node, const void *data2)
 }
 
 /**
- * batadv_arp_hw_src - extract the hw_src field from an ARP packet
+ * batadv_arp_hw_src() - extract the hw_src field from an ARP packet
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
@@ -196,7 +197,7 @@ static u8 *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
 }
 
 /**
- * batadv_arp_ip_src - extract the ip_src field from an ARP packet
+ * batadv_arp_ip_src() - extract the ip_src field from an ARP packet
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
@@ -208,7 +209,7 @@ static __be32 batadv_arp_ip_src(struct sk_buff *skb, int hdr_size)
 }
 
 /**
- * batadv_arp_hw_dst - extract the hw_dst field from an ARP packet
+ * batadv_arp_hw_dst() - extract the hw_dst field from an ARP packet
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
@@ -220,7 +221,7 @@ static u8 *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
 }
 
 /**
- * batadv_arp_ip_dst - extract the ip_dst field from an ARP packet
+ * batadv_arp_ip_dst() - extract the ip_dst field from an ARP packet
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
@@ -232,7 +233,7 @@ static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
 }
 
 /**
- * batadv_hash_dat - compute the hash value for an IP address
+ * batadv_hash_dat() - compute the hash value for an IP address
  * @data: data to hash
  * @size: size of the hash table
  *
@@ -267,7 +268,7 @@ static u32 batadv_hash_dat(const void *data, u32 size)
 }
 
 /**
- * batadv_dat_entry_hash_find - look for a given dat_entry in the local hash
+ * batadv_dat_entry_hash_find() - look for a given dat_entry in the local hash
  * table
  * @bat_priv: the bat priv with all the soft interface information
  * @ip: search key
@@ -310,7 +311,7 @@ batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip,
 }
 
 /**
- * batadv_dat_entry_add - add a new dat entry or update it if already exists
+ * batadv_dat_entry_add() - add a new dat entry or update it if already exists
  * @bat_priv: the bat priv with all the soft interface information
  * @ip: ipv4 to add/edit
  * @mac_addr: mac address to assign to the given ipv4
@@ -367,7 +368,8 @@ out:
 #ifdef CONFIG_BATMAN_ADV_DEBUG
 
 /**
- * batadv_dbg_arp - print a debug message containing all the ARP packet details
+ * batadv_dbg_arp() - print a debug message containing all the ARP packet
+ *  details
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
@@ -448,7 +450,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
 #endif /* CONFIG_BATMAN_ADV_DEBUG */
 
 /**
- * batadv_is_orig_node_eligible - check whether a node can be a DHT candidate
+ * batadv_is_orig_node_eligible() - check whether a node can be a DHT candidate
  * @res: the array with the already selected candidates
  * @select: number of already selected candidates
  * @tmp_max: address of the currently evaluated node
@@ -502,7 +504,7 @@ out:
 }
 
 /**
- * batadv_choose_next_candidate - select the next DHT candidate
+ * batadv_choose_next_candidate() - select the next DHT candidate
  * @bat_priv: the bat priv with all the soft interface information
  * @cands: candidates array
  * @select: number of candidates already present in the array
@@ -566,8 +568,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_dat_select_candidates - select the nodes which the DHT message has to
- * be sent to
+ * batadv_dat_select_candidates() - select the nodes which the DHT message has
+ *  to be sent to
  * @bat_priv: the bat priv with all the soft interface information
  * @ip_dst: ipv4 to look up in the DHT
  * @vid: VLAN identifier
@@ -612,7 +614,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
 }
 
 /**
- * batadv_dat_send_data - send a payload to the selected candidates
+ * batadv_dat_send_data() - send a payload to the selected candidates
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: payload to send
  * @ip: the DHT key
@@ -688,7 +690,7 @@ out:
 }
 
 /**
- * batadv_dat_tvlv_container_update - update the dat tvlv container after dat
+ * batadv_dat_tvlv_container_update() - update the dat tvlv container after dat
  *  setting change
  * @bat_priv: the bat priv with all the soft interface information
  */
@@ -710,7 +712,7 @@ static void batadv_dat_tvlv_container_update(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_dat_status_update - update the dat tvlv container after dat
+ * batadv_dat_status_update() - update the dat tvlv container after dat
  *  setting change
  * @net_dev: the soft interface net device
  */
@@ -722,7 +724,7 @@ void batadv_dat_status_update(struct net_device *net_dev)
 }
 
 /**
- * batadv_dat_tvlv_ogm_handler_v1 - process incoming dat tvlv container
+ * batadv_dat_tvlv_ogm_handler_v1() - process incoming dat tvlv container
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node of the ogm
  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -741,7 +743,7 @@ static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_dat_hash_free - free the local DAT hash table
+ * batadv_dat_hash_free() - free the local DAT hash table
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_dat_hash_free(struct batadv_priv *bat_priv)
@@ -757,7 +759,7 @@ static void batadv_dat_hash_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_dat_init - initialise the DAT internals
+ * batadv_dat_init() - initialise the DAT internals
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 in case of success, a negative error code otherwise
@@ -782,7 +784,7 @@ int batadv_dat_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_dat_free - free the DAT internals
+ * batadv_dat_free() - free the DAT internals
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_dat_free(struct batadv_priv *bat_priv)
@@ -797,7 +799,7 @@ void batadv_dat_free(struct batadv_priv *bat_priv)
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_dat_cache_seq_print_text - print the local DAT hash table
+ * batadv_dat_cache_seq_print_text() - print the local DAT hash table
  * @seq: seq file to print on
  * @offset: not used
  *
@@ -850,7 +852,7 @@ out:
 #endif
 
 /**
- * batadv_arp_get_type - parse an ARP packet and gets the type
+ * batadv_arp_get_type() - parse an ARP packet and get the type
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to analyse
  * @hdr_size: size of the possible header before the ARP packet in the skb
@@ -924,7 +926,7 @@ out:
 }
 
 /**
- * batadv_dat_get_vid - extract the VLAN identifier from skb if any
+ * batadv_dat_get_vid() - extract the VLAN identifier from skb if any
  * @skb: the buffer containing the packet to extract the VID from
  * @hdr_size: the size of the batman-adv header encapsulating the packet
  *
@@ -950,7 +952,7 @@ static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size)
 }
 
 /**
- * batadv_dat_arp_create_reply - create an ARP Reply
+ * batadv_dat_arp_create_reply() - create an ARP Reply
  * @bat_priv: the bat priv with all the soft interface information
  * @ip_src: ARP sender IP
  * @ip_dst: ARP target IP
@@ -985,7 +987,7 @@ batadv_dat_arp_create_reply(struct batadv_priv *bat_priv, __be32 ip_src,
 }
 
 /**
- * batadv_dat_snoop_outgoing_arp_request - snoop the ARP request and try to
+ * batadv_dat_snoop_outgoing_arp_request() - snoop the ARP request and try to
  * answer using DAT
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
@@ -1083,7 +1085,7 @@ out:
 }
 
 /**
- * batadv_dat_snoop_incoming_arp_request - snoop the ARP request and try to
+ * batadv_dat_snoop_incoming_arp_request() - snoop the ARP request and try to
  * answer using the local DAT storage
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
@@ -1153,7 +1155,7 @@ out:
 }
 
 /**
- * batadv_dat_snoop_outgoing_arp_reply - snoop the ARP reply and fill the DHT
+ * batadv_dat_snoop_outgoing_arp_reply() - snoop the ARP reply and fill the DHT
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
  */
@@ -1193,8 +1195,8 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_dat_snoop_incoming_arp_reply - snoop the ARP reply and fill the local
- * DAT storage only
+ * batadv_dat_snoop_incoming_arp_reply() - snoop the ARP reply and fill the
+ *  local DAT storage only
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
  * @hdr_size: size of the encapsulation header
@@ -1282,8 +1284,8 @@ out:
 }
 
 /**
- * batadv_dat_drop_broadcast_packet - check if an ARP request has to be dropped
- * (because the node has already obtained the reply via DAT) or not
+ * batadv_dat_drop_broadcast_packet() - check if an ARP request has to be
+ *  dropped (because the node has already obtained the reply via DAT) or not
  * @bat_priv: the bat priv with all the soft interface information
  * @forw_packet: the broadcast packet
  *
index ec364a3c1c6691dc4d5659d6edd2693c607e128a..12897eb46268c6b1b92def9f39666280d52cfdec 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
@@ -23,9 +24,9 @@
 #include <linux/compiler.h>
 #include <linux/netdevice.h>
 #include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "originator.h"
-#include "packet.h"
 
 struct seq_file;
 struct sk_buff;
@@ -48,7 +49,7 @@ bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
                                      struct batadv_forw_packet *forw_packet);
 
 /**
- * batadv_dat_init_orig_node_addr - assign a DAT address to the orig_node
+ * batadv_dat_init_orig_node_addr() - assign a DAT address to the orig_node
  * @orig_node: the node to assign the DAT address to
  */
 static inline void
@@ -61,7 +62,7 @@ batadv_dat_init_orig_node_addr(struct batadv_orig_node *orig_node)
 }
 
 /**
- * batadv_dat_init_own_addr - assign a DAT address to the node itself
+ * batadv_dat_init_own_addr() - assign a DAT address to the node itself
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: a pointer to the primary interface
  */
@@ -82,7 +83,7 @@ void batadv_dat_free(struct batadv_priv *bat_priv);
 int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset);
 
 /**
- * batadv_dat_inc_counter - increment the correct DAT packet counter
+ * batadv_dat_inc_counter() - increment the correct DAT packet counter
  * @bat_priv: the bat priv with all the soft interface information
  * @subtype: the 4addr subtype of the packet to be counted
  *
index ebe6e38934e46ed5de4d30204e791dbe40285fcc..22dde42fd80e63e79faebfdb0d807786f1b9a4b5 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll <martin@hundeboll.net>
@@ -22,7 +23,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "hard-interface.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 #include "soft-interface.h"
 
 /**
- * batadv_frag_clear_chain - delete entries in the fragment buffer chain
+ * batadv_frag_clear_chain() - delete entries in the fragment buffer chain
  * @head: head of chain with entries.
  * @dropped: whether the chain is cleared because all fragments are dropped
  *
@@ -65,7 +66,7 @@ static void batadv_frag_clear_chain(struct hlist_head *head, bool dropped)
 }
 
 /**
- * batadv_frag_purge_orig - free fragments associated to an orig
+ * batadv_frag_purge_orig() - free fragments associated to an orig
  * @orig_node: originator to free fragments from
  * @check_cb: optional function to tell if an entry should be purged
  */
@@ -89,7 +90,7 @@ void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_frag_size_limit - maximum possible size of packet to be fragmented
+ * batadv_frag_size_limit() - maximum possible size of packet to be fragmented
  *
  * Return: the maximum size of payload that can be fragmented.
  */
@@ -104,7 +105,7 @@ static int batadv_frag_size_limit(void)
 }
 
 /**
- * batadv_frag_init_chain - check and prepare fragment chain for new fragment
+ * batadv_frag_init_chain() - check and prepare fragment chain for new fragment
  * @chain: chain in fragments table to init
  * @seqno: sequence number of the received fragment
  *
@@ -134,7 +135,7 @@ static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
 }
 
 /**
- * batadv_frag_insert_packet - insert a fragment into a fragment chain
+ * batadv_frag_insert_packet() - insert a fragment into a fragment chain
  * @orig_node: originator that the fragment was received from
  * @skb: skb to insert
  * @chain_out: list head to attach complete chains of fragments to
@@ -248,7 +249,7 @@ err:
 }
 
 /**
- * batadv_frag_merge_packets - merge a chain of fragments
+ * batadv_frag_merge_packets() - merge a chain of fragments
  * @chain: head of chain with fragments
  *
  * Expand the first skb in the chain and copy the content of the remaining
@@ -306,7 +307,7 @@ free:
 }
 
 /**
- * batadv_frag_skb_buffer - buffer fragment for later merge
+ * batadv_frag_skb_buffer() - buffer fragment for later merge
  * @skb: skb to buffer
  * @orig_node_src: originator that the skb is received from
  *
@@ -346,7 +347,7 @@ out_err:
 }
 
 /**
- * batadv_frag_skb_fwd - forward fragments that would exceed MTU when merged
+ * batadv_frag_skb_fwd() - forward fragments that would exceed MTU when merged
  * @skb: skb to forward
  * @recv_if: interface that the skb is received on
  * @orig_node_src: originator that the skb is received from
@@ -400,7 +401,7 @@ out:
 }
 
 /**
- * batadv_frag_create - create a fragment from skb
+ * batadv_frag_create() - create a fragment from skb
  * @skb: skb to create fragment from
  * @frag_head: header to use in new fragment
  * @fragment_size: size of new fragment
@@ -438,7 +439,7 @@ err:
 }
 
 /**
- * batadv_frag_send_packet - create up to 16 fragments from the passed skb
+ * batadv_frag_send_packet() - create up to 16 fragments from the passed skb
  * @skb: skb to create fragments from
  * @orig_node: final destination of the created fragments
  * @neigh_node: next-hop of the created fragments
index 1a2d6c3087455d630d574b6d7b749fadda0a136e..138b22a1836aff700e8be0ae406ed69a3ae21990 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll <martin@hundeboll.net>
@@ -39,7 +40,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
                            struct batadv_neigh_node *neigh_node);
 
 /**
- * batadv_frag_check_entry - check if a list of fragments has timed out
+ * batadv_frag_check_entry() - check if a list of fragments has timed out
  * @frags_entry: table entry to check
  *
  * Return: true if the frags entry has timed out, false otherwise.
index 10d521f0b17f6faaaeae50cfb2a624cb86be93dd..37fe9a644f22f29d2fdd360f1f6bd63a22cd2680 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -22,7 +23,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/in.h>
@@ -42,6 +43,7 @@
 #include <linux/stddef.h>
 #include <linux/udp.h>
 #include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "gateway_common.h"
@@ -49,7 +51,6 @@
 #include "log.h"
 #include "netlink.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "soft-interface.h"
 #include "sysfs.h"
@@ -68,8 +69,8 @@
 #define BATADV_DHCP_CHADDR_OFFSET      28
 
 /**
- * batadv_gw_node_release - release gw_node from lists and queue for free after
- *  rcu grace period
+ * batadv_gw_node_release() - release gw_node from lists and queue for free
+ *  after rcu grace period
  * @ref: kref pointer of the gw_node
  */
 static void batadv_gw_node_release(struct kref *ref)
@@ -83,7 +84,8 @@ static void batadv_gw_node_release(struct kref *ref)
 }
 
 /**
- * batadv_gw_node_put - decrement the gw_node refcounter and possibly release it
+ * batadv_gw_node_put() - decrement the gw_node refcounter and possibly release
+ *  it
  * @gw_node: gateway node to free
  */
 void batadv_gw_node_put(struct batadv_gw_node *gw_node)
@@ -91,6 +93,12 @@ void batadv_gw_node_put(struct batadv_gw_node *gw_node)
        kref_put(&gw_node->refcount, batadv_gw_node_release);
 }
 
+/**
+ * batadv_gw_get_selected_gw_node() - Get currently selected gateway
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: selected gateway (with increased refcnt), NULL on errors
+ */
 struct batadv_gw_node *
 batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
 {
@@ -109,6 +117,12 @@ out:
        return gw_node;
 }
 
+/**
+ * batadv_gw_get_selected_orig() - Get originator of currently selected gateway
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: orig_node of selected gateway (with increased refcnt), NULL on errors
+ */
 struct batadv_orig_node *
 batadv_gw_get_selected_orig(struct batadv_priv *bat_priv)
 {
@@ -155,7 +169,7 @@ static void batadv_gw_select(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_gw_reselect - force a gateway reselection
+ * batadv_gw_reselect() - force a gateway reselection
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Set a flag to remind the GW component to perform a new gateway reselection.
@@ -171,7 +185,7 @@ void batadv_gw_reselect(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_gw_check_client_stop - check if client mode has been switched off
+ * batadv_gw_check_client_stop() - check if client mode has been switched off
  * @bat_priv: the bat priv with all the soft interface information
  *
  * This function assumes the caller has checked that the gw state *is actually
@@ -202,6 +216,10 @@ void batadv_gw_check_client_stop(struct batadv_priv *bat_priv)
        batadv_gw_node_put(curr_gw);
 }
 
+/**
+ * batadv_gw_election() - Elect the best gateway
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_gw_election(struct batadv_priv *bat_priv)
 {
        struct batadv_gw_node *curr_gw = NULL;
@@ -290,6 +308,11 @@ out:
                batadv_neigh_ifinfo_put(router_ifinfo);
 }
 
+/**
+ * batadv_gw_check_election() - Elect orig node as best gateway when eligible
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which is to be checked
+ */
 void batadv_gw_check_election(struct batadv_priv *bat_priv,
                              struct batadv_orig_node *orig_node)
 {
@@ -321,7 +344,7 @@ out:
 }
 
 /**
- * batadv_gw_node_add - add gateway node to list of available gateways
+ * batadv_gw_node_add() - add gateway node to list of available gateways
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: originator announcing gateway capabilities
  * @gateway: announced bandwidth information
@@ -364,7 +387,7 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_gw_node_get - retrieve gateway node from list of available gateways
+ * batadv_gw_node_get() - retrieve gateway node from list of available gateways
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: originator announcing gateway capabilities
  *
@@ -393,7 +416,7 @@ struct batadv_gw_node *batadv_gw_node_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_gw_node_update - update list of available gateways with changed
+ * batadv_gw_node_update() - update list of available gateways with changed
  *  bandwidth information
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: originator announcing gateway capabilities
@@ -458,6 +481,11 @@ out:
                batadv_gw_node_put(gw_node);
 }
 
+/**
+ * batadv_gw_node_delete() - Remove orig_node from gateway list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which is currently in process of being removed
+ */
 void batadv_gw_node_delete(struct batadv_priv *bat_priv,
                           struct batadv_orig_node *orig_node)
 {
@@ -469,6 +497,10 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
        batadv_gw_node_update(bat_priv, orig_node, &gateway);
 }
 
+/**
+ * batadv_gw_node_free() - Free gateway information from soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_gw_node_free(struct batadv_priv *bat_priv)
 {
        struct batadv_gw_node *gw_node;
@@ -484,6 +516,14 @@ void batadv_gw_node_free(struct batadv_priv *bat_priv)
 }
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
+/**
+ * batadv_gw_client_seq_print_text() - Print the gateway table in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
@@ -514,7 +554,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
 #endif
 
 /**
- * batadv_gw_dump - Dump gateways into a message
+ * batadv_gw_dump() - Dump gateways into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  *
@@ -567,7 +607,7 @@ out:
 }
 
 /**
- * batadv_gw_dhcp_recipient_get - check if a packet is a DHCP message
+ * batadv_gw_dhcp_recipient_get() - check if a packet is a DHCP message
  * @skb: the packet to check
  * @header_len: a pointer to the batman-adv header size
  * @chaddr: buffer where the client address will be stored. Valid
@@ -686,7 +726,8 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
 }
 
 /**
- * batadv_gw_out_of_range - check if the dhcp request destination is the best gw
+ * batadv_gw_out_of_range() - check if the dhcp request destination is the best
+ *  gateway
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the outgoing packet
  *
index 3baa3d466e5e1089d1e0dcd9fb62de6c7cfe01c1..981f58421a323b9d9941450fdc6fb693199283f0 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
index 2c26039c23fcd79385ea11031fc17973840700a3..b3e156af2256cff85a1d362112ccf577ac7fa348 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
 #include <linux/netdevice.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "gateway_client.h"
 #include "log.h"
-#include "packet.h"
 #include "tvlv.h"
 
 /**
- * batadv_parse_throughput - parse supplied string buffer to extract throughput
- *  information
+ * batadv_parse_throughput() - parse supplied string buffer to extract
+ *  throughput information
  * @net_dev: the soft interface net device
  * @buff: string buffer to parse
  * @description: text shown when throughput string cannot be parsed
@@ -100,8 +101,8 @@ bool batadv_parse_throughput(struct net_device *net_dev, char *buff,
 }
 
 /**
- * batadv_parse_gw_bandwidth - parse supplied string buffer to extract download
- *  and upload bandwidth information
+ * batadv_parse_gw_bandwidth() - parse supplied string buffer to extract
+ *  download and upload bandwidth information
  * @net_dev: the soft interface net device
  * @buff: string buffer to parse
  * @down: pointer holding the returned download bandwidth information
@@ -136,8 +137,8 @@ static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
 }
 
 /**
- * batadv_gw_tvlv_container_update - update the gw tvlv container after gateway
- *  setting change
+ * batadv_gw_tvlv_container_update() - update the gw tvlv container after
+ *  gateway setting change
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv)
@@ -164,6 +165,15 @@ void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv)
        }
 }
 
+/**
+ * batadv_gw_bandwidth_set() - Parse and set download/upload gateway bandwidth
+ *  from supplied string buffer
+ * @net_dev: netdev struct of the soft interface
+ * @buff: the buffer containing the user data
+ * @count: number of bytes in the buffer
+ *
+ * Return: 'count' on success or a negative error code in case of failure
+ */
 ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
                                size_t count)
 {
@@ -207,7 +217,7 @@ ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
 }
 
 /**
- * batadv_gw_tvlv_ogm_handler_v1 - process incoming gateway tvlv container
+ * batadv_gw_tvlv_ogm_handler_v1() - process incoming gateway tvlv container
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node of the ogm
  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -248,7 +258,7 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_gw_init - initialise the gateway handling internals
+ * batadv_gw_init() - initialise the gateway handling internals
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_gw_init(struct batadv_priv *bat_priv)
@@ -264,7 +274,7 @@ void batadv_gw_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_gw_free - free the gateway handling internals
+ * batadv_gw_free() - free the gateway handling internals
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_gw_free(struct batadv_priv *bat_priv)
index 0a6a97d201f27513a99ed4b3261138a2c6a8f0e8..afebd9c7edf454ed50e3ddd16211c4a4d3ac5f33 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -32,11 +33,12 @@ enum batadv_gw_modes {
 
 /**
  * enum batadv_bandwidth_units - bandwidth unit types
- * @BATADV_BW_UNIT_KBIT: unit type kbit
- * @BATADV_BW_UNIT_MBIT: unit type mbit
  */
 enum batadv_bandwidth_units {
+       /** @BATADV_BW_UNIT_KBIT: unit type kbit */
        BATADV_BW_UNIT_KBIT,
+
+       /** @BATADV_BW_UNIT_MBIT: unit type mbit */
        BATADV_BW_UNIT_MBIT,
 };
 
index 4e3d5340ad96842e0a12bf5d2892133dec82da37..5f186bff284a37132eaa4862854308163269a22f 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -22,7 +23,7 @@
 #include <linux/bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if.h>
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
@@ -37,6 +38,7 @@
 #include <linux/spinlock.h>
 #include <net/net_namespace.h>
 #include <net/rtnetlink.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bat_v.h"
 #include "bridge_loop_avoidance.h"
 #include "gateway_client.h"
 #include "log.h"
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 #include "soft-interface.h"
 #include "sysfs.h"
 #include "translation-table.h"
 
 /**
- * batadv_hardif_release - release hard interface from lists and queue for
+ * batadv_hardif_release() - release hard interface from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the hard interface
  */
@@ -66,6 +67,12 @@ void batadv_hardif_release(struct kref *ref)
        kfree_rcu(hard_iface, rcu);
 }
 
+/**
+ * batadv_hardif_get_by_netdev() - Get hard interface object of a net_device
+ * @net_dev: net_device to search for
+ *
+ * Return: batadv_hard_iface of net_dev (with increased refcnt), NULL on errors
+ */
 struct batadv_hard_iface *
 batadv_hardif_get_by_netdev(const struct net_device *net_dev)
 {
@@ -86,7 +93,7 @@ out:
 }
 
 /**
- * batadv_getlink_net - return link net namespace (of use fallback)
+ * batadv_getlink_net() - return link net namespace (or use fallback)
  * @netdev: net_device to check
  * @fallback_net: return in case get_link_net is not available for @netdev
  *
@@ -105,7 +112,7 @@ static struct net *batadv_getlink_net(const struct net_device *netdev,
 }
 
 /**
- * batadv_mutual_parents - check if two devices are each others parent
+ * batadv_mutual_parents() - check if two devices are each others parent
  * @dev1: 1st net dev
  * @net1: 1st devices netns
  * @dev2: 2nd net dev
@@ -138,7 +145,7 @@ static bool batadv_mutual_parents(const struct net_device *dev1,
 }
 
 /**
- * batadv_is_on_batman_iface - check if a device is a batman iface descendant
+ * batadv_is_on_batman_iface() - check if a device is a batman iface descendant
  * @net_dev: the device to check
  *
  * If the user creates any virtual device on top of a batman-adv interface, it
@@ -202,7 +209,7 @@ static bool batadv_is_valid_iface(const struct net_device *net_dev)
 }
 
 /**
- * batadv_get_real_netdevice - check if the given netdev struct is a virtual
+ * batadv_get_real_netdevice() - check if the given netdev struct is a virtual
  *  interface on top of another 'real' interface
  * @netdev: the device to check
  *
@@ -246,7 +253,7 @@ out:
 }
 
 /**
- * batadv_get_real_netdev - check if the given net_device struct is a virtual
+ * batadv_get_real_netdev() - check if the given net_device struct is a virtual
  *  interface on top of another 'real' interface
  * @net_device: the device to check
  *
@@ -265,7 +272,7 @@ struct net_device *batadv_get_real_netdev(struct net_device *net_device)
 }
 
 /**
- * batadv_is_wext_netdev - check if the given net_device struct is a
+ * batadv_is_wext_netdev() - check if the given net_device struct is a
  *  wext wifi interface
  * @net_device: the device to check
  *
@@ -289,7 +296,7 @@ static bool batadv_is_wext_netdev(struct net_device *net_device)
 }
 
 /**
- * batadv_is_cfg80211_netdev - check if the given net_device struct is a
+ * batadv_is_cfg80211_netdev() - check if the given net_device struct is a
  *  cfg80211 wifi interface
  * @net_device: the device to check
  *
@@ -309,7 +316,7 @@ static bool batadv_is_cfg80211_netdev(struct net_device *net_device)
 }
 
 /**
- * batadv_wifi_flags_evaluate - calculate wifi flags for net_device
+ * batadv_wifi_flags_evaluate() - calculate wifi flags for net_device
  * @net_device: the device to check
  *
  * Return: batadv_hard_iface_wifi_flags flags of the device
@@ -344,7 +351,7 @@ out:
 }
 
 /**
- * batadv_is_cfg80211_hardif - check if the given hardif is a cfg80211 wifi
+ * batadv_is_cfg80211_hardif() - check if the given hardif is a cfg80211 wifi
  *  interface
  * @hard_iface: the device to check
  *
@@ -362,7 +369,7 @@ bool batadv_is_cfg80211_hardif(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_is_wifi_hardif - check if the given hardif is a wifi interface
+ * batadv_is_wifi_hardif() - check if the given hardif is a wifi interface
  * @hard_iface: the device to check
  *
  * Return: true if the net device is a 802.11 wireless device, false otherwise.
@@ -376,7 +383,7 @@ bool batadv_is_wifi_hardif(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_hardif_no_broadcast - check whether (re)broadcast is necessary
+ * batadv_hardif_no_broadcast() - check whether (re)broadcast is necessary
  * @if_outgoing: the outgoing interface checked and considered for (re)broadcast
  * @orig_addr: the originator of this packet
  * @orig_neigh: originator address of the forwarder we just got the packet from
@@ -560,6 +567,13 @@ static void batadv_hardif_recalc_extra_skbroom(struct net_device *soft_iface)
        soft_iface->needed_tailroom = lower_tailroom;
 }
 
+/**
+ * batadv_hardif_min_mtu() - Calculate maximum MTU for soft interface
+ * @soft_iface: netdev struct of the soft interface
+ *
+ * Return: MTU for the soft-interface (limited by the minimal MTU of all active
+ *  slave interfaces)
+ */
 int batadv_hardif_min_mtu(struct net_device *soft_iface)
 {
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
@@ -606,7 +620,11 @@ out:
        return min_t(int, min_mtu - batadv_max_header_len(), ETH_DATA_LEN);
 }
 
-/* adjusts the MTU if a new interface with a smaller MTU appeared. */
+/**
+ * batadv_update_min_mtu() - Adjusts the MTU if a new interface with a smaller
+ *  MTU appeared
+ * @soft_iface: netdev struct of the soft interface
+ */
 void batadv_update_min_mtu(struct net_device *soft_iface)
 {
        soft_iface->mtu = batadv_hardif_min_mtu(soft_iface);
@@ -667,7 +685,7 @@ batadv_hardif_deactivate_interface(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_master_del_slave - remove hard_iface from the current master interface
+ * batadv_master_del_slave() - remove hard_iface from the current master iface
  * @slave: the interface enslaved in another master
  * @master: the master from which slave has to be removed
  *
@@ -691,6 +709,14 @@ static int batadv_master_del_slave(struct batadv_hard_iface *slave,
        return ret;
 }
 
+/**
+ * batadv_hardif_enable_interface() - Enslave hard interface to soft interface
+ * @hard_iface: hard interface to add to soft interface
+ * @net: the applicable net namespace
+ * @iface_name: name of the soft interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
                                   struct net *net, const char *iface_name)
 {
@@ -802,6 +828,12 @@ err:
        return ret;
 }
 
+/**
+ * batadv_hardif_disable_interface() - Remove hard interface from soft interface
+ * @hard_iface: hard interface to be removed
+ * @autodel: whether to delete soft interface when it doesn't contain any other
+ *  slave interfaces
+ */
 void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
                                     enum batadv_hard_if_cleanup autodel)
 {
@@ -936,6 +968,9 @@ static void batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface)
        batadv_hardif_put(hard_iface);
 }
 
+/**
+ * batadv_hardif_remove_interfaces() - Remove all hard interfaces
+ */
 void batadv_hardif_remove_interfaces(void)
 {
        struct batadv_hard_iface *hard_iface, *hard_iface_tmp;
index 9f9890ff7a22f0ea188b9135a7f3c6fb1958d6b8..de5e9a374ece5f05ae36399cef49f062000a532c 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
 struct net_device;
 struct net;
 
+/**
+ * enum batadv_hard_if_state - State of a hard interface
+ */
 enum batadv_hard_if_state {
+       /**
+        * @BATADV_IF_NOT_IN_USE: interface is not used as slave interface of a
+        * batman-adv soft interface
+        */
        BATADV_IF_NOT_IN_USE,
+
+       /**
+        * @BATADV_IF_TO_BE_REMOVED: interface will be removed from soft
+        * interface
+        */
        BATADV_IF_TO_BE_REMOVED,
+
+       /** @BATADV_IF_INACTIVE: interface is deactivated */
        BATADV_IF_INACTIVE,
+
+       /** @BATADV_IF_ACTIVE: interface is used */
        BATADV_IF_ACTIVE,
+
+       /** @BATADV_IF_TO_BE_ACTIVATED: interface is getting activated */
        BATADV_IF_TO_BE_ACTIVATED,
+
+       /**
+        * @BATADV_IF_I_WANT_YOU: interface is queued up (using sysfs) for being
+        * added as slave interface of a batman-adv soft interface
+        */
        BATADV_IF_I_WANT_YOU,
 };
 
 /**
  * enum batadv_hard_if_bcast - broadcast avoidance options
- * @BATADV_HARDIF_BCAST_OK: Do broadcast on according hard interface
- * @BATADV_HARDIF_BCAST_NORECIPIENT: Broadcast not needed, there is no recipient
- * @BATADV_HARDIF_BCAST_DUPFWD: There is just the neighbor we got it from
- * @BATADV_HARDIF_BCAST_DUPORIG: There is just the originator
  */
 enum batadv_hard_if_bcast {
+       /** @BATADV_HARDIF_BCAST_OK: Do broadcast on according hard interface */
        BATADV_HARDIF_BCAST_OK = 0,
+
+       /**
+        * @BATADV_HARDIF_BCAST_NORECIPIENT: Broadcast not needed, there is no
+        *  recipient
+        */
        BATADV_HARDIF_BCAST_NORECIPIENT,
+
+       /**
+        * @BATADV_HARDIF_BCAST_DUPFWD: There is just the neighbor we got it
+        *  from
+        */
        BATADV_HARDIF_BCAST_DUPFWD,
+
+       /** @BATADV_HARDIF_BCAST_DUPORIG: There is just the originator */
        BATADV_HARDIF_BCAST_DUPORIG,
 };
 
 /**
  * enum batadv_hard_if_cleanup - Cleanup modi for soft_iface after slave removal
- * @BATADV_IF_CLEANUP_KEEP: Don't automatically delete soft-interface
- * @BATADV_IF_CLEANUP_AUTO: Delete soft-interface after last slave was removed
  */
 enum batadv_hard_if_cleanup {
+       /**
+        * @BATADV_IF_CLEANUP_KEEP: Don't automatically delete soft-interface
+        */
        BATADV_IF_CLEANUP_KEEP,
+
+       /**
+        * @BATADV_IF_CLEANUP_AUTO: Delete soft-interface after last slave was
+        *  removed
+        */
        BATADV_IF_CLEANUP_AUTO,
 };
 
@@ -82,7 +121,7 @@ int batadv_hardif_no_broadcast(struct batadv_hard_iface *if_outgoing,
                               u8 *orig_addr, u8 *orig_neigh);
 
 /**
- * batadv_hardif_put - decrement the hard interface refcounter and possibly
+ * batadv_hardif_put() - decrement the hard interface refcounter and possibly
  *  release it
  * @hard_iface: the hard interface to free
  */
@@ -91,6 +130,12 @@ static inline void batadv_hardif_put(struct batadv_hard_iface *hard_iface)
        kref_put(&hard_iface->refcount, batadv_hardif_release);
 }
 
+/**
+ * batadv_primary_if_get_selected() - Get reference to primary interface
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: primary interface (with increased refcnt), otherwise NULL
+ */
 static inline struct batadv_hard_iface *
 batadv_primary_if_get_selected(struct batadv_priv *bat_priv)
 {
index b5f7e13918acdfa35ba73d92a31d8292faf94669..04d964358c98750763ab4c9c6952f82ae115de99 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2006-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
@@ -18,7 +19,7 @@
 #include "hash.h"
 #include "main.h"
 
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/lockdep.h>
 #include <linux/slab.h>
 
@@ -33,7 +34,10 @@ static void batadv_hash_init(struct batadv_hashtable *hash)
        }
 }
 
-/* free only the hashtable and the hash itself. */
+/**
+ * batadv_hash_destroy() - Free only the hashtable and the hash itself
+ * @hash: hash object to destroy
+ */
 void batadv_hash_destroy(struct batadv_hashtable *hash)
 {
        kfree(hash->list_locks);
@@ -41,7 +45,12 @@ void batadv_hash_destroy(struct batadv_hashtable *hash)
        kfree(hash);
 }
 
-/* allocates and clears the hash */
+/**
+ * batadv_hash_new() - Allocates and clears the hashtable
+ * @size: number of hash buckets to allocate
+ *
+ * Return: newly allocated hashtable, NULL on errors
+ */
 struct batadv_hashtable *batadv_hash_new(u32 size)
 {
        struct batadv_hashtable *hash;
@@ -70,6 +79,11 @@ free_hash:
        return NULL;
 }
 
+/**
+ * batadv_hash_set_lock_class() - Set specific lockdep class for hash spinlocks
+ * @hash: hash object to modify
+ * @key: lockdep class key address
+ */
 void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
                                struct lock_class_key *key)
 {
index 0c905e91c5e285136f4959372c02317f5fc41040..4ce1b6d3ad5c7d570697adf5b4f84a24d92faef1 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2006-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
@@ -45,10 +46,18 @@ typedef bool (*batadv_hashdata_compare_cb)(const struct hlist_node *,
 typedef u32 (*batadv_hashdata_choose_cb)(const void *, u32);
 typedef void (*batadv_hashdata_free_cb)(struct hlist_node *, void *);
 
+/**
+ * struct batadv_hashtable - Wrapper of simple hlist based hashtable
+ */
 struct batadv_hashtable {
-       struct hlist_head *table;   /* the hashtable itself with the buckets */
-       spinlock_t *list_locks;     /* spinlock for each hash list entry */
-       u32 size;                   /* size of hashtable */
+       /** @table: the hashtable itself with the buckets */
+       struct hlist_head *table;
+
+       /** @list_locks: spinlock for each hash list entry */
+       spinlock_t *list_locks;
+
+       /** @size: size of hashtable */
+       u32 size;
 };
 
 /* allocates and clears the hash */
@@ -62,7 +71,7 @@ void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
 void batadv_hash_destroy(struct batadv_hashtable *hash);
 
 /**
- *     batadv_hash_add - adds data to the hashtable
+ *     batadv_hash_add() - adds data to the hashtable
  *     @hash: storage hash table
  *     @compare: callback to determine if 2 hash elements are identical
  *     @choose: callback calculating the hash index
@@ -112,8 +121,15 @@ out:
        return ret;
 }
 
-/* removes data from hash, if found. data could be the structure you use with
- * just the key filled, we just need the key for comparing.
+/**
+ * batadv_hash_remove() - Removes data from hash, if found
+ * @hash: hash table
+ * @compare: callback to determine if 2 hash elements are identical
+ * @choose: callback calculating the hash index
+ * @data: data passed to the aforementioned callbacks as argument
+ *
+ * Data could be the structure you use with just the key filled, we just need
+ * the key for comparing.
  *
 * Return: returns pointer to data on success, so you can remove the used
  * structure yourself, or NULL on error
index bded31121d122e7d9053fc0731e9b61c9efef73c..8041cf106c37fe34589861ce8cfb9a796d26ca7b 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -26,6 +27,7 @@
 #include <linux/export.h>
 #include <linux/fcntl.h>
 #include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/string.h>
 #include <linux/uaccess.h>
 #include <linux/wait.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "hard-interface.h"
 #include "log.h"
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 
 static struct batadv_socket_client *batadv_socket_client_hash[256];
@@ -55,6 +57,9 @@ static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
                                     struct batadv_icmp_header *icmph,
                                     size_t icmp_len);
 
+/**
+ * batadv_socket_init() - Initialize soft interface independent socket data
+ */
 void batadv_socket_init(void)
 {
        memset(batadv_socket_client_hash, 0, sizeof(batadv_socket_client_hash));
@@ -314,6 +319,12 @@ static const struct file_operations batadv_fops = {
        .llseek = no_llseek,
 };
 
+/**
+ * batadv_socket_setup() - Create debugfs "socket" file
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_socket_setup(struct batadv_priv *bat_priv)
 {
        struct dentry *d;
@@ -333,7 +344,7 @@ err:
 }
 
 /**
- * batadv_socket_add_packet - schedule an icmp packet to be sent to
+ * batadv_socket_add_packet() - schedule an icmp packet to be sent to
  *  userspace on an icmp socket.
  * @socket_client: the socket this packet belongs to
  * @icmph: pointer to the header of the icmp packet
@@ -390,7 +401,7 @@ static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
 }
 
 /**
- * batadv_socket_receive_packet - schedule an icmp packet to be received
+ * batadv_socket_receive_packet() - schedule an icmp packet to be received
  *  locally and sent to userspace.
  * @icmph: pointer to the header of the icmp packet
  * @icmp_len: total length of the icmp packet
index f3fec40aae862b89da46d802729759ba540cd0a5..84cddd01eeab8f77be5bdca6b72bd4032c16a03a 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
index 4ef4bde2cc2d387d0a2020389889f10399349e0a..da004980ab8bfbc4922d1a22838fbf31390b9d36 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -24,6 +25,7 @@
 #include <linux/export.h>
 #include <linux/fcntl.h>
 #include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -86,6 +88,13 @@ static int batadv_fdebug_log(struct batadv_priv_debug_log *debug_log,
        return 0;
 }
 
+/**
+ * batadv_debug_log() - Add debug log entry
+ * @bat_priv: the bat priv with all the soft interface information
+ * @fmt: format string
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
 {
        va_list args;
@@ -197,6 +206,12 @@ static const struct file_operations batadv_log_fops = {
        .llseek         = no_llseek,
 };
 
+/**
+ * batadv_debug_log_setup() - Initialize debug log
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_debug_log_setup(struct batadv_priv *bat_priv)
 {
        struct dentry *d;
@@ -222,6 +237,10 @@ err:
        return -ENOMEM;
 }
 
+/**
+ * batadv_debug_log_cleanup() - Destroy debug log
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
 {
        kfree(bat_priv->debug_log);
index 65ce97efa6b5946175f64d300573a532f28387f8..35e02b2b9e72bd046cd342f099ae633f5245efdb 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -44,25 +45,33 @@ static inline void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
 
 /**
  * enum batadv_dbg_level - available log levels
- * @BATADV_DBG_BATMAN: OGM and TQ computations related messages
- * @BATADV_DBG_ROUTES: route added / changed / deleted
- * @BATADV_DBG_TT: translation table messages
- * @BATADV_DBG_BLA: bridge loop avoidance messages
- * @BATADV_DBG_DAT: ARP snooping and DAT related messages
- * @BATADV_DBG_NC: network coding related messages
- * @BATADV_DBG_MCAST: multicast related messages
- * @BATADV_DBG_TP_METER: throughput meter messages
- * @BATADV_DBG_ALL: the union of all the above log levels
  */
 enum batadv_dbg_level {
+       /** @BATADV_DBG_BATMAN: OGM and TQ computations related messages */
        BATADV_DBG_BATMAN       = BIT(0),
+
+       /** @BATADV_DBG_ROUTES: route added / changed / deleted */
        BATADV_DBG_ROUTES       = BIT(1),
+
+       /** @BATADV_DBG_TT: translation table messages */
        BATADV_DBG_TT           = BIT(2),
+
+       /** @BATADV_DBG_BLA: bridge loop avoidance messages */
        BATADV_DBG_BLA          = BIT(3),
+
+       /** @BATADV_DBG_DAT: ARP snooping and DAT related messages */
        BATADV_DBG_DAT          = BIT(4),
+
+       /** @BATADV_DBG_NC: network coding related messages */
        BATADV_DBG_NC           = BIT(5),
+
+       /** @BATADV_DBG_MCAST: multicast related messages */
        BATADV_DBG_MCAST        = BIT(6),
+
+       /** @BATADV_DBG_TP_METER: throughput meter messages */
        BATADV_DBG_TP_METER     = BIT(7),
+
+       /** @BATADV_DBG_ALL: the union of all the above log levels */
        BATADV_DBG_ALL          = 255,
 };
 
@@ -70,7 +79,14 @@ enum batadv_dbg_level {
 int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
 __printf(2, 3);
 
-/* possibly ratelimited debug output */
+/**
+ * _batadv_dbg() - Store debug output with(out) ratelimiting
+ * @type: type of debug message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ratelimited: whether output should be rate limited
+ * @fmt: format string
+ * @arg...: variable arguments
+ */
 #define _batadv_dbg(type, bat_priv, ratelimited, fmt, arg...)          \
        do {                                                            \
                struct batadv_priv *__batpriv = (bat_priv);             \
@@ -89,11 +105,30 @@ static inline void _batadv_dbg(int type __always_unused,
 }
 #endif
 
+/**
+ * batadv_dbg() - Store debug output without ratelimiting
+ * @type: type of debug message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @arg...: format string and variable arguments
+ */
 #define batadv_dbg(type, bat_priv, arg...) \
        _batadv_dbg(type, bat_priv, 0, ## arg)
+
+/**
+ * batadv_dbg_ratelimited() - Store debug output with ratelimiting
+ * @type: type of debug message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @arg...: format string and variable arguments
+ */
 #define batadv_dbg_ratelimited(type, bat_priv, arg...) \
        _batadv_dbg(type, bat_priv, 1, ## arg)
 
+/**
+ * batadv_info() - Store message in debug buffer and print it to kmsg buffer
+ * @net_dev: the soft interface net device
+ * @fmt: format string
+ * @arg...: variable arguments
+ */
 #define batadv_info(net_dev, fmt, arg...)                              \
        do {                                                            \
                struct net_device *_netdev = (net_dev);                 \
@@ -101,6 +136,13 @@ static inline void _batadv_dbg(int type __always_unused,
                batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg);      \
                pr_info("%s: " fmt, _netdev->name, ## arg);             \
        } while (0)
+
+/**
+ * batadv_err() - Store error in debug buffer and print it to kmsg buffer
+ * @net_dev: the soft interface net device
+ * @fmt: format string
+ * @arg...: variable arguments
+ */
 #define batadv_err(net_dev, fmt, arg...)                               \
        do {                                                            \
                struct net_device *_netdev = (net_dev);                 \
index 4daed7ad46f21da01f3ab0b3da76b4a1dcd22f70..d31c8266e244ec4e508c719975fbe698ea35047d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
 #include "main.h"
 
 #include <linux/atomic.h>
-#include <linux/bug.h>
+#include <linux/build_bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/crc32c.h>
 #include <linux/errno.h>
-#include <linux/fs.h>
 #include <linux/genetlink.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/init.h>
@@ -45,6 +46,7 @@
 #include <linux/workqueue.h>
 #include <net/dsfield.h>
 #include <net/rtnetlink.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "bat_algo.h"
@@ -62,7 +64,6 @@
 #include "netlink.h"
 #include "network-coding.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 #include "soft-interface.h"
@@ -139,6 +140,12 @@ static void __exit batadv_exit(void)
        batadv_tt_cache_destroy();
 }
 
+/**
+ * batadv_mesh_init() - Initialize soft interface
+ * @soft_iface: netdev struct of the soft interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_mesh_init(struct net_device *soft_iface)
 {
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
@@ -216,6 +223,10 @@ err:
        return ret;
 }
 
+/**
+ * batadv_mesh_free() - Deinitialize soft interface
+ * @soft_iface: netdev struct of the soft interface
+ */
 void batadv_mesh_free(struct net_device *soft_iface)
 {
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
@@ -255,8 +266,8 @@ void batadv_mesh_free(struct net_device *soft_iface)
 }
 
 /**
- * batadv_is_my_mac - check if the given mac address belongs to any of the real
- * interfaces in the current mesh
+ * batadv_is_my_mac() - check if the given mac address belongs to any of the
+ *  real interfaces in the current mesh
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the address to check
  *
@@ -286,7 +297,7 @@ bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_seq_print_text_primary_if_get - called from debugfs table printing
+ * batadv_seq_print_text_primary_if_get() - called from debugfs table printing
  *  function that requires the primary interface
  * @seq: debugfs table seq_file struct
  *
@@ -323,7 +334,7 @@ out:
 #endif
 
 /**
- * batadv_max_header_len - calculate maximum encapsulation overhead for a
+ * batadv_max_header_len() - calculate maximum encapsulation overhead for a
  *  payload packet
  *
  * Return: the maximum encapsulation overhead in bytes.
@@ -348,7 +359,7 @@ int batadv_max_header_len(void)
 }
 
 /**
- * batadv_skb_set_priority - sets skb priority according to packet content
+ * batadv_skb_set_priority() - sets skb priority according to packet content
  * @skb: the packet to be sent
  * @offset: offset to the packet content
  *
@@ -412,6 +423,16 @@ static int batadv_recv_unhandled_packet(struct sk_buff *skb,
 /* incoming packets with the batman ethertype received on any active hard
  * interface
  */
+
+/**
+ * batadv_batman_skb_recv() - Handle incoming message from a hard interface
+ * @skb: the received packet
+ * @dev: the net device that the packet was received on
+ * @ptype: packet type of incoming packet (ETH_P_BATMAN)
+ * @orig_dev: the original receive net device (e.g. bonded device)
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
 int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *ptype,
                           struct net_device *orig_dev)
@@ -535,6 +556,13 @@ static void batadv_recv_handler_init(void)
        batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
 }
 
+/**
+ * batadv_recv_handler_register() - Register handler for batman-adv packet type
+ * @packet_type: batadv_packettype which should be handled
+ * @recv_handler: receive handler for the packet type
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int
 batadv_recv_handler_register(u8 packet_type,
                             int (*recv_handler)(struct sk_buff *,
@@ -552,13 +580,17 @@ batadv_recv_handler_register(u8 packet_type,
        return 0;
 }
 
+/**
+ * batadv_recv_handler_unregister() - Unregister handler for packet type
+ * @packet_type: batadv_packettype which should no longer be handled
+ */
 void batadv_recv_handler_unregister(u8 packet_type)
 {
        batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
 }
 
 /**
- * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
+ * batadv_skb_crc32() - calculate CRC32 of the whole packet and skip bytes in
  *  the header
  * @skb: skb pointing to fragmented socket buffers
  * @payload_ptr: Pointer to position inside the head buffer of the skb
@@ -591,7 +623,7 @@ __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
 }
 
 /**
- * batadv_get_vid - extract the VLAN identifier from skb if any
+ * batadv_get_vid() - extract the VLAN identifier from skb if any
  * @skb: the buffer containing the packet
  * @header_len: length of the batman header preceding the ethernet header
  *
@@ -618,7 +650,7 @@ unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
 }
 
 /**
- * batadv_vlan_ap_isola_get - return the AP isolation status for the given vlan
+ * batadv_vlan_ap_isola_get() - return AP isolation status for the given vlan
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier for which the AP isolation attributed as to be
  *  looked up
index edb2f239d04d7fd1f5d0305f87b0b8f05f1e01f4..f7ba3f96d8f36be0f49541fa49454dda6f73bcee 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -24,7 +25,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2017.4"
+#define BATADV_SOURCE_VERSION "2018.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
  */
 #define BATADV_TP_MAX_NUM 5
 
+/**
+ * enum batadv_mesh_state - State of a soft interface
+ */
 enum batadv_mesh_state {
+       /** @BATADV_MESH_INACTIVE: soft interface is not yet running */
        BATADV_MESH_INACTIVE,
+
+       /** @BATADV_MESH_ACTIVE: interface is up and running */
        BATADV_MESH_ACTIVE,
+
+       /** @BATADV_MESH_DEACTIVATING: interface is getting shut down */
        BATADV_MESH_DEACTIVATING,
 };
 
 #define BATADV_BCAST_QUEUE_LEN         256
 #define BATADV_BATMAN_QUEUE_LEN        256
 
+/**
+ * enum batadv_uev_action - action type of uevent
+ */
 enum batadv_uev_action {
+       /** @BATADV_UEV_ADD: gateway was selected (after none was selected) */
        BATADV_UEV_ADD = 0,
+
+       /**
+        * @BATADV_UEV_DEL: selected gateway was removed and none is selected
+        * anymore
+        */
        BATADV_UEV_DEL,
+
+       /**
+        * @BATADV_UEV_CHANGE: a different gateway was selected as best gateway
+        */
        BATADV_UEV_CHANGE,
+
+       /**
+        * @BATADV_UEV_LOOPDETECT: loop was detected which cannot be handled by
+        * bridge loop avoidance
+        */
        BATADV_UEV_LOOPDETECT,
 };
 
+/**
+ * enum batadv_uev_type - Type of uevent
+ */
 enum batadv_uev_type {
+       /** @BATADV_UEV_GW: selected gateway was modified */
        BATADV_UEV_GW = 0,
+
+       /** @BATADV_UEV_BLA: bridge loop avoidance event */
        BATADV_UEV_BLA,
 };
 
@@ -184,16 +217,14 @@ enum batadv_uev_type {
 
 /* Kernel headers */
 
-#include <linux/bitops.h> /* for packet.h */
 #include <linux/compiler.h>
 #include <linux/etherdevice.h>
-#include <linux/if_ether.h> /* for packet.h */
 #include <linux/if_vlan.h>
 #include <linux/jiffies.h>
 #include <linux/percpu.h>
 #include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
 
-#include "packet.h"
 #include "types.h"
 
 struct net_device;
@@ -202,7 +233,7 @@ struct seq_file;
 struct sk_buff;
 
 /**
- * batadv_print_vid - return printable version of vid information
+ * batadv_print_vid() - return printable version of vid information
  * @vid: the VLAN identifier
  *
  * Return: -1 when no VLAN is used, VLAN id otherwise
@@ -238,7 +269,7 @@ void batadv_recv_handler_unregister(u8 packet_type);
 __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr);
 
 /**
- * batadv_compare_eth - Compare two not u16 aligned Ethernet addresses
+ * batadv_compare_eth() - Compare two not u16 aligned Ethernet addresses
  * @data1: Pointer to a six-byte array containing the Ethernet address
  * @data2: Pointer other six-byte array containing the Ethernet address
  *
@@ -252,7 +283,7 @@ static inline bool batadv_compare_eth(const void *data1, const void *data2)
 }
 
 /**
- * batadv_has_timed_out - compares current time (jiffies) and timestamp +
+ * batadv_has_timed_out() - compares current time (jiffies) and timestamp +
  *  timeout
  * @timestamp:         base value to compare with (in jiffies)
  * @timeout:           added to base value before comparing (in milliseconds)
@@ -265,40 +296,96 @@ static inline bool batadv_has_timed_out(unsigned long timestamp,
        return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout));
 }
 
+/**
+ * batadv_atomic_dec_not_zero() - Decrease unless the number is 0
+ * @v: pointer of type atomic_t
+ *
+ * Return: non-zero if v was not 0, and zero otherwise.
+ */
 #define batadv_atomic_dec_not_zero(v)  atomic_add_unless((v), -1, 0)
 
-/* Returns the smallest signed integer in two's complement with the sizeof x */
+/**
+ * batadv_smallest_signed_int() - Returns the smallest signed integer in two's
+ *  complement with the sizeof x
+ * @x: type of integer
+ *
+ * Return: smallest signed integer of type
+ */
 #define batadv_smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
 
-/* Checks if a sequence number x is a predecessor/successor of y.
- * they handle overflows/underflows and can correctly check for a
- * predecessor/successor unless the variable sequence number has grown by
- * more then 2**(bitwidth(x)-1)-1.
+/**
+ * batadv_seq_before() - Checks if a sequence number x is a predecessor of y
+ * @x: potential predecessor of @y
+ * @y: value to compare @x against
+ *
+ * It handles overflows/underflows and can correctly check for a predecessor
+ * unless the variable sequence number has grown by more than
+ * 2**(bitwidth(x)-1)-1.
+ *
  * This means that for a u8 with the maximum value 255, it would think:
- *  - when adding nothing - it is neither a predecessor nor a successor
- *  - before adding more than 127 to the starting value - it is a predecessor,
- *  - when adding 128 - it is neither a predecessor nor a successor,
- *  - after adding more than 127 to the starting value - it is a successor
+ *
+ * * when adding nothing - it is neither a predecessor nor a successor
+ * * before adding more than 127 to the starting value - it is a predecessor,
+ * * when adding 128 - it is neither a predecessor nor a successor,
+ * * after adding more than 127 to the starting value - it is a successor
+ *
+ * Return: true when x is a predecessor of y, false otherwise
  */
 #define batadv_seq_before(x, y) ({typeof(x)_d1 = (x); \
                                 typeof(y)_d2 = (y); \
                                 typeof(x)_dummy = (_d1 - _d2); \
                                 (void)(&_d1 == &_d2); \
                                 _dummy > batadv_smallest_signed_int(_dummy); })
+
+/**
+ * batadv_seq_after() - Checks if a sequence number x is a successor of y
+ * @x: potential successor of @y
+ * @y: value to compare @x against
+ *
+ * It handles overflows/underflows and can correctly check for a successor
+ * unless the variable sequence number has grown by more than
+ * 2**(bitwidth(x)-1)-1.
+ *
+ * This means that for a u8 with the maximum value 255, it would think:
+ *
+ * * when adding nothing - it is neither a predecessor nor a successor
+ * * before adding more than 127 to the starting value - it is a predecessor,
+ * * when adding 128 - it is neither a predecessor nor a successor,
+ * * after adding more than 127 to the starting value - it is a successor
+ *
+ * Return: true when x is a successor of y, false otherwise
+ */
 #define batadv_seq_after(x, y) batadv_seq_before(y, x)
 
-/* Stop preemption on local cpu while incrementing the counter */
+/**
+ * batadv_add_counter() - Add to per cpu statistics counter of soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ * @idx: counter index which should be modified
+ * @count: value to increase counter by
+ *
+ * Stop preemption on local cpu while incrementing the counter
+ */
 static inline void batadv_add_counter(struct batadv_priv *bat_priv, size_t idx,
                                      size_t count)
 {
        this_cpu_add(bat_priv->bat_counters[idx], count);
 }
 
+/**
+ * batadv_inc_counter() - Increase per cpu statistics counter of soft interface
+ * @b: the bat priv with all the soft interface information
+ * @i: counter index which should be modified
+ */
 #define batadv_inc_counter(b, i) batadv_add_counter(b, i, 1)
 
-/* Define a macro to reach the control buffer of the skb. The members of the
- * control buffer are defined in struct batadv_skb_cb in types.h.
- * The macro is inspired by the similar macro TCP_SKB_CB() in tcp.h.
+/**
+ * BATADV_SKB_CB() - Get batadv_skb_cb from skb control buffer
+ * @__skb: skb holding the control buffer
+ *
+ * The members of the control buffer are defined in struct batadv_skb_cb in
+ * types.h. The macro is inspired by the similar macro TCP_SKB_CB() in tcp.h.
+ *
+ * Return: pointer to the batadv_skb_cb of the skb
  */
 #define BATADV_SKB_CB(__skb)       ((struct batadv_skb_cb *)&((__skb)->cb[0]))
 
index e553a8770a89a2fb0ae603ba9fed80d59a25bb4a..cbdeb47ec3f606ddfe53a122392d82576914480b 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2014-2017  B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing
@@ -24,7 +25,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/icmpv6.h>
 #include <linux/if_bridge.h>
 #include <linux/if_ether.h>
 #include <net/if_inet6.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "hard-interface.h"
 #include "hash.h"
 #include "log.h"
-#include "packet.h"
 #include "translation-table.h"
 #include "tvlv.h"
 
 static void batadv_mcast_mla_update(struct work_struct *work);
 
 /**
- * batadv_mcast_start_timer - schedule the multicast periodic worker
+ * batadv_mcast_start_timer() - schedule the multicast periodic worker
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
@@ -75,7 +76,7 @@ static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_get_bridge - get the bridge on top of the softif if it exists
+ * batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists
  * @soft_iface: netdev struct of the mesh interface
  *
  * If the given soft interface has a bridge on top then the refcount
@@ -101,7 +102,7 @@ static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
 }
 
 /**
- * batadv_mcast_mla_softif_get - get softif multicast listeners
+ * batadv_mcast_mla_softif_get() - get softif multicast listeners
  * @dev: the device to collect multicast addresses from
  * @mcast_list: a list to put found addresses into
  *
@@ -147,7 +148,7 @@ static int batadv_mcast_mla_softif_get(struct net_device *dev,
 }
 
 /**
- * batadv_mcast_mla_is_duplicate - check whether an address is in a list
+ * batadv_mcast_mla_is_duplicate() - check whether an address is in a list
  * @mcast_addr: the multicast address to check
  * @mcast_list: the list with multicast addresses to search in
  *
@@ -167,7 +168,7 @@ static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
 }
 
 /**
- * batadv_mcast_mla_br_addr_cpy - copy a bridge multicast address
+ * batadv_mcast_mla_br_addr_cpy() - copy a bridge multicast address
  * @dst: destination to write to - a multicast MAC address
  * @src: source to read from - a multicast IP address
  *
@@ -191,7 +192,7 @@ static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
 }
 
 /**
- * batadv_mcast_mla_bridge_get - get bridged-in multicast listeners
+ * batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners
  * @dev: a bridge slave whose bridge to collect multicast addresses from
  * @mcast_list: a list to put found addresses into
  *
@@ -244,7 +245,7 @@ out:
 }
 
 /**
- * batadv_mcast_mla_list_free - free a list of multicast addresses
+ * batadv_mcast_mla_list_free() - free a list of multicast addresses
  * @mcast_list: the list to free
  *
  * Removes and frees all items in the given mcast_list.
@@ -261,7 +262,7 @@ static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
 }
 
 /**
- * batadv_mcast_mla_tt_retract - clean up multicast listener announcements
+ * batadv_mcast_mla_tt_retract() - clean up multicast listener announcements
  * @bat_priv: the bat priv with all the soft interface information
  * @mcast_list: a list of addresses which should _not_ be removed
  *
@@ -297,7 +298,7 @@ static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_mla_tt_add - add multicast listener announcements
+ * batadv_mcast_mla_tt_add() - add multicast listener announcements
  * @bat_priv: the bat priv with all the soft interface information
  * @mcast_list: a list of addresses which are going to get added
  *
@@ -333,7 +334,7 @@ static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_has_bridge - check whether the soft-iface is bridged
+ * batadv_mcast_has_bridge() - check whether the soft-iface is bridged
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Checks whether there is a bridge on top of our soft interface.
@@ -354,7 +355,8 @@ static bool batadv_mcast_has_bridge(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_querier_log - debug output regarding the querier status on link
+ * batadv_mcast_querier_log() - debug output regarding the querier status on
+ *  link
  * @bat_priv: the bat priv with all the soft interface information
  * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
  * @old_state: the previous querier state on our link
@@ -405,7 +407,8 @@ batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto,
 }
 
 /**
- * batadv_mcast_bridge_log - debug output for topology changes in bridged setups
+ * batadv_mcast_bridge_log() - debug output for topology changes in bridged
+ *  setups
  * @bat_priv: the bat priv with all the soft interface information
  * @bridged: a flag about whether the soft interface is currently bridged or not
  * @querier_ipv4: (maybe) new status of a potential, selected IGMP querier
@@ -444,7 +447,7 @@ batadv_mcast_bridge_log(struct batadv_priv *bat_priv, bool bridged,
 }
 
 /**
- * batadv_mcast_flags_logs - output debug information about mcast flag changes
+ * batadv_mcast_flags_log() - output debug information about mcast flag changes
  * @bat_priv: the bat priv with all the soft interface information
  * @flags: flags indicating the new multicast state
  *
@@ -470,7 +473,7 @@ static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
 }
 
 /**
- * batadv_mcast_mla_tvlv_update - update multicast tvlv
+ * batadv_mcast_mla_tvlv_update() - update multicast tvlv
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Updates the own multicast tvlv with our current multicast related settings,
@@ -545,7 +548,7 @@ update:
 }
 
 /**
- * __batadv_mcast_mla_update - update the own MLAs
+ * __batadv_mcast_mla_update() - update the own MLAs
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Updates the own multicast listener announcements in the translation
@@ -582,7 +585,7 @@ out:
 }
 
 /**
- * batadv_mcast_mla_update - update the own MLAs
+ * batadv_mcast_mla_update() - update the own MLAs
  * @work: kernel work struct
  *
  * Updates the own multicast listener announcements in the translation
@@ -605,7 +608,7 @@ static void batadv_mcast_mla_update(struct work_struct *work)
 }
 
 /**
- * batadv_mcast_is_report_ipv4 - check for IGMP reports
+ * batadv_mcast_is_report_ipv4() - check for IGMP reports
  * @skb: the ethernet frame destined for the mesh
  *
  * This call might reallocate skb data.
@@ -630,7 +633,8 @@ static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
 }
 
 /**
- * batadv_mcast_forw_mode_check_ipv4 - check for optimized forwarding potential
+ * batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding
+ *  potential
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the IPv4 packet to check
  * @is_unsnoopable: stores whether the destination is snoopable
@@ -671,7 +675,7 @@ static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_is_report_ipv6 - check for MLD reports
+ * batadv_mcast_is_report_ipv6() - check for MLD reports
  * @skb: the ethernet frame destined for the mesh
  *
  * This call might reallocate skb data.
@@ -695,7 +699,8 @@ static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
 }
 
 /**
- * batadv_mcast_forw_mode_check_ipv6 - check for optimized forwarding potential
+ * batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding
+ *  potential
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the IPv6 packet to check
  * @is_unsnoopable: stores whether the destination is snoopable
@@ -736,7 +741,7 @@ static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_forw_mode_check - check for optimized forwarding potential
+ * batadv_mcast_forw_mode_check() - check for optimized forwarding potential
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the multicast frame to check
  * @is_unsnoopable: stores whether the destination is snoopable
@@ -774,7 +779,7 @@ static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_forw_want_all_ip_count - count nodes with unspecific mcast
+ * batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast
  *  interest
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: ethernet header of a packet
@@ -798,7 +803,7 @@ static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_forw_tt_node_get - get a multicast tt node
+ * batadv_mcast_forw_tt_node_get() - get a multicast tt node
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: the ether header containing the multicast destination
  *
@@ -814,7 +819,7 @@ batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_forw_ipv4_node_get - get a node with an ipv4 flag
+ * batadv_mcast_forw_ipv4_node_get() - get a node with an ipv4 flag
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
@@ -841,7 +846,7 @@ batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_forw_ipv6_node_get - get a node with an ipv6 flag
+ * batadv_mcast_forw_ipv6_node_get() - get a node with an ipv6 flag
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
@@ -868,7 +873,7 @@ batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_forw_ip_node_get - get a node with an ipv4/ipv6 flag
+ * batadv_mcast_forw_ip_node_get() - get a node with an ipv4/ipv6 flag
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: an ethernet header to determine the protocol family from
  *
@@ -892,7 +897,7 @@ batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_forw_unsnoop_node_get - get a node with an unsnoopable flag
+ * batadv_mcast_forw_unsnoop_node_get() - get a node with an unsnoopable flag
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
@@ -919,7 +924,7 @@ batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_forw_mode - check on how to forward a multicast packet
+ * batadv_mcast_forw_mode() - check on how to forward a multicast packet
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: The multicast packet to check
  * @orig: an originator to be set to forward the skb to
@@ -973,7 +978,7 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
 }
 
 /**
- * batadv_mcast_want_unsnoop_update - update unsnoop counter and list
+ * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node which multicast state might have changed of
  * @mcast_flags: flags indicating the new multicast state
@@ -1018,7 +1023,7 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_want_ipv4_update - update want-all-ipv4 counter and list
+ * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node which multicast state might have changed of
  * @mcast_flags: flags indicating the new multicast state
@@ -1063,7 +1068,7 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_want_ipv6_update - update want-all-ipv6 counter and list
+ * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node which multicast state might have changed of
  * @mcast_flags: flags indicating the new multicast state
@@ -1108,7 +1113,7 @@ static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_tvlv_ogm_handler - process incoming multicast tvlv container
+ * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node of the ogm
  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -1164,7 +1169,7 @@ static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_init - initialize the multicast optimizations structures
+ * batadv_mcast_init() - initialize the multicast optimizations structures
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_mcast_init(struct batadv_priv *bat_priv)
@@ -1179,7 +1184,7 @@ void batadv_mcast_init(struct batadv_priv *bat_priv)
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_mcast_flags_print_header - print own mcast flags to debugfs table
+ * batadv_mcast_flags_print_header() - print own mcast flags to debugfs table
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: debugfs table seq_file struct
  *
@@ -1220,7 +1225,7 @@ static void batadv_mcast_flags_print_header(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_flags_seq_print_text - print the mcast flags of other nodes
+ * batadv_mcast_flags_seq_print_text() - print the mcast flags of other nodes
  * @seq: seq file to print on
  * @offset: not used
  *
@@ -1281,7 +1286,7 @@ int batadv_mcast_flags_seq_print_text(struct seq_file *seq, void *offset)
 #endif
 
 /**
- * batadv_mcast_free - free the multicast optimizations structures
+ * batadv_mcast_free() - free the multicast optimizations structures
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_mcast_free(struct batadv_priv *bat_priv)
@@ -1296,7 +1301,7 @@ void batadv_mcast_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_purge_orig - reset originator global mcast state modifications
+ * batadv_mcast_purge_orig() - reset originator global mcast state modifications
  * @orig: the originator which is going to get purged
  */
 void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
index 2a78cddab0e971a466797ddb7032661f2fa55430..3ac06337ab715147906ad91b4768af33003794b2 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2014-2017  B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing
@@ -25,15 +26,21 @@ struct sk_buff;
 
 /**
  * enum batadv_forw_mode - the way a packet should be forwarded as
- * @BATADV_FORW_ALL: forward the packet to all nodes (currently via classic
- *  flooding)
- * @BATADV_FORW_SINGLE: forward the packet to a single node (currently via the
- *  BATMAN unicast routing protocol)
- * @BATADV_FORW_NONE: don't forward, drop it
  */
 enum batadv_forw_mode {
+       /**
+        * @BATADV_FORW_ALL: forward the packet to all nodes (currently via
+        *  classic flooding)
+        */
        BATADV_FORW_ALL,
+
+       /**
+        * @BATADV_FORW_SINGLE: forward the packet to a single node (currently
+        *  via the BATMAN unicast routing protocol)
+        */
        BATADV_FORW_SINGLE,
+
+       /** @BATADV_FORW_NONE: don't forward, drop it */
        BATADV_FORW_NONE,
 };
 
index ab13b4d58733829413c8b82f66e69733cbecc55b..a823d3899bad37b08bd46c2f1902060257b289fb 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2016-2017  B.A.T.M.A.N. contributors:
  *
  * Matthias Schiffer
@@ -23,8 +24,8 @@
 #include <linux/cache.h>
 #include <linux/errno.h>
 #include <linux/export.h>
-#include <linux/fs.h>
 #include <linux/genetlink.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -39,6 +40,7 @@
 #include <net/genetlink.h>
 #include <net/netlink.h>
 #include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "bat_algo.h"
@@ -46,7 +48,6 @@
 #include "gateway_client.h"
 #include "hard-interface.h"
 #include "originator.h"
-#include "packet.h"
 #include "soft-interface.h"
 #include "tp_meter.h"
 #include "translation-table.h"
@@ -99,7 +100,7 @@ static const struct nla_policy batadv_netlink_policy[NUM_BATADV_ATTR] = {
 };
 
 /**
- * batadv_netlink_get_ifindex - Extract an interface index from a message
+ * batadv_netlink_get_ifindex() - Extract an interface index from a message
  * @nlh: Message header
  * @attrtype: Attribute which holds an interface index
  *
@@ -114,7 +115,7 @@ batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
 }
 
 /**
- * batadv_netlink_mesh_info_put - fill in generic information about mesh
+ * batadv_netlink_mesh_info_put() - fill in generic information about mesh
  *  interface
  * @msg: netlink message to be sent back
  * @soft_iface: interface for which the data should be taken
@@ -169,7 +170,7 @@ batadv_netlink_mesh_info_put(struct sk_buff *msg, struct net_device *soft_iface)
 }
 
 /**
- * batadv_netlink_get_mesh_info - handle incoming BATADV_CMD_GET_MESH_INFO
+ * batadv_netlink_get_mesh_info() - handle incoming BATADV_CMD_GET_MESH_INFO
  *  netlink request
  * @skb: received netlink message
  * @info: receiver information
@@ -230,7 +231,7 @@ batadv_netlink_get_mesh_info(struct sk_buff *skb, struct genl_info *info)
 }
 
 /**
- * batadv_netlink_tp_meter_put - Fill information of started tp_meter session
+ * batadv_netlink_tp_meter_put() - Fill information of started tp_meter session
  * @msg: netlink message to be sent back
  * @cookie: tp meter session cookie
  *
@@ -246,7 +247,7 @@ batadv_netlink_tp_meter_put(struct sk_buff *msg, u32 cookie)
 }
 
 /**
- * batadv_netlink_tpmeter_notify - send tp_meter result via netlink to client
+ * batadv_netlink_tpmeter_notify() - send tp_meter result via netlink to client
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: destination of tp_meter session
  * @result: reason for tp meter session stop
@@ -309,7 +310,7 @@ err_genlmsg:
 }
 
 /**
- * batadv_netlink_tp_meter_start - Start a new tp_meter session
+ * batadv_netlink_tp_meter_start() - Start a new tp_meter session
  * @skb: received netlink message
  * @info: receiver information
  *
@@ -386,7 +387,7 @@ batadv_netlink_tp_meter_start(struct sk_buff *skb, struct genl_info *info)
 }
 
 /**
- * batadv_netlink_tp_meter_start - Cancel a running tp_meter session
+ * batadv_netlink_tp_meter_cancel() - Cancel a running tp_meter session
  * @skb: received netlink message
  * @info: receiver information
  *
@@ -431,7 +432,7 @@ out:
 }
 
 /**
- * batadv_netlink_dump_hardif_entry - Dump one hard interface into a message
+ * batadv_netlink_dump_hardif_entry() - Dump one hard interface into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -473,7 +474,7 @@ batadv_netlink_dump_hardif_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_netlink_dump_hardifs - Dump all hard interface into a messages
+ * batadv_netlink_dump_hardifs() - Dump all hard interfaces into a message
  * @msg: Netlink message to dump into
  * @cb: Parameters from query
  *
@@ -620,7 +621,7 @@ struct genl_family batadv_netlink_family __ro_after_init = {
 };
 
 /**
- * batadv_netlink_register - register batadv genl netlink family
+ * batadv_netlink_register() - register batadv genl netlink family
  */
 void __init batadv_netlink_register(void)
 {
@@ -632,7 +633,7 @@ void __init batadv_netlink_register(void)
 }
 
 /**
- * batadv_netlink_unregister - unregister batadv genl netlink family
+ * batadv_netlink_unregister() - unregister batadv genl netlink family
  */
 void batadv_netlink_unregister(void)
 {
index f1cd8c5da96605daff924f4aee00323f56a9e893..0e7e57b69b54ee95adc6a1fb34246dfcd0ebb244 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2016-2017  B.A.T.M.A.N. contributors:
  *
  * Matthias Schiffer
index 3604d7899e2cb02dd4a2825cef3ffa490250eab2..b48116bb24ef100c3f3f6c08f76c3d01c39ed9e1 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2012-2017  B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll, Jeppe Ledet-Pedersen
@@ -25,7 +26,7 @@
 #include <linux/debugfs.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/if_packet.h>
 #include <linux/init.h>
@@ -35,6 +36,7 @@
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/lockdep.h>
+#include <linux/net.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
 #include <linux/random.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "hard-interface.h"
 #include "hash.h"
 #include "log.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 #include "tvlv.h"
@@ -65,7 +67,7 @@ static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
                                       struct batadv_hard_iface *recv_if);
 
 /**
- * batadv_nc_init - one-time initialization for network coding
+ * batadv_nc_init() - one-time initialization for network coding
  *
  * Return: 0 on success or negative error number in case of failure
  */
@@ -81,7 +83,7 @@ int __init batadv_nc_init(void)
 }
 
 /**
- * batadv_nc_start_timer - initialise the nc periodic worker
+ * batadv_nc_start_timer() - initialise the nc periodic worker
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
@@ -91,7 +93,7 @@ static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_nc_tvlv_container_update - update the network coding tvlv container
+ * batadv_nc_tvlv_container_update() - update the network coding tvlv container
  *  after network coding setting change
  * @bat_priv: the bat priv with all the soft interface information
  */
@@ -113,7 +115,7 @@ static void batadv_nc_tvlv_container_update(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_nc_status_update - update the network coding tvlv container after
+ * batadv_nc_status_update() - update the network coding tvlv container after
  *  network coding setting change
  * @net_dev: the soft interface net device
  */
@@ -125,7 +127,7 @@ void batadv_nc_status_update(struct net_device *net_dev)
 }
 
 /**
- * batadv_nc_tvlv_ogm_handler_v1 - process incoming nc tvlv container
+ * batadv_nc_tvlv_ogm_handler_v1() - process incoming nc tvlv container
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node of the ogm
  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -144,7 +146,7 @@ static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_mesh_init - initialise coding hash table and start house keeping
+ * batadv_nc_mesh_init() - initialise coding hash table and start house keeping
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 on success or negative error number in case of failure
@@ -185,7 +187,7 @@ err:
 }
 
 /**
- * batadv_nc_init_bat_priv - initialise the nc specific bat_priv variables
+ * batadv_nc_init_bat_priv() - initialise the nc specific bat_priv variables
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv)
@@ -197,7 +199,7 @@ void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_nc_init_orig - initialise the nc fields of an orig_node
+ * batadv_nc_init_orig() - initialise the nc fields of an orig_node
  * @orig_node: the orig_node which is going to be initialised
  */
 void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
@@ -209,8 +211,8 @@ void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
 }
 
 /**
- * batadv_nc_node_release - release nc_node from lists and queue for free after
- *  rcu grace period
+ * batadv_nc_node_release() - release nc_node from lists and queue for free
+ *  after rcu grace period
  * @ref: kref pointer of the nc_node
  */
 static void batadv_nc_node_release(struct kref *ref)
@@ -224,7 +226,7 @@ static void batadv_nc_node_release(struct kref *ref)
 }
 
 /**
- * batadv_nc_node_put - decrement the nc_node refcounter and possibly
+ * batadv_nc_node_put() - decrement the nc_node refcounter and possibly
  *  release it
  * @nc_node: nc_node to be free'd
  */
@@ -234,8 +236,8 @@ static void batadv_nc_node_put(struct batadv_nc_node *nc_node)
 }
 
 /**
- * batadv_nc_path_release - release nc_path from lists and queue for free after
- *  rcu grace period
+ * batadv_nc_path_release() - release nc_path from lists and queue for free
+ *  after rcu grace period
  * @ref: kref pointer of the nc_path
  */
 static void batadv_nc_path_release(struct kref *ref)
@@ -248,7 +250,7 @@ static void batadv_nc_path_release(struct kref *ref)
 }
 
 /**
- * batadv_nc_path_put - decrement the nc_path refcounter and possibly
+ * batadv_nc_path_put() - decrement the nc_path refcounter and possibly
  *  release it
  * @nc_path: nc_path to be free'd
  */
@@ -258,7 +260,7 @@ static void batadv_nc_path_put(struct batadv_nc_path *nc_path)
 }
 
 /**
- * batadv_nc_packet_free - frees nc packet
+ * batadv_nc_packet_free() - frees nc packet
  * @nc_packet: the nc packet to free
  * @dropped: whether the packet is freed because it is dropped
  */
@@ -275,7 +277,7 @@ static void batadv_nc_packet_free(struct batadv_nc_packet *nc_packet,
 }
 
 /**
- * batadv_nc_to_purge_nc_node - checks whether an nc node has to be purged
+ * batadv_nc_to_purge_nc_node() - checks whether an nc node has to be purged
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_node: the nc node to check
  *
@@ -291,7 +293,7 @@ static bool batadv_nc_to_purge_nc_node(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_to_purge_nc_path_coding - checks whether an nc path has timed out
+ * batadv_nc_to_purge_nc_path_coding() - checks whether an nc path has timed out
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_path: the nc path to check
  *
@@ -311,7 +313,8 @@ static bool batadv_nc_to_purge_nc_path_coding(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_to_purge_nc_path_decoding - checks whether an nc path has timed out
+ * batadv_nc_to_purge_nc_path_decoding() - checks whether an nc path has timed
+ *  out
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_path: the nc path to check
  *
@@ -331,7 +334,7 @@ static bool batadv_nc_to_purge_nc_path_decoding(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_purge_orig_nc_nodes - go through list of nc nodes and purge stale
+ * batadv_nc_purge_orig_nc_nodes() - go through list of nc nodes and purge stale
  *  entries
  * @bat_priv: the bat priv with all the soft interface information
  * @list: list of nc nodes
@@ -369,7 +372,7 @@ batadv_nc_purge_orig_nc_nodes(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_purge_orig - purges all nc node data attached of the given
+ * batadv_nc_purge_orig() - purges all nc node data attached of the given
  *  originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig_node with the nc node entries to be purged
@@ -395,8 +398,8 @@ void batadv_nc_purge_orig(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_purge_orig_hash - traverse entire originator hash to check if they
- *  have timed out nc nodes
+ * batadv_nc_purge_orig_hash() - traverse entire originator hash to check if
+ *  they have timed out nc nodes
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_nc_purge_orig_hash(struct batadv_priv *bat_priv)
@@ -422,7 +425,7 @@ static void batadv_nc_purge_orig_hash(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_nc_purge_paths - traverse all nc paths part of the hash and remove
+ * batadv_nc_purge_paths() - traverse all nc paths part of the hash and remove
  *  unused ones
  * @bat_priv: the bat priv with all the soft interface information
  * @hash: hash table containing the nc paths to check
@@ -481,7 +484,7 @@ static void batadv_nc_purge_paths(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_hash_key_gen - computes the nc_path hash key
+ * batadv_nc_hash_key_gen() - computes the nc_path hash key
  * @key: buffer to hold the final hash key
  * @src: source ethernet mac address going into the hash key
  * @dst: destination ethernet mac address going into the hash key
@@ -494,7 +497,7 @@ static void batadv_nc_hash_key_gen(struct batadv_nc_path *key, const char *src,
 }
 
 /**
- * batadv_nc_hash_choose - compute the hash value for an nc path
+ * batadv_nc_hash_choose() - compute the hash value for an nc path
  * @data: data to hash
  * @size: size of the hash table
  *
@@ -512,7 +515,7 @@ static u32 batadv_nc_hash_choose(const void *data, u32 size)
 }
 
 /**
- * batadv_nc_hash_compare - comparing function used in the network coding hash
+ * batadv_nc_hash_compare() - comparing function used in the network coding hash
  *  tables
  * @node: node in the local table
  * @data2: second object to compare the node to
@@ -538,7 +541,7 @@ static bool batadv_nc_hash_compare(const struct hlist_node *node,
 }
 
 /**
- * batadv_nc_hash_find - search for an existing nc path and return it
+ * batadv_nc_hash_find() - search for an existing nc path and return it
  * @hash: hash table containing the nc path
  * @data: search key
  *
@@ -575,7 +578,7 @@ batadv_nc_hash_find(struct batadv_hashtable *hash,
 }
 
 /**
- * batadv_nc_send_packet - send non-coded packet and free nc_packet struct
+ * batadv_nc_send_packet() - send non-coded packet and free nc_packet struct
  * @nc_packet: the nc packet to send
  */
 static void batadv_nc_send_packet(struct batadv_nc_packet *nc_packet)
@@ -586,7 +589,7 @@ static void batadv_nc_send_packet(struct batadv_nc_packet *nc_packet)
 }
 
 /**
- * batadv_nc_sniffed_purge - Checks timestamp of given sniffed nc_packet.
+ * batadv_nc_sniffed_purge() - Checks timestamp of given sniffed nc_packet.
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_path: the nc path the packet belongs to
  * @nc_packet: the nc packet to be checked
@@ -625,7 +628,7 @@ out:
 }
 
 /**
- * batadv_nc_fwd_flush - Checks the timestamp of the given nc packet.
+ * batadv_nc_fwd_flush() - Checks the timestamp of the given nc packet.
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_path: the nc path the packet belongs to
  * @nc_packet: the nc packet to be checked
@@ -663,8 +666,8 @@ static bool batadv_nc_fwd_flush(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_process_nc_paths - traverse given nc packet pool and free timed out
- *  nc packets
+ * batadv_nc_process_nc_paths() - traverse given nc packet pool and free timed
+ *  out nc packets
  * @bat_priv: the bat priv with all the soft interface information
  * @hash: to be processed hash table
  * @process_fn: Function called to process given nc packet. Should return true
@@ -709,7 +712,8 @@ batadv_nc_process_nc_paths(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_worker - periodic task for house keeping related to network coding
+ * batadv_nc_worker() - periodic task for house keeping related to network
+ *  coding
  * @work: kernel work struct
  */
 static void batadv_nc_worker(struct work_struct *work)
@@ -749,8 +753,8 @@ static void batadv_nc_worker(struct work_struct *work)
 }
 
 /**
- * batadv_can_nc_with_orig - checks whether the given orig node is suitable for
- *  coding or not
+ * batadv_can_nc_with_orig() - checks whether the given orig node is suitable
+ *  for coding or not
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: neighboring orig node which may be used as nc candidate
  * @ogm_packet: incoming ogm packet also used for the checks
@@ -790,7 +794,7 @@ static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_find_nc_node - search for an existing nc node and return it
+ * batadv_nc_find_nc_node() - search for an existing nc node and return it
  * @orig_node: orig node originating the ogm packet
  * @orig_neigh_node: neighboring orig node from which we received the ogm packet
  *  (can be equal to orig_node)
@@ -830,7 +834,7 @@ batadv_nc_find_nc_node(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_nc_get_nc_node - retrieves an nc node or creates the entry if it was
+ * batadv_nc_get_nc_node() - retrieves an nc node or creates the entry if it was
  *  not found
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node originating the ogm packet
@@ -890,7 +894,7 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_update_nc_node - updates stored incoming and outgoing nc node
+ * batadv_nc_update_nc_node() - updates stored incoming and outgoing nc node
  *  structs (best called on incoming OGMs)
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node originating the ogm packet
@@ -945,7 +949,7 @@ out:
 }
 
 /**
- * batadv_nc_get_path - get existing nc_path or allocate a new one
+ * batadv_nc_get_path() - get existing nc_path or allocate a new one
  * @bat_priv: the bat priv with all the soft interface information
  * @hash: hash table containing the nc path
  * @src: ethernet source address - first half of the nc path search key
@@ -1006,7 +1010,7 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_random_weight_tq - scale the receivers TQ-value to avoid unfair
+ * batadv_nc_random_weight_tq() - scale the receivers TQ-value to avoid unfair
  *  selection of a receiver with slightly lower TQ than the other
  * @tq: to be weighted tq value
  *
@@ -1029,7 +1033,7 @@ static u8 batadv_nc_random_weight_tq(u8 tq)
 }
 
 /**
- * batadv_nc_memxor - XOR destination with source
+ * batadv_nc_memxor() - XOR destination with source
  * @dst: byte array to XOR into
  * @src: byte array to XOR from
  * @len: length of destination array
@@ -1043,7 +1047,7 @@ static void batadv_nc_memxor(char *dst, const char *src, unsigned int len)
 }
 
 /**
- * batadv_nc_code_packets - code a received unicast_packet with an nc packet
+ * batadv_nc_code_packets() - code a received unicast_packet with an nc packet
  *  into a coded_packet and send it
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: data skb to forward
@@ -1236,7 +1240,7 @@ out:
 }
 
 /**
- * batadv_nc_skb_coding_possible - true if a decoded skb is available at dst.
+ * batadv_nc_skb_coding_possible() - true if a decoded skb is available at dst.
  * @skb: data skb to forward
  * @dst: destination mac address of the other skb to code with
  * @src: source mac address of skb
@@ -1260,7 +1264,7 @@ static bool batadv_nc_skb_coding_possible(struct sk_buff *skb, u8 *dst, u8 *src)
 }
 
 /**
- * batadv_nc_path_search - Find the coding path matching in_nc_node and
+ * batadv_nc_path_search() - Find the coding path matching in_nc_node and
  *  out_nc_node to retrieve a buffered packet that can be used for coding.
  * @bat_priv: the bat priv with all the soft interface information
  * @in_nc_node: pointer to skb next hop's neighbor nc node
@@ -1328,8 +1332,8 @@ batadv_nc_path_search(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_skb_src_search - Loops through the list of neighoring nodes of the
- *  skb's sender (may be equal to the originator).
+ * batadv_nc_skb_src_search() - Loops through the list of neighboring nodes of
+ *  the skb's sender (may be equal to the originator).
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: data skb to forward
  * @eth_dst: next hop mac address of skb
@@ -1374,7 +1378,7 @@ batadv_nc_skb_src_search(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_skb_store_before_coding - set the ethernet src and dst of the
+ * batadv_nc_skb_store_before_coding() - set the ethernet src and dst of the
  *  unicast skb before it is stored for use in later decoding
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: data skb to store
@@ -1409,7 +1413,7 @@ static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_skb_dst_search - Loops through list of neighboring nodes to dst.
+ * batadv_nc_skb_dst_search() - Loops through list of neighboring nodes to dst.
  * @skb: data skb to forward
  * @neigh_node: next hop to forward packet to
  * @ethhdr: pointer to the ethernet header inside the skb
@@ -1467,7 +1471,7 @@ static bool batadv_nc_skb_dst_search(struct sk_buff *skb,
 }
 
 /**
- * batadv_nc_skb_add_to_path - buffer skb for later encoding / decoding
+ * batadv_nc_skb_add_to_path() - buffer skb for later encoding / decoding
  * @skb: skb to add to path
  * @nc_path: path to add skb to
  * @neigh_node: next hop to forward packet to
@@ -1502,7 +1506,7 @@ static bool batadv_nc_skb_add_to_path(struct sk_buff *skb,
 }
 
 /**
- * batadv_nc_skb_forward - try to code a packet or add it to the coding packet
+ * batadv_nc_skb_forward() - try to code a packet or add it to the coding packet
  *  buffer
  * @skb: data skb to forward
  * @neigh_node: next hop to forward packet to
@@ -1559,8 +1563,8 @@ out:
 }
 
 /**
- * batadv_nc_skb_store_for_decoding - save a clone of the skb which can be used
- *  when decoding coded packets
+ * batadv_nc_skb_store_for_decoding() - save a clone of the skb which can be
+ *  used when decoding coded packets
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: data skb to store
  */
@@ -1620,7 +1624,7 @@ out:
 }
 
 /**
- * batadv_nc_skb_store_sniffed_unicast - check if a received unicast packet
+ * batadv_nc_skb_store_sniffed_unicast() - check if a received unicast packet
  *  should be saved in the decoding buffer and, if so, store it there
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: unicast skb to store
@@ -1640,7 +1644,7 @@ void batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_skb_decode_packet - decode given skb using the decode data stored
+ * batadv_nc_skb_decode_packet() - decode given skb using the decode data stored
  *  in nc_packet
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: unicast skb to decode
@@ -1734,7 +1738,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
 }
 
 /**
- * batadv_nc_find_decoding_packet - search through buffered decoding data to
+ * batadv_nc_find_decoding_packet() - search through buffered decoding data to
  *  find the data needed to decode the coded packet
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: pointer to the ethernet header inside the coded packet
@@ -1799,7 +1803,7 @@ batadv_nc_find_decoding_packet(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_recv_coded_packet - try to decode coded packet and enqueue the
+ * batadv_nc_recv_coded_packet() - try to decode coded packet and enqueue the
  *  resulting unicast packet
  * @skb: incoming coded packet
  * @recv_if: pointer to interface this packet was received on
@@ -1874,7 +1878,7 @@ free_skb:
 }
 
 /**
- * batadv_nc_mesh_free - clean up network coding memory
+ * batadv_nc_mesh_free() - clean up network coding memory
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
@@ -1891,7 +1895,7 @@ void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_nc_nodes_seq_print_text - print the nc node information
+ * batadv_nc_nodes_seq_print_text() - print the nc node information
  * @seq: seq file to print on
  * @offset: not used
  *
@@ -1954,7 +1958,7 @@ out:
 }
 
 /**
- * batadv_nc_init_debugfs - create nc folder and related files in debugfs
+ * batadv_nc_init_debugfs() - create nc folder and related files in debugfs
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 on success or negative error number in case of failure
index c66efb81d2f4154ed265e960632fa5c3f0e8aee8..adaeafa4f71ef4c02971fe7cb0051f0d856636d6 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2012-2017  B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll, Jeppe Ledet-Pedersen
index 2967b86c13da10d14d527fe7bc89f1faaaef64c2..58a7d9274435cffe99fdb661ce7005f5f6da35c1 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -21,7 +22,7 @@
 #include <linux/atomic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/kref.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/rculist.h>
+#include <linux/rcupdate.h>
 #include <linux/seq_file.h>
 #include <linux/skbuff.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/stddef.h>
 #include <linux/workqueue.h>
 #include <net/sock.h>
 #include <uapi/linux/batman_adv.h>
 /* hash class keys */
 static struct lock_class_key batadv_orig_hash_lock_class_key;
 
+/**
+ * batadv_orig_hash_find() - Find and return originator from orig_hash
+ * @bat_priv: the bat priv with all the soft interface information
+ * @data: mac address of the originator
+ *
+ * Return: orig_node (with increased refcnt), NULL on errors
+ */
+struct batadv_orig_node *
+batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
+{
+       struct batadv_hashtable *hash = bat_priv->orig_hash;
+       struct hlist_head *head;
+       struct batadv_orig_node *orig_node, *orig_node_tmp = NULL;
+       int index;
+
+       if (!hash)
+               return NULL;
+
+       index = batadv_choose_orig(data, hash->size);
+       head = &hash->table[index];
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
+               if (!batadv_compare_eth(orig_node, data))
+                       continue;
+
+               if (!kref_get_unless_zero(&orig_node->refcount))
+                       continue;
+
+               orig_node_tmp = orig_node;
+               break;
+       }
+       rcu_read_unlock();
+
+       return orig_node_tmp;
+}
+
 static void batadv_purge_orig(struct work_struct *work);
 
 /**
- * batadv_compare_orig - comparing function used in the originator hash table
+ * batadv_compare_orig() - comparing function used in the originator hash table
  * @node: node in the local table
  * @data2: second object to compare the node to
  *
@@ -73,7 +113,7 @@ bool batadv_compare_orig(const struct hlist_node *node, const void *data2)
 }
 
 /**
- * batadv_orig_node_vlan_get - get an orig_node_vlan object
+ * batadv_orig_node_vlan_get() - get an orig_node_vlan object
  * @orig_node: the originator serving the VLAN
  * @vid: the VLAN identifier
  *
@@ -104,7 +144,7 @@ batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
+ * batadv_orig_node_vlan_new() - search and possibly create an orig_node_vlan
  *  object
  * @orig_node: the originator serving the VLAN
  * @vid: the VLAN identifier
@@ -145,7 +185,7 @@ out:
 }
 
 /**
- * batadv_orig_node_vlan_release - release originator-vlan object from lists
+ * batadv_orig_node_vlan_release() - release originator-vlan object from lists
  *  and queue for free after rcu grace period
  * @ref: kref pointer of the originator-vlan object
  */
@@ -159,7 +199,7 @@ static void batadv_orig_node_vlan_release(struct kref *ref)
 }
 
 /**
- * batadv_orig_node_vlan_put - decrement the refcounter and possibly release
+ * batadv_orig_node_vlan_put() - decrement the refcounter and possibly release
  *  the originator-vlan object
  * @orig_vlan: the originator-vlan object to release
  */
@@ -168,6 +208,12 @@ void batadv_orig_node_vlan_put(struct batadv_orig_node_vlan *orig_vlan)
        kref_put(&orig_vlan->refcount, batadv_orig_node_vlan_release);
 }
 
+/**
+ * batadv_originator_init() - Initialize all originator structures
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_originator_init(struct batadv_priv *bat_priv)
 {
        if (bat_priv->orig_hash)
@@ -193,7 +239,7 @@ err:
 }
 
 /**
- * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
+ * batadv_neigh_ifinfo_release() - release neigh_ifinfo from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the neigh_ifinfo
  */
@@ -210,7 +256,7 @@ static void batadv_neigh_ifinfo_release(struct kref *ref)
 }
 
 /**
- * batadv_neigh_ifinfo_put - decrement the refcounter and possibly release
+ * batadv_neigh_ifinfo_put() - decrement the refcounter and possibly release
  *  the neigh_ifinfo
  * @neigh_ifinfo: the neigh_ifinfo object to release
  */
@@ -220,7 +266,7 @@ void batadv_neigh_ifinfo_put(struct batadv_neigh_ifinfo *neigh_ifinfo)
 }
 
 /**
- * batadv_hardif_neigh_release - release hardif neigh node from lists and
+ * batadv_hardif_neigh_release() - release hardif neigh node from lists and
  *  queue for free after rcu grace period
  * @ref: kref pointer of the neigh_node
  */
@@ -240,7 +286,7 @@ static void batadv_hardif_neigh_release(struct kref *ref)
 }
 
 /**
- * batadv_hardif_neigh_put - decrement the hardif neighbors refcounter
+ * batadv_hardif_neigh_put() - decrement the hardif neighbors refcounter
  *  and possibly release it
  * @hardif_neigh: hardif neigh neighbor to free
  */
@@ -250,7 +296,7 @@ void batadv_hardif_neigh_put(struct batadv_hardif_neigh_node *hardif_neigh)
 }
 
 /**
- * batadv_neigh_node_release - release neigh_node from lists and queue for
+ * batadv_neigh_node_release() - release neigh_node from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the neigh_node
  */
@@ -275,7 +321,7 @@ static void batadv_neigh_node_release(struct kref *ref)
 }
 
 /**
- * batadv_neigh_node_put - decrement the neighbors refcounter and possibly
+ * batadv_neigh_node_put() - decrement the neighbors refcounter and possibly
  *  release it
  * @neigh_node: neigh neighbor to free
  */
@@ -285,7 +331,7 @@ void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node)
 }
 
 /**
- * batadv_orig_router_get - router to the originator depending on iface
+ * batadv_orig_router_get() - router to the originator depending on iface
  * @orig_node: the orig node for the router
  * @if_outgoing: the interface where the payload packet has been received or
  *  the OGM should be sent to
@@ -318,7 +364,7 @@ batadv_orig_router_get(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
+ * batadv_orig_ifinfo_get() - find the ifinfo from an orig_node
  * @orig_node: the orig node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
@@ -350,7 +396,7 @@ batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
+ * batadv_orig_ifinfo_new() - search and possibly create an orig_ifinfo object
  * @orig_node: the orig node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
@@ -396,7 +442,7 @@ out:
 }
 
 /**
- * batadv_neigh_ifinfo_get - find the ifinfo from an neigh_node
+ * batadv_neigh_ifinfo_get() - find the ifinfo from a neigh_node
  * @neigh: the neigh node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
@@ -429,7 +475,7 @@ batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
 }
 
 /**
- * batadv_neigh_ifinfo_new - search and possibly create an neigh_ifinfo object
+ * batadv_neigh_ifinfo_new() - search and possibly create a neigh_ifinfo object
  * @neigh: the neigh node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
@@ -472,7 +518,7 @@ out:
 }
 
 /**
- * batadv_neigh_node_get - retrieve a neighbour from the list
+ * batadv_neigh_node_get() - retrieve a neighbour from the list
  * @orig_node: originator which the neighbour belongs to
  * @hard_iface: the interface where this neighbour is connected to
  * @addr: the address of the neighbour
@@ -509,7 +555,7 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_hardif_neigh_create - create a hardif neighbour node
+ * batadv_hardif_neigh_create() - create a hardif neighbour node
  * @hard_iface: the interface this neighbour is connected to
  * @neigh_addr: the interface address of the neighbour to retrieve
  * @orig_node: originator object representing the neighbour
@@ -555,7 +601,7 @@ out:
 }
 
 /**
- * batadv_hardif_neigh_get_or_create - retrieve or create a hardif neighbour
+ * batadv_hardif_neigh_get_or_create() - retrieve or create a hardif neighbour
  *  node
  * @hard_iface: the interface this neighbour is connected to
  * @neigh_addr: the interface address of the neighbour to retrieve
@@ -579,7 +625,7 @@ batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
 }
 
 /**
- * batadv_hardif_neigh_get - retrieve a hardif neighbour from the list
+ * batadv_hardif_neigh_get() - retrieve a hardif neighbour from the list
  * @hard_iface: the interface where this neighbour is connected to
  * @neigh_addr: the address of the neighbour
  *
@@ -611,7 +657,7 @@ batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
 }
 
 /**
- * batadv_neigh_node_create - create a neigh node object
+ * batadv_neigh_node_create() - create a neigh node object
  * @orig_node: originator object representing the neighbour
  * @hard_iface: the interface where the neighbour is connected to
  * @neigh_addr: the mac address of the neighbour interface
@@ -676,7 +722,7 @@ out:
 }
 
 /**
- * batadv_neigh_node_get_or_create - retrieve or create a neigh node object
+ * batadv_neigh_node_get_or_create() - retrieve or create a neigh node object
  * @orig_node: originator object representing the neighbour
  * @hard_iface: the interface where the neighbour is connected to
  * @neigh_addr: the mac address of the neighbour interface
@@ -700,7 +746,7 @@ batadv_neigh_node_get_or_create(struct batadv_orig_node *orig_node,
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_hardif_neigh_seq_print_text - print the single hop neighbour list
+ * batadv_hardif_neigh_seq_print_text() - print the single hop neighbour list
  * @seq: neighbour table seq_file struct
  * @offset: not used
  *
@@ -735,8 +781,8 @@ int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
 #endif
 
 /**
- * batadv_hardif_neigh_dump - Dump to netlink the neighbor infos for a specific
- *  outgoing interface
+ * batadv_hardif_neigh_dump() - Dump to netlink the neighbor infos for a
+ *  specific outgoing interface
  * @msg: message to dump into
  * @cb: parameters for the dump
  *
@@ -812,7 +858,7 @@ int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb)
 }
 
 /**
- * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
+ * batadv_orig_ifinfo_release() - release orig_ifinfo from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the orig_ifinfo
  */
@@ -835,7 +881,7 @@ static void batadv_orig_ifinfo_release(struct kref *ref)
 }
 
 /**
- * batadv_orig_ifinfo_put - decrement the refcounter and possibly release
+ * batadv_orig_ifinfo_put() - decrement the refcounter and possibly release
  *  the orig_ifinfo
  * @orig_ifinfo: the orig_ifinfo object to release
  */
@@ -845,7 +891,7 @@ void batadv_orig_ifinfo_put(struct batadv_orig_ifinfo *orig_ifinfo)
 }
 
 /**
- * batadv_orig_node_free_rcu - free the orig_node
+ * batadv_orig_node_free_rcu() - free the orig_node
  * @rcu: rcu pointer of the orig_node
  */
 static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
@@ -866,7 +912,7 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
 }
 
 /**
- * batadv_orig_node_release - release orig_node from lists and queue for
+ * batadv_orig_node_release() - release orig_node from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the orig_node
  */
@@ -917,7 +963,7 @@ static void batadv_orig_node_release(struct kref *ref)
 }
 
 /**
- * batadv_orig_node_put - decrement the orig node refcounter and possibly
+ * batadv_orig_node_put() - decrement the orig node refcounter and possibly
  *  release it
  * @orig_node: the orig node to free
  */
@@ -926,6 +972,10 @@ void batadv_orig_node_put(struct batadv_orig_node *orig_node)
        kref_put(&orig_node->refcount, batadv_orig_node_release);
 }
 
+/**
+ * batadv_originator_free() - Free all originator structures
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_originator_free(struct batadv_priv *bat_priv)
 {
        struct batadv_hashtable *hash = bat_priv->orig_hash;
@@ -959,7 +1009,7 @@ void batadv_originator_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_orig_node_new - creates a new orig_node
+ * batadv_orig_node_new() - creates a new orig_node
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the originator
  *
@@ -1038,7 +1088,7 @@ free_orig_node:
 }
 
 /**
- * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
+ * batadv_purge_neigh_ifinfo() - purge obsolete ifinfo entries from neighbor
  * @bat_priv: the bat priv with all the soft interface information
  * @neigh: orig node which is to be checked
  */
@@ -1079,7 +1129,7 @@ batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
+ * batadv_purge_orig_ifinfo() - purge obsolete ifinfo entries from originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be checked
  *
@@ -1131,7 +1181,7 @@ batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_purge_orig_neighbors - purges neighbors from originator
+ * batadv_purge_orig_neighbors() - purges neighbors from originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be checked
  *
@@ -1189,7 +1239,7 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_find_best_neighbor - finds the best neighbor after purging
+ * batadv_find_best_neighbor() - finds the best neighbor after purging
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be checked
  * @if_outgoing: the interface for which the metric should be compared
@@ -1224,7 +1274,7 @@ batadv_find_best_neighbor(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_purge_orig_node - purges obsolete information from an orig_node
+ * batadv_purge_orig_node() - purges obsolete information from an orig_node
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be checked
  *
@@ -1341,12 +1391,24 @@ static void batadv_purge_orig(struct work_struct *work)
                           msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
 }
 
+/**
+ * batadv_purge_orig_ref() - Purge all outdated originators
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
 {
        _batadv_purge_orig(bat_priv);
 }
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
+/**
+ * batadv_orig_seq_print_text() - Print the originator table in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
@@ -1376,7 +1438,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
 }
 
 /**
- * batadv_orig_hardif_seq_print_text - writes originator infos for a specific
+ * batadv_orig_hardif_seq_print_text() - writes originator infos for a specific
  *  outgoing interface
  * @seq: debugfs table seq_file struct
  * @offset: not used
@@ -1423,7 +1485,7 @@ out:
 #endif
 
 /**
- * batadv_orig_dump - Dump to netlink the originator infos for a specific
+ * batadv_orig_dump() - Dump to netlink the originator infos for a specific
  *  outgoing interface
  * @msg: message to dump into
  * @cb: parameters for the dump
@@ -1499,6 +1561,13 @@ int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb)
        return ret;
 }
 
+/**
+ * batadv_orig_hash_add_if() - Add interface to originators in orig_hash
+ * @hard_iface: hard interface to add (already slave of the soft interface)
+ * @max_if_num: new number of interfaces
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
                            int max_if_num)
 {
@@ -1534,6 +1603,13 @@ err:
        return -ENOMEM;
 }
 
+/**
+ * batadv_orig_hash_del_if() - Remove interface from originators in orig_hash
+ * @hard_iface: hard interface to remove (still slave of the soft interface)
+ * @max_if_num: new number of interfaces
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
                            int max_if_num)
 {
index d94220a6d21acf58d55fcf7a179151c67392b055..8e543a3cdc6c310297be13a5253ef33819b9df68 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
 #include <linux/compiler.h>
 #include <linux/if_ether.h>
 #include <linux/jhash.h>
-#include <linux/kref.h>
-#include <linux/rculist.h>
-#include <linux/rcupdate.h>
-#include <linux/stddef.h>
 #include <linux/types.h>
 
-#include "hash.h"
-
 struct netlink_callback;
 struct seq_file;
 struct sk_buff;
@@ -89,8 +84,13 @@ batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
                          unsigned short vid);
 void batadv_orig_node_vlan_put(struct batadv_orig_node_vlan *orig_vlan);
 
-/* hashfunction to choose an entry in a hash table of given size
- * hash algorithm from http://en.wikipedia.org/wiki/Hash_table
+/**
+ * batadv_choose_orig() - Return the index of the orig entry in the hash table
+ * @data: mac address of the originator node
+ * @size: the size of the hash table
+ *
+ * Return: the hash index where the object represented by @data should be
+ * stored.
  */
 static inline u32 batadv_choose_orig(const void *data, u32 size)
 {
@@ -100,34 +100,7 @@ static inline u32 batadv_choose_orig(const void *data, u32 size)
        return hash % size;
 }
 
-static inline struct batadv_orig_node *
-batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
-{
-       struct batadv_hashtable *hash = bat_priv->orig_hash;
-       struct hlist_head *head;
-       struct batadv_orig_node *orig_node, *orig_node_tmp = NULL;
-       int index;
-
-       if (!hash)
-               return NULL;
-
-       index = batadv_choose_orig(data, hash->size);
-       head = &hash->table[index];
-
-       rcu_read_lock();
-       hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-               if (!batadv_compare_eth(orig_node, data))
-                       continue;
-
-               if (!kref_get_unless_zero(&orig_node->refcount))
-                       continue;
-
-               orig_node_tmp = orig_node;
-               break;
-       }
-       rcu_read_unlock();
-
-       return orig_node_tmp;
-}
+struct batadv_orig_node *
+batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data);
 
 #endif /* _NET_BATMAN_ADV_ORIGINATOR_H_ */
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
deleted file mode 100644 (file)
index 8e8a5db..0000000
+++ /dev/null
@@ -1,621 +0,0 @@
-/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _NET_BATMAN_ADV_PACKET_H_
-#define _NET_BATMAN_ADV_PACKET_H_
-
-#include <asm/byteorder.h>
-#include <linux/types.h>
-
-#define batadv_tp_is_error(n) ((u8)(n) > 127 ? 1 : 0)
-
-/**
- * enum batadv_packettype - types for batman-adv encapsulated packets
- * @BATADV_IV_OGM: originator messages for B.A.T.M.A.N. IV
- * @BATADV_BCAST: broadcast packets carrying broadcast payload
- * @BATADV_CODED: network coded packets
- * @BATADV_ELP: echo location packets for B.A.T.M.A.N. V
- * @BATADV_OGM2: originator messages for B.A.T.M.A.N. V
- *
- * @BATADV_UNICAST: unicast packets carrying unicast payload traffic
- * @BATADV_UNICAST_FRAG: unicast packets carrying a fragment of the original
- *     payload packet
- * @BATADV_UNICAST_4ADDR: unicast packet including the originator address of
- *     the sender
- * @BATADV_ICMP: unicast packet like IP ICMP used for ping or traceroute
- * @BATADV_UNICAST_TVLV: unicast packet carrying TVLV containers
- */
-enum batadv_packettype {
-       /* 0x00 - 0x3f: local packets or special rules for handling */
-       BATADV_IV_OGM           = 0x00,
-       BATADV_BCAST            = 0x01,
-       BATADV_CODED            = 0x02,
-       BATADV_ELP              = 0x03,
-       BATADV_OGM2             = 0x04,
-       /* 0x40 - 0x7f: unicast */
-#define BATADV_UNICAST_MIN     0x40
-       BATADV_UNICAST          = 0x40,
-       BATADV_UNICAST_FRAG     = 0x41,
-       BATADV_UNICAST_4ADDR    = 0x42,
-       BATADV_ICMP             = 0x43,
-       BATADV_UNICAST_TVLV     = 0x44,
-#define BATADV_UNICAST_MAX     0x7f
-       /* 0x80 - 0xff: reserved */
-};
-
-/**
- * enum batadv_subtype - packet subtype for unicast4addr
- * @BATADV_P_DATA: user payload
- * @BATADV_P_DAT_DHT_GET: DHT request message
- * @BATADV_P_DAT_DHT_PUT: DHT store message
- * @BATADV_P_DAT_CACHE_REPLY: ARP reply generated by DAT
- */
-enum batadv_subtype {
-       BATADV_P_DATA                   = 0x01,
-       BATADV_P_DAT_DHT_GET            = 0x02,
-       BATADV_P_DAT_DHT_PUT            = 0x03,
-       BATADV_P_DAT_CACHE_REPLY        = 0x04,
-};
-
-/* this file is included by batctl which needs these defines */
-#define BATADV_COMPAT_VERSION 15
-
-/**
- * enum batadv_iv_flags - flags used in B.A.T.M.A.N. IV OGM packets
- * @BATADV_NOT_BEST_NEXT_HOP: flag is set when ogm packet is forwarded and was
- *     previously received from someone else than the best neighbor.
- * @BATADV_PRIMARIES_FIRST_HOP: flag unused.
- * @BATADV_DIRECTLINK: flag is for the first hop or if rebroadcasted from a
- *     one hop neighbor on the interface where it was originally received.
- */
-enum batadv_iv_flags {
-       BATADV_NOT_BEST_NEXT_HOP   = BIT(0),
-       BATADV_PRIMARIES_FIRST_HOP = BIT(1),
-       BATADV_DIRECTLINK          = BIT(2),
-};
-
-/* ICMP message types */
-enum batadv_icmp_packettype {
-       BATADV_ECHO_REPLY              = 0,
-       BATADV_DESTINATION_UNREACHABLE = 3,
-       BATADV_ECHO_REQUEST            = 8,
-       BATADV_TTL_EXCEEDED            = 11,
-       BATADV_PARAMETER_PROBLEM       = 12,
-       BATADV_TP                      = 15,
-};
-
-/**
- * enum batadv_mcast_flags - flags for multicast capabilities and settings
- * @BATADV_MCAST_WANT_ALL_UNSNOOPABLES: we want all packets destined for
- *  224.0.0.0/24 or ff02::1
- * @BATADV_MCAST_WANT_ALL_IPV4: we want all IPv4 multicast packets
- * @BATADV_MCAST_WANT_ALL_IPV6: we want all IPv6 multicast packets
- */
-enum batadv_mcast_flags {
-       BATADV_MCAST_WANT_ALL_UNSNOOPABLES      = BIT(0),
-       BATADV_MCAST_WANT_ALL_IPV4              = BIT(1),
-       BATADV_MCAST_WANT_ALL_IPV6              = BIT(2),
-};
-
-/* tt data subtypes */
-#define BATADV_TT_DATA_TYPE_MASK 0x0F
-
-/**
- * enum batadv_tt_data_flags - flags for tt data tvlv
- * @BATADV_TT_OGM_DIFF: TT diff propagated through OGM
- * @BATADV_TT_REQUEST: TT request message
- * @BATADV_TT_RESPONSE: TT response message
- * @BATADV_TT_FULL_TABLE: contains full table to replace existing table
- */
-enum batadv_tt_data_flags {
-       BATADV_TT_OGM_DIFF   = BIT(0),
-       BATADV_TT_REQUEST    = BIT(1),
-       BATADV_TT_RESPONSE   = BIT(2),
-       BATADV_TT_FULL_TABLE = BIT(4),
-};
-
-/**
- * enum batadv_vlan_flags - flags for the four MSB of any vlan ID field
- * @BATADV_VLAN_HAS_TAG: whether the field contains a valid vlan tag or not
- */
-enum batadv_vlan_flags {
-       BATADV_VLAN_HAS_TAG     = BIT(15),
-};
-
-/* claim frame types for the bridge loop avoidance */
-enum batadv_bla_claimframe {
-       BATADV_CLAIM_TYPE_CLAIM         = 0x00,
-       BATADV_CLAIM_TYPE_UNCLAIM       = 0x01,
-       BATADV_CLAIM_TYPE_ANNOUNCE      = 0x02,
-       BATADV_CLAIM_TYPE_REQUEST       = 0x03,
-       BATADV_CLAIM_TYPE_LOOPDETECT    = 0x04,
-};
-
-/**
- * enum batadv_tvlv_type - tvlv type definitions
- * @BATADV_TVLV_GW: gateway tvlv
- * @BATADV_TVLV_DAT: distributed arp table tvlv
- * @BATADV_TVLV_NC: network coding tvlv
- * @BATADV_TVLV_TT: translation table tvlv
- * @BATADV_TVLV_ROAM: roaming advertisement tvlv
- * @BATADV_TVLV_MCAST: multicast capability tvlv
- */
-enum batadv_tvlv_type {
-       BATADV_TVLV_GW          = 0x01,
-       BATADV_TVLV_DAT         = 0x02,
-       BATADV_TVLV_NC          = 0x03,
-       BATADV_TVLV_TT          = 0x04,
-       BATADV_TVLV_ROAM        = 0x05,
-       BATADV_TVLV_MCAST       = 0x06,
-};
-
-#pragma pack(2)
-/* the destination hardware field in the ARP frame is used to
- * transport the claim type and the group id
- */
-struct batadv_bla_claim_dst {
-       u8     magic[3];        /* FF:43:05 */
-       u8     type;            /* bla_claimframe */
-       __be16 group;           /* group id */
-};
-
-#pragma pack()
-
-/**
- * struct batadv_ogm_packet - ogm (routing protocol) packet
- * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
- * @flags: contains routing relevant flags - see enum batadv_iv_flags
- * @seqno: sequence identification
- * @orig: address of the source node
- * @prev_sender: address of the previous sender
- * @reserved: reserved byte for alignment
- * @tq: transmission quality
- * @tvlv_len: length of tvlv data following the ogm header
- */
-struct batadv_ogm_packet {
-       u8     packet_type;
-       u8     version;
-       u8     ttl;
-       u8     flags;
-       __be32 seqno;
-       u8     orig[ETH_ALEN];
-       u8     prev_sender[ETH_ALEN];
-       u8     reserved;
-       u8     tq;
-       __be16 tvlv_len;
-       /* __packed is not needed as the struct size is divisible by 4,
-        * and the largest data type in this struct has a size of 4.
-        */
-};
-
-#define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
-
-/**
- * struct batadv_ogm2_packet - ogm2 (routing protocol) packet
- * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the general header
- * @ttl: time to live for this packet, part of the general header
- * @flags: reseved for routing relevant flags - currently always 0
- * @seqno: sequence number
- * @orig: originator mac address
- * @tvlv_len: length of the appended tvlv buffer (in bytes)
- * @throughput: the currently flooded path throughput
- */
-struct batadv_ogm2_packet {
-       u8     packet_type;
-       u8     version;
-       u8     ttl;
-       u8     flags;
-       __be32 seqno;
-       u8     orig[ETH_ALEN];
-       __be16 tvlv_len;
-       __be32 throughput;
-       /* __packed is not needed as the struct size is divisible by 4,
-        * and the largest data type in this struct has a size of 4.
-        */
-};
-
-#define BATADV_OGM2_HLEN sizeof(struct batadv_ogm2_packet)
-
-/**
- * struct batadv_elp_packet - elp (neighbor discovery) packet
- * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @orig: originator mac address
- * @seqno: sequence number
- * @elp_interval: currently used ELP sending interval in ms
- */
-struct batadv_elp_packet {
-       u8     packet_type;
-       u8     version;
-       u8     orig[ETH_ALEN];
-       __be32 seqno;
-       __be32 elp_interval;
-};
-
-#define BATADV_ELP_HLEN sizeof(struct batadv_elp_packet)
-
-/**
- * struct batadv_icmp_header - common members among all the ICMP packets
- * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
- * @msg_type: ICMP packet type
- * @dst: address of the destination node
- * @orig: address of the source node
- * @uid: local ICMP socket identifier
- * @align: not used - useful for alignment purposes only
- *
- * This structure is used for ICMP packets parsing only and it is never sent
- * over the wire. The alignment field at the end is there to ensure that
- * members are padded the same way as they are in real packets.
- */
-struct batadv_icmp_header {
-       u8 packet_type;
-       u8 version;
-       u8 ttl;
-       u8 msg_type; /* see ICMP message types above */
-       u8 dst[ETH_ALEN];
-       u8 orig[ETH_ALEN];
-       u8 uid;
-       u8 align[3];
-};
-
-/**
- * struct batadv_icmp_packet - ICMP packet
- * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
- * @msg_type: ICMP packet type
- * @dst: address of the destination node
- * @orig: address of the source node
- * @uid: local ICMP socket identifier
- * @reserved: not used - useful for alignment
- * @seqno: ICMP sequence number
- */
-struct batadv_icmp_packet {
-       u8     packet_type;
-       u8     version;
-       u8     ttl;
-       u8     msg_type; /* see ICMP message types above */
-       u8     dst[ETH_ALEN];
-       u8     orig[ETH_ALEN];
-       u8     uid;
-       u8     reserved;
-       __be16 seqno;
-};
-
-/**
- * struct batadv_icmp_tp_packet - ICMP TP Meter packet
- * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
- * @msg_type: ICMP packet type
- * @dst: address of the destination node
- * @orig: address of the source node
- * @uid: local ICMP socket identifier
- * @subtype: TP packet subtype (see batadv_icmp_tp_subtype)
- * @session: TP session identifier
- * @seqno: the TP sequence number
- * @timestamp: time when the packet has been sent. This value is filled in a
- *  TP_MSG and echoed back in the next TP_ACK so that the sender can compute the
- *  RTT. Since it is read only by the host which wrote it, there is no need to
- *  store it using network order
- */
-struct batadv_icmp_tp_packet {
-       u8  packet_type;
-       u8  version;
-       u8  ttl;
-       u8  msg_type; /* see ICMP message types above */
-       u8  dst[ETH_ALEN];
-       u8  orig[ETH_ALEN];
-       u8  uid;
-       u8  subtype;
-       u8  session[2];
-       __be32 seqno;
-       __be32 timestamp;
-};
-
-/**
- * enum batadv_icmp_tp_subtype - ICMP TP Meter packet subtypes
- * @BATADV_TP_MSG: Msg from sender to receiver
- * @BATADV_TP_ACK: acknowledgment from receiver to sender
- */
-enum batadv_icmp_tp_subtype {
-       BATADV_TP_MSG   = 0,
-       BATADV_TP_ACK,
-};
-
-#define BATADV_RR_LEN 16
-
-/**
- * struct batadv_icmp_packet_rr - ICMP RouteRecord packet
- * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
- * @msg_type: ICMP packet type
- * @dst: address of the destination node
- * @orig: address of the source node
- * @uid: local ICMP socket identifier
- * @rr_cur: number of entries the rr array
- * @seqno: ICMP sequence number
- * @rr: route record array
- */
-struct batadv_icmp_packet_rr {
-       u8     packet_type;
-       u8     version;
-       u8     ttl;
-       u8     msg_type; /* see ICMP message types above */
-       u8     dst[ETH_ALEN];
-       u8     orig[ETH_ALEN];
-       u8     uid;
-       u8     rr_cur;
-       __be16 seqno;
-       u8     rr[BATADV_RR_LEN][ETH_ALEN];
-};
-
-#define BATADV_ICMP_MAX_PACKET_SIZE    sizeof(struct batadv_icmp_packet_rr)
-
-/* All packet headers in front of an ethernet header have to be completely
- * divisible by 2 but not by 4 to make the payload after the ethernet
- * header again 4 bytes boundary aligned.
- *
- * A packing of 2 is necessary to avoid extra padding at the end of the struct
- * caused by a structure member which is larger than two bytes. Otherwise
- * the structure would not fulfill the previously mentioned rule to avoid the
- * misalignment of the payload after the ethernet header. It may also lead to
- * leakage of information when the padding it not initialized before sending.
- */
-#pragma pack(2)
-
-/**
- * struct batadv_unicast_packet - unicast packet for network payload
- * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
- * @ttvn: translation table version number
- * @dest: originator destination of the unicast packet
- */
-struct batadv_unicast_packet {
-       u8 packet_type;
-       u8 version;
-       u8 ttl;
-       u8 ttvn; /* destination translation table version number */
-       u8 dest[ETH_ALEN];
-       /* "4 bytes boundary + 2 bytes" long to make the payload after the
-        * following ethernet header again 4 bytes boundary aligned
-        */
-};
-
-/**
- * struct batadv_unicast_4addr_packet - extended unicast packet
- * @u: common unicast packet header
- * @src: address of the source
- * @subtype: packet subtype
- * @reserved: reserved byte for alignment
- */
-struct batadv_unicast_4addr_packet {
-       struct batadv_unicast_packet u;
-       u8 src[ETH_ALEN];
-       u8 subtype;
-       u8 reserved;
-       /* "4 bytes boundary + 2 bytes" long to make the payload after the
-        * following ethernet header again 4 bytes boundary aligned
-        */
-};
-
-/**
- * struct batadv_frag_packet - fragmented packet
- * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
- * @dest: final destination used when routing fragments
- * @orig: originator of the fragment used when merging the packet
- * @no: fragment number within this sequence
- * @priority: priority of frame, from ToS IP precedence or 802.1p
- * @reserved: reserved byte for alignment
- * @seqno: sequence identification
- * @total_size: size of the merged packet
- */
-struct batadv_frag_packet {
-       u8     packet_type;
-       u8     version;  /* batman version field */
-       u8     ttl;
-#if defined(__BIG_ENDIAN_BITFIELD)
-       u8     no:4;
-       u8     priority:3;
-       u8     reserved:1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
-       u8     reserved:1;
-       u8     priority:3;
-       u8     no:4;
-#else
-#error "unknown bitfield endianness"
-#endif
-       u8     dest[ETH_ALEN];
-       u8     orig[ETH_ALEN];
-       __be16 seqno;
-       __be16 total_size;
-};
-
-/**
- * struct batadv_bcast_packet - broadcast packet for network payload
- * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
- * @reserved: reserved byte for alignment
- * @seqno: sequence identification
- * @orig: originator of the broadcast packet
- */
-struct batadv_bcast_packet {
-       u8     packet_type;
-       u8     version;  /* batman version field */
-       u8     ttl;
-       u8     reserved;
-       __be32 seqno;
-       u8     orig[ETH_ALEN];
-       /* "4 bytes boundary + 2 bytes" long to make the payload after the
-        * following ethernet header again 4 bytes boundary aligned
-        */
-};
-
-/**
- * struct batadv_coded_packet - network coded packet
- * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
- * @first_source: original source of first included packet
- * @first_orig_dest: original destinal of first included packet
- * @first_crc: checksum of first included packet
- * @first_ttvn: tt-version number of first included packet
- * @second_ttl: ttl of second packet
- * @second_dest: second receiver of this coded packet
- * @second_source: original source of second included packet
- * @second_orig_dest: original destination of second included packet
- * @second_crc: checksum of second included packet
- * @second_ttvn: tt version number of second included packet
- * @coded_len: length of network coded part of the payload
- */
-struct batadv_coded_packet {
-       u8     packet_type;
-       u8     version;  /* batman version field */
-       u8     ttl;
-       u8     first_ttvn;
-       /* u8  first_dest[ETH_ALEN]; - saved in mac header destination */
-       u8     first_source[ETH_ALEN];
-       u8     first_orig_dest[ETH_ALEN];
-       __be32 first_crc;
-       u8     second_ttl;
-       u8     second_ttvn;
-       u8     second_dest[ETH_ALEN];
-       u8     second_source[ETH_ALEN];
-       u8     second_orig_dest[ETH_ALEN];
-       __be32 second_crc;
-       __be16 coded_len;
-};
-
-#pragma pack()
-
-/**
- * struct batadv_unicast_tvlv_packet - generic unicast packet with tvlv payload
- * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
- * @reserved: reserved field (for packet alignment)
- * @src: address of the source
- * @dst: address of the destination
- * @tvlv_len: length of tvlv data following the unicast tvlv header
- * @align: 2 bytes to align the header to a 4 byte boundary
- */
-struct batadv_unicast_tvlv_packet {
-       u8     packet_type;
-       u8     version;  /* batman version field */
-       u8     ttl;
-       u8     reserved;
-       u8     dst[ETH_ALEN];
-       u8     src[ETH_ALEN];
-       __be16 tvlv_len;
-       u16    align;
-};
-
-/**
- * struct batadv_tvlv_hdr - base tvlv header struct
- * @type: tvlv container type (see batadv_tvlv_type)
- * @version: tvlv container version
- * @len: tvlv container length
- */
-struct batadv_tvlv_hdr {
-       u8     type;
-       u8     version;
-       __be16 len;
-};
-
-/**
- * struct batadv_tvlv_gateway_data - gateway data propagated through gw tvlv
- *  container
- * @bandwidth_down: advertised uplink download bandwidth
- * @bandwidth_up: advertised uplink upload bandwidth
- */
-struct batadv_tvlv_gateway_data {
-       __be32 bandwidth_down;
-       __be32 bandwidth_up;
-};
-
-/**
- * struct batadv_tvlv_tt_data - tt data propagated through the tt tvlv container
- * @flags: translation table flags (see batadv_tt_data_flags)
- * @ttvn: translation table version number
- * @num_vlan: number of announced VLANs. In the TVLV this struct is followed by
- *  one batadv_tvlv_tt_vlan_data object per announced vlan
- */
-struct batadv_tvlv_tt_data {
-       u8     flags;
-       u8     ttvn;
-       __be16 num_vlan;
-};
-
-/**
- * struct batadv_tvlv_tt_vlan_data - vlan specific tt data propagated through
- *  the tt tvlv container
- * @crc: crc32 checksum of the entries belonging to this vlan
- * @vid: vlan identifier
- * @reserved: unused, useful for alignment purposes
- */
-struct batadv_tvlv_tt_vlan_data {
-       __be32 crc;
-       __be16 vid;
-       u16    reserved;
-};
-
-/**
- * struct batadv_tvlv_tt_change - translation table diff data
- * @flags: status indicators concerning the non-mesh client (see
- *  batadv_tt_client_flags)
- * @reserved: reserved field - useful for alignment purposes only
- * @addr: mac address of non-mesh client that triggered this tt change
- * @vid: VLAN identifier
- */
-struct batadv_tvlv_tt_change {
-       u8     flags;
-       u8     reserved[3];
-       u8     addr[ETH_ALEN];
-       __be16 vid;
-};
-
-/**
- * struct batadv_tvlv_roam_adv - roaming advertisement
- * @client: mac address of roaming client
- * @vid: VLAN identifier
- */
-struct batadv_tvlv_roam_adv {
-       u8     client[ETH_ALEN];
-       __be16 vid;
-};
-
-/**
- * struct batadv_tvlv_mcast_data - payload of a multicast tvlv
- * @flags: multicast flags announced by the orig node
- * @reserved: reserved field
- */
-struct batadv_tvlv_mcast_data {
-       u8 flags;
-       u8 reserved[3];
-};
-
-#endif /* _NET_BATMAN_ADV_PACKET_H_ */
index 40d9bf3e5bfe22aaa262e261abc7bf20bfe2bcb8..b6891e8b741c424496c58a21944f5bf30cff02ff 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -33,6 +34,7 @@
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/stddef.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bitarray.h"
 #include "bridge_loop_avoidance.h"
@@ -43,7 +45,6 @@
 #include "log.h"
 #include "network-coding.h"
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 #include "soft-interface.h"
 #include "tp_meter.h"
@@ -54,7 +55,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
                                       struct batadv_hard_iface *recv_if);
 
 /**
- * _batadv_update_route - set the router for this originator
+ * _batadv_update_route() - set the router for this originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be configured
  * @recv_if: the receive interface for which this route is set
@@ -118,7 +119,7 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_update_route - set the router for this originator
+ * batadv_update_route() - set the router for this originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be configured
  * @recv_if: the receive interface for which this route is set
@@ -145,7 +146,7 @@ out:
 }
 
 /**
- * batadv_window_protected - checks whether the host restarted and is in the
+ * batadv_window_protected() - checks whether the host restarted and is in the
  *  protection time.
  * @bat_priv: the bat priv with all the soft interface information
  * @seq_num_diff: difference between the current/received sequence number and
@@ -180,6 +181,14 @@ bool batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
        return false;
 }
 
+/**
+ * batadv_check_management_packet() - Check preconditions for management packets
+ * @skb: incoming packet buffer
+ * @hard_iface: incoming hard interface
+ * @header_len: minimal header length of packet type
+ *
+ * Return: true when management preconditions are met, false otherwise
+ */
 bool batadv_check_management_packet(struct sk_buff *skb,
                                    struct batadv_hard_iface *hard_iface,
                                    int header_len)
@@ -212,7 +221,7 @@ bool batadv_check_management_packet(struct sk_buff *skb,
 }
 
 /**
- * batadv_recv_my_icmp_packet - receive an icmp packet locally
+ * batadv_recv_my_icmp_packet() - receive an icmp packet locally
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: icmp packet to process
  *
@@ -347,6 +356,13 @@ out:
        return ret;
 }
 
+/**
+ * batadv_recv_icmp_packet() - Process incoming icmp packet
+ * @skb: incoming packet buffer
+ * @recv_if: incoming hard interface
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
 int batadv_recv_icmp_packet(struct sk_buff *skb,
                            struct batadv_hard_iface *recv_if)
 {
@@ -440,7 +456,7 @@ free_skb:
 }
 
 /**
- * batadv_check_unicast_packet - Check for malformed unicast packets
+ * batadv_check_unicast_packet() - Check for malformed unicast packets
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
  * @hdr_size: size of header to pull
@@ -478,7 +494,7 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_last_bonding_get - Get last_bonding_candidate of orig_node
+ * batadv_last_bonding_get() - Get last_bonding_candidate of orig_node
  * @orig_node: originator node whose last bonding candidate should be retrieved
  *
  * Return: last bonding candidate of router or NULL if not found
@@ -501,7 +517,7 @@ batadv_last_bonding_get(struct batadv_orig_node *orig_node)
 }
 
 /**
- * batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node
+ * batadv_last_bonding_replace() - Replace last_bonding_candidate of orig_node
  * @orig_node: originator node whose bonding candidates should be replaced
  * @new_candidate: new bonding candidate or NULL
  */
@@ -524,7 +540,7 @@ batadv_last_bonding_replace(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_find_router - find a suitable router for this originator
+ * batadv_find_router() - find a suitable router for this originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the destination node
  * @recv_if: pointer to interface this packet was received on
@@ -741,7 +757,7 @@ free_skb:
 }
 
 /**
- * batadv_reroute_unicast_packet - update the unicast header for re-routing
+ * batadv_reroute_unicast_packet() - update the unicast header for re-routing
  * @bat_priv: the bat priv with all the soft interface information
  * @unicast_packet: the unicast header to be updated
  * @dst_addr: the payload destination
@@ -904,7 +920,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_recv_unhandled_unicast_packet - receive and process packets which
+ * batadv_recv_unhandled_unicast_packet() - receive and process packets which
  *     are in the unicast number space but not yet known to the implementation
  * @skb: unicast tvlv packet to process
  * @recv_if: pointer to interface this packet was received on
@@ -935,6 +951,13 @@ free_skb:
        return NET_RX_DROP;
 }
 
+/**
+ * batadv_recv_unicast_packet() - Process incoming unicast packet
+ * @skb: incoming packet buffer
+ * @recv_if: incoming hard interface
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
 int batadv_recv_unicast_packet(struct sk_buff *skb,
                               struct batadv_hard_iface *recv_if)
 {
@@ -1036,7 +1059,7 @@ free_skb:
 }
 
 /**
- * batadv_recv_unicast_tvlv - receive and process unicast tvlv packets
+ * batadv_recv_unicast_tvlv() - receive and process unicast tvlv packets
  * @skb: unicast tvlv packet to process
  * @recv_if: pointer to interface this packet was received on
  *
@@ -1090,7 +1113,7 @@ free_skb:
 }
 
 /**
- * batadv_recv_frag_packet - process received fragment
+ * batadv_recv_frag_packet() - process received fragment
  * @skb: the received fragment
  * @recv_if: interface that the skb is received on
  *
@@ -1155,6 +1178,13 @@ free_skb:
        return ret;
 }
 
+/**
+ * batadv_recv_bcast_packet() - Process incoming broadcast packet
+ * @skb: incoming packet buffer
+ * @recv_if: incoming hard interface
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
 int batadv_recv_bcast_packet(struct sk_buff *skb,
                             struct batadv_hard_iface *recv_if)
 {
index 5ede16c32f157369ba08fdd52357d3e09e123b83..a1289bc5f1159f4fd438352fc74b666e8f7c331e 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
index 7895323fd2a79edf9dfe3b0fea9716729191b508..2a5ab6f1076d44a8ea094e60b5ad470f36b1d881 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -23,7 +24,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if.h>
 #include <linux/if_ether.h>
 #include <linux/jiffies.h>
@@ -54,7 +55,7 @@
 static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
 
 /**
- * batadv_send_skb_packet - send an already prepared packet
+ * batadv_send_skb_packet() - send an already prepared packet
  * @skb: the packet to send
  * @hard_iface: the interface to use to send the broadcast packet
  * @dst_addr: the payload destination
@@ -123,12 +124,30 @@ send_skb_err:
        return NET_XMIT_DROP;
 }
 
+/**
+ * batadv_send_broadcast_skb() - Send broadcast packet via hard interface
+ * @skb: packet to be transmitted (with batadv header and no outer eth header)
+ * @hard_iface: outgoing interface
+ *
+ * Return: A negative errno code is returned on a failure. A success does not
+ * guarantee the frame will be transmitted as it may be dropped due
+ * to congestion or traffic shaping.
+ */
 int batadv_send_broadcast_skb(struct sk_buff *skb,
                              struct batadv_hard_iface *hard_iface)
 {
        return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
 }
 
+/**
+ * batadv_send_unicast_skb() - Send unicast packet to neighbor
+ * @skb: packet to be transmitted (with batadv header and no outer eth header)
+ * @neigh: neighbor which is used as next hop to destination
+ *
+ * Return: A negative errno code is returned on a failure. A success does not
+ * guarantee the frame will be transmitted as it may be dropped due
+ * to congestion or traffic shaping.
+ */
 int batadv_send_unicast_skb(struct sk_buff *skb,
                            struct batadv_neigh_node *neigh)
 {
@@ -153,7 +172,7 @@ int batadv_send_unicast_skb(struct sk_buff *skb,
 }
 
 /**
- * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
+ * batadv_send_skb_to_orig() - Lookup next-hop and transmit skb.
  * @skb: Packet to be transmitted.
  * @orig_node: Final destination of the packet.
  * @recv_if: Interface used when receiving the packet (can be NULL).
@@ -216,7 +235,7 @@ free_skb:
 }
 
 /**
- * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
+ * batadv_send_skb_push_fill_unicast() - extend the buffer and initialize the
  *  common fields for unicast packets
  * @skb: the skb carrying the unicast header to initialize
  * @hdr_size: amount of bytes to push at the beginning of the skb
@@ -249,7 +268,7 @@ batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
 }
 
 /**
- * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
+ * batadv_send_skb_prepare_unicast() - encapsulate an skb with a unicast header
  * @skb: the skb containing the payload to encapsulate
  * @orig_node: the destination node
  *
@@ -264,7 +283,7 @@ static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
 }
 
 /**
- * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
+ * batadv_send_skb_prepare_unicast_4addr() - encapsulate an skb with a
  *  unicast 4addr header
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the skb containing the payload to encapsulate
@@ -308,7 +327,7 @@ out:
 }
 
 /**
- * batadv_send_skb_unicast - encapsulate and send an skb via unicast
+ * batadv_send_skb_unicast() - encapsulate and send an skb via unicast
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: payload to send
  * @packet_type: the batman unicast packet type to use
@@ -378,7 +397,7 @@ out:
 }
 
 /**
- * batadv_send_skb_via_tt_generic - send an skb via TT lookup
+ * batadv_send_skb_via_tt_generic() - send an skb via TT lookup
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: payload to send
  * @packet_type: the batman unicast packet type to use
@@ -425,7 +444,7 @@ int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_send_skb_via_gw - send an skb via gateway lookup
+ * batadv_send_skb_via_gw() - send an skb via gateway lookup
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: payload to send
  * @vid: the vid to be used to search the translation table
@@ -452,7 +471,7 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
 }
 
 /**
- * batadv_forw_packet_free - free a forwarding packet
+ * batadv_forw_packet_free() - free a forwarding packet
  * @forw_packet: The packet to free
  * @dropped: whether the packet is freed because is is dropped
  *
@@ -477,7 +496,7 @@ void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet,
 }
 
 /**
- * batadv_forw_packet_alloc - allocate a forwarding packet
+ * batadv_forw_packet_alloc() - allocate a forwarding packet
  * @if_incoming: The (optional) if_incoming to be grabbed
  * @if_outgoing: The (optional) if_outgoing to be grabbed
  * @queue_left: The (optional) queue counter to decrease
@@ -543,7 +562,7 @@ err:
 }
 
 /**
- * batadv_forw_packet_was_stolen - check whether someone stole this packet
+ * batadv_forw_packet_was_stolen() - check whether someone stole this packet
  * @forw_packet: the forwarding packet to check
  *
  * This function checks whether the given forwarding packet was claimed by
@@ -558,7 +577,7 @@ batadv_forw_packet_was_stolen(struct batadv_forw_packet *forw_packet)
 }
 
 /**
- * batadv_forw_packet_steal - claim a forw_packet for free()
+ * batadv_forw_packet_steal() - claim a forw_packet for free()
  * @forw_packet: the forwarding packet to steal
  * @lock: a key to the store to steal from (e.g. forw_{bat,bcast}_list_lock)
  *
@@ -589,7 +608,7 @@ bool batadv_forw_packet_steal(struct batadv_forw_packet *forw_packet,
 }
 
 /**
- * batadv_forw_packet_list_steal - claim a list of forward packets for free()
+ * batadv_forw_packet_list_steal() - claim a list of forward packets for free()
  * @forw_list: the to be stolen forward packets
  * @cleanup_list: a backup pointer, to be able to dispose the packet later
  * @hard_iface: the interface to steal forward packets from
@@ -625,7 +644,7 @@ batadv_forw_packet_list_steal(struct hlist_head *forw_list,
 }
 
 /**
- * batadv_forw_packet_list_free - free a list of forward packets
+ * batadv_forw_packet_list_free() - free a list of forward packets
  * @head: a list of to be freed forw_packets
  *
  * This function cancels the scheduling of any packet in the provided list,
@@ -649,7 +668,7 @@ static void batadv_forw_packet_list_free(struct hlist_head *head)
 }
 
 /**
- * batadv_forw_packet_queue - try to queue a forwarding packet
+ * batadv_forw_packet_queue() - try to queue a forwarding packet
  * @forw_packet: the forwarding packet to queue
  * @lock: a key to the store (e.g. forw_{bat,bcast}_list_lock)
  * @head: the shelve to queue it on (e.g. forw_{bat,bcast}_list)
@@ -693,7 +712,7 @@ static void batadv_forw_packet_queue(struct batadv_forw_packet *forw_packet,
 }
 
 /**
- * batadv_forw_packet_bcast_queue - try to queue a broadcast packet
+ * batadv_forw_packet_bcast_queue() - try to queue a broadcast packet
  * @bat_priv: the bat priv with all the soft interface information
  * @forw_packet: the forwarding packet to queue
  * @send_time: timestamp (jiffies) when the packet is to be sent
@@ -712,7 +731,7 @@ batadv_forw_packet_bcast_queue(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_forw_packet_ogmv1_queue - try to queue an OGMv1 packet
+ * batadv_forw_packet_ogmv1_queue() - try to queue an OGMv1 packet
  * @bat_priv: the bat priv with all the soft interface information
  * @forw_packet: the forwarding packet to queue
  * @send_time: timestamp (jiffies) when the packet is to be sent
@@ -730,7 +749,7 @@ void batadv_forw_packet_ogmv1_queue(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_add_bcast_packet_to_list - queue broadcast packet for multiple sends
+ * batadv_add_bcast_packet_to_list() - queue broadcast packet for multiple sends
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: broadcast packet to add
  * @delay: number of jiffies to wait before sending
@@ -790,7 +809,7 @@ err:
 }
 
 /**
- * batadv_forw_packet_bcasts_left - check if a retransmission is necessary
+ * batadv_forw_packet_bcasts_left() - check if a retransmission is necessary
  * @forw_packet: the forwarding packet to check
  * @hard_iface: the interface to check on
  *
@@ -818,7 +837,8 @@ batadv_forw_packet_bcasts_left(struct batadv_forw_packet *forw_packet,
 }
 
 /**
- * batadv_forw_packet_bcasts_inc - increment retransmission counter of a packet
+ * batadv_forw_packet_bcasts_inc() - increment retransmission counter of a
+ *  packet
  * @forw_packet: the packet to increase the counter for
  */
 static void
@@ -828,7 +848,7 @@ batadv_forw_packet_bcasts_inc(struct batadv_forw_packet *forw_packet)
 }
 
 /**
- * batadv_forw_packet_is_rebroadcast - check packet for previous transmissions
+ * batadv_forw_packet_is_rebroadcast() - check packet for previous transmissions
  * @forw_packet: the packet to check
  *
  * Return: True if this packet was transmitted before, false otherwise.
@@ -953,7 +973,7 @@ out:
 }
 
 /**
- * batadv_purge_outstanding_packets - stop/purge scheduled bcast/OGMv1 packets
+ * batadv_purge_outstanding_packets() - stop/purge scheduled bcast/OGMv1 packets
  * @bat_priv: the bat priv with all the soft interface information
  * @hard_iface: the hard interface to cancel and purge bcast/ogm packets on
  *
index a16b34f473ef02e46e642b46d4b31f588d89ea67..1e8c79093623ea82b8cb35cd0a20ab6fac4f0fdd 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -23,8 +24,7 @@
 #include <linux/compiler.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
-
-#include "packet.h"
+#include <uapi/linux/batadv_packet.h>
 
 struct sk_buff;
 
@@ -76,7 +76,7 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
                           unsigned short vid);
 
 /**
- * batadv_send_skb_via_tt - send an skb via TT lookup
+ * batadv_send_skb_via_tt() - send an skb via TT lookup
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the payload to send
  * @dst_hint: can be used to override the destination contained in the skb
@@ -97,7 +97,7 @@ static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_send_skb_via_tt_4addr - send an skb via TT lookup
+ * batadv_send_skb_via_tt_4addr() - send an skb via TT lookup
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the payload to send
  * @packet_subtype: the unicast 4addr packet subtype to use
index 9f673cdfecf8d32a0385caf218baa2f37f76e148..900c5ce21cd410dc77bc0e008ae714c95e6af55a 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -26,7 +27,7 @@
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/jiffies.h>
@@ -48,6 +49,7 @@
 #include <linux/stddef.h>
 #include <linux/string.h>
 #include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bat_algo.h"
 #include "bridge_loop_avoidance.h"
 #include "multicast.h"
 #include "network-coding.h"
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 #include "sysfs.h"
 #include "translation-table.h"
 
+/**
+ * batadv_skb_head_push() - Increase header size and move (push) head pointer
+ * @skb: packet buffer which should be modified
+ * @len: number of bytes to add
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_skb_head_push(struct sk_buff *skb, unsigned int len)
 {
        int result;
@@ -96,7 +104,7 @@ static int batadv_interface_release(struct net_device *dev)
 }
 
 /**
- * batadv_sum_counter - Sum the cpu-local counters for index 'idx'
+ * batadv_sum_counter() - Sum the cpu-local counters for index 'idx'
  * @bat_priv: the bat priv with all the soft interface information
  * @idx: index of counter to sum up
  *
@@ -169,7 +177,7 @@ static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
 }
 
 /**
- * batadv_interface_set_rx_mode - set the rx mode of a device
+ * batadv_interface_set_rx_mode() - set the rx mode of a device
  * @dev: registered network device to modify
  *
  * We do not actually need to set any rx filters for the virtual batman
@@ -389,7 +397,7 @@ end:
 }
 
 /**
- * batadv_interface_rx - receive ethernet frame on local batman-adv interface
+ * batadv_interface_rx() - receive ethernet frame on local batman-adv interface
  * @soft_iface: local interface which will receive the ethernet frame
  * @skb: ethernet frame for @soft_iface
  * @hdr_size: size of already parsed batman-adv header
@@ -501,8 +509,8 @@ out:
 }
 
 /**
- * batadv_softif_vlan_release - release vlan from lists and queue for free after
- *  rcu grace period
+ * batadv_softif_vlan_release() - release vlan from lists and queue for free
+ *  after rcu grace period
  * @ref: kref pointer of the vlan object
  */
 static void batadv_softif_vlan_release(struct kref *ref)
@@ -519,7 +527,7 @@ static void batadv_softif_vlan_release(struct kref *ref)
 }
 
 /**
- * batadv_softif_vlan_put - decrease the vlan object refcounter and
+ * batadv_softif_vlan_put() - decrease the vlan object refcounter and
  *  possibly release it
  * @vlan: the vlan object to release
  */
@@ -532,7 +540,7 @@ void batadv_softif_vlan_put(struct batadv_softif_vlan *vlan)
 }
 
 /**
- * batadv_softif_vlan_get - get the vlan object for a specific vid
+ * batadv_softif_vlan_get() - get the vlan object for a specific vid
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the identifier of the vlan object to retrieve
  *
@@ -561,7 +569,7 @@ struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_softif_create_vlan - allocate the needed resources for a new vlan
+ * batadv_softif_create_vlan() - allocate the needed resources for a new vlan
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier
  *
@@ -613,7 +621,7 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
 }
 
 /**
- * batadv_softif_destroy_vlan - remove and destroy a softif_vlan object
+ * batadv_softif_destroy_vlan() - remove and destroy a softif_vlan object
  * @bat_priv: the bat priv with all the soft interface information
  * @vlan: the object to remove
  */
@@ -631,7 +639,7 @@ static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_interface_add_vid - ndo_add_vid API implementation
+ * batadv_interface_add_vid() - ndo_add_vid API implementation
  * @dev: the netdev of the mesh interface
  * @proto: protocol of the vlan id
  * @vid: identifier of the new vlan
@@ -689,7 +697,7 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
 }
 
 /**
- * batadv_interface_kill_vid - ndo_kill_vid API implementation
+ * batadv_interface_kill_vid() - ndo_kill_vid API implementation
  * @dev: the netdev of the mesh interface
  * @proto: protocol of the vlan id
  * @vid: identifier of the deleted vlan
@@ -732,7 +740,7 @@ static struct lock_class_key batadv_netdev_xmit_lock_key;
 static struct lock_class_key batadv_netdev_addr_lock_key;
 
 /**
- * batadv_set_lockdep_class_one - Set lockdep class for a single tx queue
+ * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue
  * @dev: device which owns the tx queue
  * @txq: tx queue to modify
  * @_unused: always NULL
@@ -745,7 +753,7 @@ static void batadv_set_lockdep_class_one(struct net_device *dev,
 }
 
 /**
- * batadv_set_lockdep_class - Set txq and addr_list lockdep class
+ * batadv_set_lockdep_class() - Set txq and addr_list lockdep class
  * @dev: network device to modify
  */
 static void batadv_set_lockdep_class(struct net_device *dev)
@@ -755,7 +763,7 @@ static void batadv_set_lockdep_class(struct net_device *dev)
 }
 
 /**
- * batadv_softif_init_late - late stage initialization of soft interface
+ * batadv_softif_init_late() - late stage initialization of soft interface
  * @dev: registered network device to modify
  *
  * Return: error code on failures
@@ -860,7 +868,7 @@ free_bat_counters:
 }
 
 /**
- * batadv_softif_slave_add - Add a slave interface to a batadv_soft_interface
+ * batadv_softif_slave_add() - Add a slave interface to a batadv_soft_interface
  * @dev: batadv_soft_interface used as master interface
  * @slave_dev: net_device which should become the slave interface
  * @extack: extended ACK report struct
@@ -888,7 +896,7 @@ out:
 }
 
 /**
- * batadv_softif_slave_del - Delete a slave iface from a batadv_soft_interface
+ * batadv_softif_slave_del() - Delete a slave iface from a batadv_soft_interface
  * @dev: batadv_soft_interface used as master interface
  * @slave_dev: net_device which should be removed from the master interface
  *
@@ -1023,7 +1031,7 @@ static const struct ethtool_ops batadv_ethtool_ops = {
 };
 
 /**
- * batadv_softif_free - Deconstructor of batadv_soft_interface
+ * batadv_softif_free() - Deconstructor of batadv_soft_interface
  * @dev: Device to cleanup and remove
  */
 static void batadv_softif_free(struct net_device *dev)
@@ -1039,7 +1047,7 @@ static void batadv_softif_free(struct net_device *dev)
 }
 
 /**
- * batadv_softif_init_early - early stage initialization of soft interface
+ * batadv_softif_init_early() - early stage initialization of soft interface
  * @dev: registered network device to modify
  */
 static void batadv_softif_init_early(struct net_device *dev)
@@ -1063,6 +1071,13 @@ static void batadv_softif_init_early(struct net_device *dev)
        dev->ethtool_ops = &batadv_ethtool_ops;
 }
 
+/**
+ * batadv_softif_create() - Create and register soft interface
+ * @net: the applicable net namespace
+ * @name: name of the new soft interface
+ *
+ * Return: newly allocated soft_interface, NULL on errors
+ */
 struct net_device *batadv_softif_create(struct net *net, const char *name)
 {
        struct net_device *soft_iface;
@@ -1089,7 +1104,7 @@ struct net_device *batadv_softif_create(struct net *net, const char *name)
 }
 
 /**
- * batadv_softif_destroy_sysfs - deletion of batadv_soft_interface via sysfs
+ * batadv_softif_destroy_sysfs() - deletion of batadv_soft_interface via sysfs
  * @soft_iface: the to-be-removed batman-adv interface
  */
 void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
@@ -1111,7 +1126,8 @@ void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
 }
 
 /**
- * batadv_softif_destroy_netlink - deletion of batadv_soft_interface via netlink
+ * batadv_softif_destroy_netlink() - deletion of batadv_soft_interface via
+ *  netlink
  * @soft_iface: the to-be-removed batman-adv interface
  * @head: list pointer
  */
@@ -1139,6 +1155,12 @@ static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
        unregister_netdevice_queue(soft_iface, head);
 }
 
+/**
+ * batadv_softif_is_valid() - Check whether device is a batadv soft interface
+ * @net_dev: device which should be checked
+ *
+ * Return: true when net_dev is a batman-adv interface, false otherwise
+ */
 bool batadv_softif_is_valid(const struct net_device *net_dev)
 {
        if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx)
index 639c3abb214a4dc44053b2868e51484c9cd2c344..075c5b5b2ce1cb08bca5c16a7fa90f478098cc87 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
index aa187fd42475436b93ac2e76b834aa6cad870edf..c1578fa0b952ab2e997d62e4f95f2111df063882 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
 #include <linux/compiler.h>
 #include <linux/device.h>
 #include <linux/errno.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if.h>
 #include <linux/if_vlan.h>
 #include <linux/kernel.h>
+#include <linux/kobject.h>
 #include <linux/kref.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
@@ -37,6 +39,7 @@
 #include <linux/string.h>
 #include <linux/stringify.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bridge_loop_avoidance.h"
 #include "distributed-arp-table.h"
@@ -45,7 +48,6 @@
 #include "hard-interface.h"
 #include "log.h"
 #include "network-coding.h"
-#include "packet.h"
 #include "soft-interface.h"
 
 static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
@@ -63,7 +65,7 @@ static struct batadv_priv *batadv_kobj_to_batpriv(struct kobject *obj)
 }
 
 /**
- * batadv_vlan_kobj_to_batpriv - convert a vlan kobj in the associated batpriv
+ * batadv_vlan_kobj_to_batpriv() - convert a vlan kobj in the associated batpriv
  * @obj: kobject to convert
  *
  * Return: the associated batadv_priv struct.
@@ -83,7 +85,7 @@ static struct batadv_priv *batadv_vlan_kobj_to_batpriv(struct kobject *obj)
 }
 
 /**
- * batadv_kobj_to_vlan - convert a kobj in the associated softif_vlan struct
+ * batadv_kobj_to_vlan() - convert a kobj in the associated softif_vlan struct
  * @bat_priv: the bat priv with all the soft interface information
  * @obj: kobject to convert
  *
@@ -598,7 +600,7 @@ static ssize_t batadv_store_gw_bwidth(struct kobject *kobj,
 }
 
 /**
- * batadv_show_isolation_mark - print the current isolation mark/mask
+ * batadv_show_isolation_mark() - print the current isolation mark/mask
  * @kobj: kobject representing the private mesh sysfs directory
  * @attr: the batman-adv attribute the user is interacting with
  * @buff: the buffer that will contain the data to send back to the user
@@ -616,8 +618,8 @@ static ssize_t batadv_show_isolation_mark(struct kobject *kobj,
 }
 
 /**
- * batadv_store_isolation_mark - parse and store the isolation mark/mask entered
- *  by the user
+ * batadv_store_isolation_mark() - parse and store the isolation mark/mask
+ *  entered by the user
  * @kobj: kobject representing the private mesh sysfs directory
  * @attr: the batman-adv attribute the user is interacting with
  * @buff: the buffer containing the user data
@@ -733,6 +735,12 @@ static struct batadv_attribute *batadv_vlan_attrs[] = {
        NULL,
 };
 
+/**
+ * batadv_sysfs_add_meshif() - Add soft interface specific sysfs entries
+ * @dev: netdev struct of the soft interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_sysfs_add_meshif(struct net_device *dev)
 {
        struct kobject *batif_kobject = &dev->dev.kobj;
@@ -773,6 +781,10 @@ out:
        return -ENOMEM;
 }
 
+/**
+ * batadv_sysfs_del_meshif() - Remove soft interface specific sysfs entries
+ * @dev: netdev struct of the soft interface
+ */
 void batadv_sysfs_del_meshif(struct net_device *dev)
 {
        struct batadv_priv *bat_priv = netdev_priv(dev);
@@ -788,7 +800,7 @@ void batadv_sysfs_del_meshif(struct net_device *dev)
 }
 
 /**
- * batadv_sysfs_add_vlan - add all the needed sysfs objects for the new vlan
+ * batadv_sysfs_add_vlan() - add all the needed sysfs objects for the new vlan
  * @dev: netdev of the mesh interface
  * @vlan: private data of the newly added VLAN interface
  *
@@ -849,7 +861,7 @@ out:
 }
 
 /**
- * batadv_sysfs_del_vlan - remove all the sysfs objects for a given VLAN
+ * batadv_sysfs_del_vlan() - remove all the sysfs objects for a given VLAN
  * @bat_priv: the bat priv with all the soft interface information
  * @vlan: the private data of the VLAN to destroy
  */
@@ -894,7 +906,7 @@ static ssize_t batadv_show_mesh_iface(struct kobject *kobj,
 }
 
 /**
- * batadv_store_mesh_iface_finish - store new hardif mesh_iface state
+ * batadv_store_mesh_iface_finish() - store new hardif mesh_iface state
  * @net_dev: netdevice to add/remove to/from batman-adv soft-interface
  * @ifname: name of soft-interface to modify
  *
@@ -947,7 +959,7 @@ out:
 }
 
 /**
- * batadv_store_mesh_iface_work - store new hardif mesh_iface state
+ * batadv_store_mesh_iface_work() - store new hardif mesh_iface state
  * @work: work queue item
  *
  * Changes the parts of the hard+soft interface which can not be modified under
@@ -1043,7 +1055,7 @@ static ssize_t batadv_show_iface_status(struct kobject *kobj,
 #ifdef CONFIG_BATMAN_ADV_BATMAN_V
 
 /**
- * batadv_store_throughput_override - parse and store throughput override
+ * batadv_store_throughput_override() - parse and store throughput override
  *  entered by the user
  * @kobj: kobject representing the private mesh sysfs directory
  * @attr: the batman-adv attribute the user is interacting with
@@ -1130,6 +1142,13 @@ static struct batadv_attribute *batadv_batman_attrs[] = {
        NULL,
 };
 
+/**
+ * batadv_sysfs_add_hardif() - Add hard interface specific sysfs entries
+ * @hardif_obj: address where to store the pointer to new sysfs folder
+ * @dev: netdev struct of the hard interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev)
 {
        struct kobject *hardif_kobject = &dev->dev.kobj;
@@ -1164,6 +1183,11 @@ out:
        return -ENOMEM;
 }
 
+/**
+ * batadv_sysfs_del_hardif() - Remove hard interface specific sysfs entries
+ * @hardif_obj: address of the pointer which stores the batman-adv sysfs folder
+ *  of the hard interface
+ */
 void batadv_sysfs_del_hardif(struct kobject **hardif_obj)
 {
        kobject_uevent(*hardif_obj, KOBJ_REMOVE);
@@ -1172,6 +1196,16 @@ void batadv_sysfs_del_hardif(struct kobject **hardif_obj)
        *hardif_obj = NULL;
 }
 
+/**
+ * batadv_throw_uevent() - Send an uevent with batman-adv specific env data
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: subsystem type of event. Stored in uevent's BATTYPE
+ * @action: action type of event. Stored in uevent's BATACTION
+ * @data: string with additional information to the event (ignored for
+ *  BATADV_UEV_DEL). Stored in uevent's BATDATA
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
                        enum batadv_uev_action action, const char *data)
 {
index e487412e256bbe4a83def9d3e4f73e999dd62d6e..bbeee61221fae6b9e1ca69cb4da8fc23483705a9 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -35,10 +36,23 @@ struct net_device;
  */
 #define BATADV_SYSFS_VLAN_SUBDIR_PREFIX "vlan"
 
+/**
+ * struct batadv_attribute - sysfs export helper for batman-adv attributes
+ */
 struct batadv_attribute {
+       /** @attr: sysfs attribute file */
        struct attribute attr;
+
+       /**
+        * @show: function to export the current attribute's content to sysfs
+        */
        ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
                        char *buf);
+
+       /**
+        * @store: function to load new value from character buffer and save it
+        * in batman-adv attribute
+        */
        ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
                         char *buf, size_t count);
 };
index ebc4e2241c770d826fa8e731ba46ec043fe1992d..8b576712d0c1f772bdeba0b95575e37fad007f09 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2012-2017  B.A.T.M.A.N. contributors:
  *
  * Edo Monticelli, Antonio Quartulli
 #include "main.h"
 
 #include <linux/atomic.h>
-#include <linux/bug.h>
+#include <linux/build_bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/cache.h>
 #include <linux/compiler.h>
 #include <linux/err.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/init.h>
 #include <linux/jiffies.h>
 #include <linux/timer.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "hard-interface.h"
 #include "log.h"
 #include "netlink.h"
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 
 /**
@@ -97,7 +98,7 @@
 static u8 batadv_tp_prerandom[4096] __read_mostly;
 
 /**
- * batadv_tp_session_cookie - generate session cookie based on session ids
+ * batadv_tp_session_cookie() - generate session cookie based on session ids
  * @session: TP session identifier
  * @icmp_uid: icmp pseudo uid of the tp session
  *
@@ -115,7 +116,7 @@ static u32 batadv_tp_session_cookie(const u8 session[2], u8 icmp_uid)
 }
 
 /**
- * batadv_tp_cwnd - compute the new cwnd size
+ * batadv_tp_cwnd() - compute the new cwnd size
  * @base: base cwnd size value
  * @increment: the value to add to base to get the new size
  * @min: minimum cwnd value (usually MSS)
@@ -140,7 +141,7 @@ static u32 batadv_tp_cwnd(u32 base, u32 increment, u32 min)
 }
 
 /**
- * batadv_tp_updated_cwnd - update the Congestion Windows
+ * batadv_tp_updated_cwnd() - update the Congestion Windows
  * @tp_vars: the private data of the current TP meter session
  * @mss: maximum segment size of transmission
  *
@@ -176,7 +177,7 @@ static void batadv_tp_update_cwnd(struct batadv_tp_vars *tp_vars, u32 mss)
 }
 
 /**
- * batadv_tp_update_rto - calculate new retransmission timeout
+ * batadv_tp_update_rto() - calculate new retransmission timeout
  * @tp_vars: the private data of the current TP meter session
  * @new_rtt: new roundtrip time in msec
  */
@@ -212,7 +213,7 @@ static void batadv_tp_update_rto(struct batadv_tp_vars *tp_vars,
 }
 
 /**
- * batadv_tp_batctl_notify - send client status result to client
+ * batadv_tp_batctl_notify() - send client status result to client
  * @reason: reason for tp meter session stop
  * @dst: destination of tp_meter session
  * @bat_priv: the bat priv with all the soft interface information
@@ -244,7 +245,7 @@ static void batadv_tp_batctl_notify(enum batadv_tp_meter_reason reason,
 }
 
 /**
- * batadv_tp_batctl_error_notify - send client error result to client
+ * batadv_tp_batctl_error_notify() - send client error result to client
  * @reason: reason for tp meter session stop
  * @dst: destination of tp_meter session
  * @bat_priv: the bat priv with all the soft interface information
@@ -259,7 +260,7 @@ static void batadv_tp_batctl_error_notify(enum batadv_tp_meter_reason reason,
 }
 
 /**
- * batadv_tp_list_find - find a tp_vars object in the global list
+ * batadv_tp_list_find() - find a tp_vars object in the global list
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: the other endpoint MAC address to look for
  *
@@ -294,7 +295,8 @@ static struct batadv_tp_vars *batadv_tp_list_find(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tp_list_find_session - find tp_vars session object in the global list
+ * batadv_tp_list_find_session() - find tp_vars session object in the global
+ *  list
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: the other endpoint MAC address to look for
  * @session: session identifier
@@ -335,7 +337,7 @@ batadv_tp_list_find_session(struct batadv_priv *bat_priv, const u8 *dst,
 }
 
 /**
- * batadv_tp_vars_release - release batadv_tp_vars from lists and queue for
+ * batadv_tp_vars_release() - release batadv_tp_vars from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the batadv_tp_vars
  */
@@ -360,7 +362,7 @@ static void batadv_tp_vars_release(struct kref *ref)
 }
 
 /**
- * batadv_tp_vars_put - decrement the batadv_tp_vars refcounter and possibly
+ * batadv_tp_vars_put() - decrement the batadv_tp_vars refcounter and possibly
  *  release it
  * @tp_vars: the private data of the current TP meter session to be free'd
  */
@@ -370,7 +372,7 @@ static void batadv_tp_vars_put(struct batadv_tp_vars *tp_vars)
 }
 
 /**
- * batadv_tp_sender_cleanup - cleanup sender data and drop and timer
+ * batadv_tp_sender_cleanup() - cleanup sender data and drop and timer
  * @bat_priv: the bat priv with all the soft interface information
  * @tp_vars: the private data of the current TP meter session to cleanup
  */
@@ -400,7 +402,7 @@ static void batadv_tp_sender_cleanup(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tp_sender_end - print info about ended session and inform client
+ * batadv_tp_sender_end() - print info about ended session and inform client
  * @bat_priv: the bat priv with all the soft interface information
  * @tp_vars: the private data of the current TP meter session
  */
@@ -433,7 +435,7 @@ static void batadv_tp_sender_end(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tp_sender_shutdown - let sender thread/timer stop gracefully
+ * batadv_tp_sender_shutdown() - let sender thread/timer stop gracefully
  * @tp_vars: the private data of the current TP meter session
  * @reason: reason for tp meter session stop
  */
@@ -447,7 +449,7 @@ static void batadv_tp_sender_shutdown(struct batadv_tp_vars *tp_vars,
 }
 
 /**
- * batadv_tp_sender_finish - stop sender session after test_length was reached
+ * batadv_tp_sender_finish() - stop sender session after test_length was reached
  * @work: delayed work reference of the related tp_vars
  */
 static void batadv_tp_sender_finish(struct work_struct *work)
@@ -463,7 +465,7 @@ static void batadv_tp_sender_finish(struct work_struct *work)
 }
 
 /**
- * batadv_tp_reset_sender_timer - reschedule the sender timer
+ * batadv_tp_reset_sender_timer() - reschedule the sender timer
  * @tp_vars: the private TP meter data for this session
  *
  * Reschedule the timer using tp_vars->rto as delay
@@ -481,7 +483,7 @@ static void batadv_tp_reset_sender_timer(struct batadv_tp_vars *tp_vars)
 }
 
 /**
- * batadv_tp_sender_timeout - timer that fires in case of packet loss
+ * batadv_tp_sender_timeout() - timer that fires in case of packet loss
  * @t: address to timer_list inside tp_vars
  *
  * If fired it means that there was packet loss.
@@ -531,7 +533,7 @@ static void batadv_tp_sender_timeout(struct timer_list *t)
 }
 
 /**
- * batadv_tp_fill_prerandom - Fill buffer with prefetched random bytes
+ * batadv_tp_fill_prerandom() - Fill buffer with prefetched random bytes
  * @tp_vars: the private TP meter data for this session
  * @buf: Buffer to fill with bytes
  * @nbytes: amount of pseudorandom bytes
@@ -563,7 +565,7 @@ static void batadv_tp_fill_prerandom(struct batadv_tp_vars *tp_vars,
 }
 
 /**
- * batadv_tp_send_msg - send a single message
+ * batadv_tp_send_msg() - send a single message
  * @tp_vars: the private TP meter data for this session
  * @src: source mac address
  * @orig_node: the originator of the destination
@@ -623,7 +625,7 @@ static int batadv_tp_send_msg(struct batadv_tp_vars *tp_vars, const u8 *src,
 }
 
 /**
- * batadv_tp_recv_ack - ACK receiving function
+ * batadv_tp_recv_ack() - ACK receiving function
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the buffer containing the received packet
  *
@@ -765,7 +767,7 @@ out:
 }
 
 /**
- * batadv_tp_avail - check if congestion window is not full
+ * batadv_tp_avail() - check if congestion window is not full
  * @tp_vars: the private data of the current TP meter session
  * @payload_len: size of the payload of a single message
  *
@@ -783,7 +785,7 @@ static bool batadv_tp_avail(struct batadv_tp_vars *tp_vars,
 }
 
 /**
- * batadv_tp_wait_available - wait until congestion window becomes free or
+ * batadv_tp_wait_available() - wait until congestion window becomes free or
  *  timeout is reached
  * @tp_vars: the private data of the current TP meter session
  * @plen: size of the payload of a single message
@@ -805,7 +807,7 @@ static int batadv_tp_wait_available(struct batadv_tp_vars *tp_vars, size_t plen)
 }
 
 /**
- * batadv_tp_send - main sending thread of a tp meter session
+ * batadv_tp_send() - main sending thread of a tp meter session
  * @arg: address of the related tp_vars
  *
  * Return: nothing, this function never returns
@@ -904,7 +906,8 @@ out:
 }
 
 /**
- * batadv_tp_start_kthread - start new thread which manages the tp meter sender
+ * batadv_tp_start_kthread() - start new thread which manages the tp meter
+ *  sender
  * @tp_vars: the private data of the current TP meter session
  */
 static void batadv_tp_start_kthread(struct batadv_tp_vars *tp_vars)
@@ -935,7 +938,7 @@ static void batadv_tp_start_kthread(struct batadv_tp_vars *tp_vars)
 }
 
 /**
- * batadv_tp_start - start a new tp meter session
+ * batadv_tp_start() - start a new tp meter session
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: the receiver MAC address
  * @test_length: test length in milliseconds
@@ -1060,7 +1063,7 @@ void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst,
 }
 
 /**
- * batadv_tp_stop - stop currently running tp meter session
+ * batadv_tp_stop() - stop currently running tp meter session
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: the receiver MAC address
  * @return_value: reason for tp meter session stop
@@ -1092,7 +1095,7 @@ out:
 }
 
 /**
- * batadv_tp_reset_receiver_timer - reset the receiver shutdown timer
+ * batadv_tp_reset_receiver_timer() - reset the receiver shutdown timer
  * @tp_vars: the private data of the current TP meter session
  *
  * start the receiver shutdown timer or reset it if already started
@@ -1104,7 +1107,7 @@ static void batadv_tp_reset_receiver_timer(struct batadv_tp_vars *tp_vars)
 }
 
 /**
- * batadv_tp_receiver_shutdown - stop a tp meter receiver when timeout is
+ * batadv_tp_receiver_shutdown() - stop a tp meter receiver when timeout is
  *  reached without received ack
  * @t: address to timer_list inside tp_vars
  */
@@ -1149,7 +1152,7 @@ static void batadv_tp_receiver_shutdown(struct timer_list *t)
 }
 
 /**
- * batadv_tp_send_ack - send an ACK packet
+ * batadv_tp_send_ack() - send an ACK packet
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: the mac address of the destination originator
  * @seq: the sequence number to ACK
@@ -1221,7 +1224,7 @@ out:
 }
 
 /**
- * batadv_tp_handle_out_of_order - store an out of order packet
+ * batadv_tp_handle_out_of_order() - store an out of order packet
  * @tp_vars: the private data of the current TP meter session
  * @skb: the buffer containing the received packet
  *
@@ -1297,7 +1300,7 @@ out:
 }
 
 /**
- * batadv_tp_ack_unordered - update number received bytes in current stream
+ * batadv_tp_ack_unordered() - update number received bytes in current stream
  *  without gaps
  * @tp_vars: the private data of the current TP meter session
  */
@@ -1330,7 +1333,7 @@ static void batadv_tp_ack_unordered(struct batadv_tp_vars *tp_vars)
 }
 
 /**
- * batadv_tp_init_recv - return matching or create new receiver tp_vars
+ * batadv_tp_init_recv() - return matching or create new receiver tp_vars
  * @bat_priv: the bat priv with all the soft interface information
  * @icmp: received icmp tp msg
  *
@@ -1383,7 +1386,7 @@ out_unlock:
 }
 
 /**
- * batadv_tp_recv_msg - process a single data message
+ * batadv_tp_recv_msg() - process a single data message
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the buffer containing the received packet
  *
@@ -1468,7 +1471,7 @@ out:
 }
 
 /**
- * batadv_tp_meter_recv - main TP Meter receiving function
+ * batadv_tp_meter_recv() - main TP Meter receiving function
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the buffer containing the received packet
  */
@@ -1494,7 +1497,7 @@ void batadv_tp_meter_recv(struct batadv_priv *bat_priv, struct sk_buff *skb)
 }
 
 /**
- * batadv_tp_meter_init - initialize global tp_meter structures
+ * batadv_tp_meter_init() - initialize global tp_meter structures
  */
 void __init batadv_tp_meter_init(void)
 {
index a8ada5c123bd95455412b822b5ff78d7a538b9e8..c8b8f2cb2c2b507b3ef16426ffb34b3146e18c86 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2012-2017  B.A.T.M.A.N. contributors:
  *
  * Edo Monticelli, Antonio Quartulli
index 8a3ce79b1307b7f260ce2f64e96bdacfb9a322f0..7550a9ccd69537d55019b124834503283313607b 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
 
 #include <linux/atomic.h>
 #include <linux/bitops.h>
-#include <linux/bug.h>
+#include <linux/build_bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/cache.h>
 #include <linux/compiler.h>
 #include <linux/crc32c.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/init.h>
 #include <linux/jhash.h>
@@ -36,6 +37,7 @@
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/lockdep.h>
+#include <linux/net.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/rculist.h>
@@ -50,6 +52,7 @@
 #include <net/genetlink.h>
 #include <net/netlink.h>
 #include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "bridge_loop_avoidance.h"
@@ -58,7 +61,6 @@
 #include "log.h"
 #include "netlink.h"
 #include "originator.h"
-#include "packet.h"
 #include "soft-interface.h"
 #include "tvlv.h"
 
@@ -86,7 +88,7 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
                                 bool roaming);
 
 /**
- * batadv_compare_tt - check if two TT entries are the same
+ * batadv_compare_tt() - check if two TT entries are the same
  * @node: the list element pointer of the first TT entry
  * @data2: pointer to the tt_common_entry of the second TT entry
  *
@@ -105,7 +107,7 @@ static bool batadv_compare_tt(const struct hlist_node *node, const void *data2)
 }
 
 /**
- * batadv_choose_tt - return the index of the tt entry in the hash table
+ * batadv_choose_tt() - return the index of the tt entry in the hash table
  * @data: pointer to the tt_common_entry object to map
  * @size: the size of the hash table
  *
@@ -125,7 +127,7 @@ static inline u32 batadv_choose_tt(const void *data, u32 size)
 }
 
 /**
- * batadv_tt_hash_find - look for a client in the given hash table
+ * batadv_tt_hash_find() - look for a client in the given hash table
  * @hash: the hash table to search
  * @addr: the mac address of the client to look for
  * @vid: VLAN identifier
@@ -170,7 +172,7 @@ batadv_tt_hash_find(struct batadv_hashtable *hash, const u8 *addr,
 }
 
 /**
- * batadv_tt_local_hash_find - search the local table for a given client
+ * batadv_tt_local_hash_find() - search the local table for a given client
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client to look for
  * @vid: VLAN identifier
@@ -195,7 +197,7 @@ batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
 }
 
 /**
- * batadv_tt_global_hash_find - search the global table for a given client
+ * batadv_tt_global_hash_find() - search the global table for a given client
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client to look for
  * @vid: VLAN identifier
@@ -220,7 +222,7 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
 }
 
 /**
- * batadv_tt_local_entry_free_rcu - free the tt_local_entry
+ * batadv_tt_local_entry_free_rcu() - free the tt_local_entry
  * @rcu: rcu pointer of the tt_local_entry
  */
 static void batadv_tt_local_entry_free_rcu(struct rcu_head *rcu)
@@ -234,7 +236,7 @@ static void batadv_tt_local_entry_free_rcu(struct rcu_head *rcu)
 }
 
 /**
- * batadv_tt_local_entry_release - release tt_local_entry from lists and queue
+ * batadv_tt_local_entry_release() - release tt_local_entry from lists and queue
  *  for free after rcu grace period
  * @ref: kref pointer of the nc_node
  */
@@ -251,7 +253,7 @@ static void batadv_tt_local_entry_release(struct kref *ref)
 }
 
 /**
- * batadv_tt_local_entry_put - decrement the tt_local_entry refcounter and
+ * batadv_tt_local_entry_put() - decrement the tt_local_entry refcounter and
  *  possibly release it
  * @tt_local_entry: tt_local_entry to be free'd
  */
@@ -263,7 +265,7 @@ batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry)
 }
 
 /**
- * batadv_tt_global_entry_free_rcu - free the tt_global_entry
+ * batadv_tt_global_entry_free_rcu() - free the tt_global_entry
  * @rcu: rcu pointer of the tt_global_entry
  */
 static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
@@ -277,8 +279,8 @@ static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
 }
 
 /**
- * batadv_tt_global_entry_release - release tt_global_entry from lists and queue
- *  for free after rcu grace period
+ * batadv_tt_global_entry_release() - release tt_global_entry from lists and
+ *  queue for free after rcu grace period
  * @ref: kref pointer of the nc_node
  */
 static void batadv_tt_global_entry_release(struct kref *ref)
@@ -294,7 +296,7 @@ static void batadv_tt_global_entry_release(struct kref *ref)
 }
 
 /**
- * batadv_tt_global_entry_put - decrement the tt_global_entry refcounter and
+ * batadv_tt_global_entry_put() - decrement the tt_global_entry refcounter and
  *  possibly release it
  * @tt_global_entry: tt_global_entry to be free'd
  */
@@ -306,7 +308,7 @@ batadv_tt_global_entry_put(struct batadv_tt_global_entry *tt_global_entry)
 }
 
 /**
- * batadv_tt_global_hash_count - count the number of orig entries
+ * batadv_tt_global_hash_count() - count the number of orig entries
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client to count entries for
  * @vid: VLAN identifier
@@ -331,8 +333,8 @@ int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_size_mod - change the size by v of the local table identified
- *  by vid
+ * batadv_tt_local_size_mod() - change the size by v of the local table
+ *  identified by vid
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier of the sub-table to change
  * @v: the amount to sum to the local table size
@@ -352,8 +354,8 @@ static void batadv_tt_local_size_mod(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_size_inc - increase by one the local table size for the given
- *  vid
+ * batadv_tt_local_size_inc() - increase by one the local table size for the
+ *  given vid
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier
  */
@@ -364,8 +366,8 @@ static void batadv_tt_local_size_inc(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_size_dec - decrease by one the local table size for the given
- *  vid
+ * batadv_tt_local_size_dec() - decrease by one the local table size for the
+ *  given vid
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier
  */
@@ -376,7 +378,7 @@ static void batadv_tt_local_size_dec(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_global_size_mod - change the size by v of the global table
+ * batadv_tt_global_size_mod() - change the size by v of the global table
  *  for orig_node identified by vid
  * @orig_node: the originator for which the table has to be modified
  * @vid: the VLAN identifier
@@ -404,7 +406,7 @@ static void batadv_tt_global_size_mod(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_tt_global_size_inc - increase by one the global table size for the
+ * batadv_tt_global_size_inc() - increase by one the global table size for the
  *  given vid
  * @orig_node: the originator which global table size has to be decreased
  * @vid: the vlan identifier
@@ -416,7 +418,7 @@ static void batadv_tt_global_size_inc(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_tt_global_size_dec - decrease by one the global table size for the
+ * batadv_tt_global_size_dec() - decrease by one the global table size for the
  *  given vid
  * @orig_node: the originator which global table size has to be decreased
  * @vid: the vlan identifier
@@ -428,7 +430,7 @@ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_tt_orig_list_entry_free_rcu - free the orig_entry
+ * batadv_tt_orig_list_entry_free_rcu() - free the orig_entry
  * @rcu: rcu pointer of the orig_entry
  */
 static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
@@ -441,7 +443,7 @@ static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
 }
 
 /**
- * batadv_tt_orig_list_entry_release - release tt orig entry from lists and
+ * batadv_tt_orig_list_entry_release() - release tt orig entry from lists and
  *  queue for free after rcu grace period
  * @ref: kref pointer of the tt orig entry
  */
@@ -457,7 +459,7 @@ static void batadv_tt_orig_list_entry_release(struct kref *ref)
 }
 
 /**
- * batadv_tt_orig_list_entry_put - decrement the tt orig entry refcounter and
+ * batadv_tt_orig_list_entry_put() - decrement the tt orig entry refcounter and
  *  possibly release it
  * @orig_entry: tt orig entry to be free'd
  */
@@ -468,7 +470,7 @@ batadv_tt_orig_list_entry_put(struct batadv_tt_orig_list_entry *orig_entry)
 }
 
 /**
- * batadv_tt_local_event - store a local TT event (ADD/DEL)
+ * batadv_tt_local_event() - store a local TT event (ADD/DEL)
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_local_entry: the TT entry involved in the event
  * @event_flags: flags to store in the event structure
@@ -543,7 +545,7 @@ unlock:
 }
 
 /**
- * batadv_tt_len - compute length in bytes of given number of tt changes
+ * batadv_tt_len() - compute length in bytes of given number of tt changes
  * @changes_num: number of tt changes
  *
  * Return: computed length in bytes.
@@ -554,7 +556,7 @@ static int batadv_tt_len(int changes_num)
 }
 
 /**
- * batadv_tt_entries - compute the number of entries fitting in tt_len bytes
+ * batadv_tt_entries() - compute the number of entries fitting in tt_len bytes
  * @tt_len: available space
  *
  * Return: the number of entries.
@@ -565,8 +567,8 @@ static u16 batadv_tt_entries(u16 tt_len)
 }
 
 /**
- * batadv_tt_local_table_transmit_size - calculates the local translation table
- *  size when transmitted over the air
+ * batadv_tt_local_table_transmit_size() - calculates the local translation
+ *  table size when transmitted over the air
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: local translation table size in bytes.
@@ -625,7 +627,7 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_add - add a new client to the local table or update an
+ * batadv_tt_local_add() - add a new client to the local table or update an
  *  existing client
  * @soft_iface: netdev struct of the mesh interface
  * @addr: the mac address of the client to add
@@ -830,7 +832,7 @@ out:
 }
 
 /**
- * batadv_tt_prepare_tvlv_global_data - prepare the TVLV TT header to send
+ * batadv_tt_prepare_tvlv_global_data() - prepare the TVLV TT header to send
  *  within a TT Response directed to another node
  * @orig_node: originator for which the TT data has to be prepared
  * @tt_data: uninitialised pointer to the address of the TVLV buffer
@@ -903,8 +905,8 @@ out:
 }
 
 /**
- * batadv_tt_prepare_tvlv_local_data - allocate and prepare the TT TVLV for this
- *  node
+ * batadv_tt_prepare_tvlv_local_data() - allocate and prepare the TT TVLV for
+ *  this node
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_data: uninitialised pointer to the address of the TVLV buffer
  * @tt_change: uninitialised pointer to the address of the area where the TT
@@ -977,8 +979,8 @@ out:
 }
 
 /**
- * batadv_tt_tvlv_container_update - update the translation table tvlv container
- *  after local tt changes have been committed
+ * batadv_tt_tvlv_container_update() - update the translation table tvlv
+ *  container after local tt changes have been committed
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
@@ -1053,6 +1055,14 @@ container_register:
 }
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
+/**
+ * batadv_tt_local_seq_print_text() - Print the local tt table in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
@@ -1123,7 +1133,7 @@ out:
 #endif
 
 /**
- * batadv_tt_local_dump_entry - Dump one TT local entry into a message
+ * batadv_tt_local_dump_entry() - Dump one TT local entry into a message
  * @msg :Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -1179,7 +1189,7 @@ batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_tt_local_dump_bucket - Dump one TT local bucket into a message
+ * batadv_tt_local_dump_bucket() - Dump one TT local bucket into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -1216,7 +1226,7 @@ batadv_tt_local_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_tt_local_dump - Dump TT local entries into a message
+ * batadv_tt_local_dump() - Dump TT local entries into a message
  * @msg: Netlink message to dump into
  * @cb: Parameters from query
  *
@@ -1300,7 +1310,7 @@ batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_remove - logically remove an entry from the local table
+ * batadv_tt_local_remove() - logically remove an entry from the local table
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the MAC address of the client to remove
  * @vid: VLAN identifier
@@ -1362,7 +1372,7 @@ out:
 }
 
 /**
- * batadv_tt_local_purge_list - purge inactive tt local entries
+ * batadv_tt_local_purge_list() - purge inactive tt local entries
  * @bat_priv: the bat priv with all the soft interface information
  * @head: pointer to the list containing the local tt entries
  * @timeout: parameter deciding whether a given tt local entry is considered
@@ -1397,7 +1407,7 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_purge - purge inactive tt local entries
+ * batadv_tt_local_purge() - purge inactive tt local entries
  * @bat_priv: the bat priv with all the soft interface information
  * @timeout: parameter deciding whether a given tt local entry is considered
  *  inactive or not
@@ -1490,7 +1500,7 @@ static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_global_orig_entry_find - find a TT orig_list_entry
+ * batadv_tt_global_orig_entry_find() - find a TT orig_list_entry
  * @entry: the TT global entry where the orig_list_entry has to be
  *  extracted from
  * @orig_node: the originator for which the orig_list_entry has to be found
@@ -1524,8 +1534,8 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
 }
 
 /**
- * batadv_tt_global_entry_has_orig - check if a TT global entry is also handled
- *  by a given originator
+ * batadv_tt_global_entry_has_orig() - check if a TT global entry is also
+ *  handled by a given originator
  * @entry: the TT global entry to check
  * @orig_node: the originator to search in the list
  *
@@ -1550,7 +1560,7 @@ batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
 }
 
 /**
- * batadv_tt_global_sync_flags - update TT sync flags
+ * batadv_tt_global_sync_flags() - update TT sync flags
  * @tt_global: the TT global entry to update sync flags in
  *
  * Updates the sync flag bits in the tt_global flag attribute with a logical
@@ -1574,7 +1584,7 @@ batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global)
 }
 
 /**
- * batadv_tt_global_orig_entry_add - add or update a TT orig entry
+ * batadv_tt_global_orig_entry_add() - add or update a TT orig entry
  * @tt_global: the TT global entry to add an orig entry in
  * @orig_node: the originator to add an orig entry for
  * @ttvn: translation table version number of this changeset
@@ -1624,7 +1634,7 @@ out:
 }
 
 /**
- * batadv_tt_global_add - add a new TT global entry or update an existing one
+ * batadv_tt_global_add() - add a new TT global entry or update an existing one
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the originator announcing the client
  * @tt_addr: the mac address of the non-mesh client
@@ -1796,7 +1806,7 @@ out:
 }
 
 /**
- * batadv_transtable_best_orig - Get best originator list entry from tt entry
+ * batadv_transtable_best_orig() - Get best originator list entry from tt entry
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_global_entry: global translation table entry to be analyzed
  *
@@ -1842,8 +1852,8 @@ batadv_transtable_best_orig(struct batadv_priv *bat_priv,
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_tt_global_print_entry - print all orig nodes who announce the address
- *  for this global entry
+ * batadv_tt_global_print_entry() - print all orig nodes who announce the
+ *  address for this global entry
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_global_entry: global translation table entry to be printed
  * @seq: debugfs table seq_file struct
@@ -1925,6 +1935,13 @@ print_list:
        }
 }
 
+/**
+ * batadv_tt_global_seq_print_text() - Print the global tt table in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
@@ -1967,7 +1984,7 @@ out:
 #endif
 
 /**
- * batadv_tt_global_dump_subentry - Dump all TT local entries into a message
+ * batadv_tt_global_dump_subentry() - Dump all TT local entries into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2028,7 +2045,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_tt_global_dump_entry - Dump one TT global entry into a message
+ * batadv_tt_global_dump_entry() - Dump one TT global entry into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2073,7 +2090,7 @@ batadv_tt_global_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_tt_global_dump_bucket - Dump one TT local bucket into a message
+ * batadv_tt_global_dump_bucket() - Dump one TT local bucket into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2112,7 +2129,7 @@ batadv_tt_global_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_tt_global_dump -  Dump TT global entries into a message
+ * batadv_tt_global_dump() -  Dump TT global entries into a message
  * @msg: Netlink message to dump into
  * @cb: Parameters from query
  *
@@ -2180,7 +2197,7 @@ int batadv_tt_global_dump(struct sk_buff *msg, struct netlink_callback *cb)
 }
 
 /**
- * _batadv_tt_global_del_orig_entry - remove and free an orig_entry
+ * _batadv_tt_global_del_orig_entry() - remove and free an orig_entry
  * @tt_global_entry: the global entry to remove the orig_entry from
  * @orig_entry: the orig entry to remove and free
  *
@@ -2222,7 +2239,7 @@ batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
 }
 
 /**
- * batadv_tt_global_del_orig_node - remove orig_node from a global tt entry
+ * batadv_tt_global_del_orig_node() - remove orig_node from a global tt entry
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_global_entry: the global entry to remove the orig_node from
  * @orig_node: the originator announcing the client
@@ -2301,7 +2318,7 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_global_del - remove a client from the global table
+ * batadv_tt_global_del() - remove a client from the global table
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: an originator serving this client
  * @addr: the mac address of the client
@@ -2367,8 +2384,8 @@ out:
 }
 
 /**
- * batadv_tt_global_del_orig - remove all the TT global entries belonging to the
- *  given originator matching the provided vid
+ * batadv_tt_global_del_orig() - remove all the TT global entries belonging to
+ *  the given originator matching the provided vid
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the originator owning the entries to remove
  * @match_vid: the VLAN identifier to match. If negative all the entries will be
@@ -2539,7 +2556,7 @@ _batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
 }
 
 /**
- * batadv_transtable_search - get the mesh destination for a given client
+ * batadv_transtable_search() - get the mesh destination for a given client
  * @bat_priv: the bat priv with all the soft interface information
  * @src: mac address of the source client
  * @addr: mac address of the destination client
@@ -2599,7 +2616,7 @@ out:
 }
 
 /**
- * batadv_tt_global_crc - calculates the checksum of the local table belonging
+ * batadv_tt_global_crc() - calculates the checksum of the local table belonging
  *  to the given orig_node
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: originator for which the CRC should be computed
@@ -2694,7 +2711,7 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_crc - calculates the checksum of the local table
+ * batadv_tt_local_crc() - calculates the checksum of the local table
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: VLAN identifier for which the CRC32 has to be computed
  *
@@ -2751,7 +2768,7 @@ static u32 batadv_tt_local_crc(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_req_node_release - free tt_req node entry
+ * batadv_tt_req_node_release() - free tt_req node entry
  * @ref: kref pointer of the tt req_node entry
  */
 static void batadv_tt_req_node_release(struct kref *ref)
@@ -2764,7 +2781,7 @@ static void batadv_tt_req_node_release(struct kref *ref)
 }
 
 /**
- * batadv_tt_req_node_put - decrement the tt_req_node refcounter and
+ * batadv_tt_req_node_put() - decrement the tt_req_node refcounter and
  *  possibly release it
  * @tt_req_node: tt_req_node to be free'd
  */
@@ -2826,7 +2843,7 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_req_node_new - search and possibly create a tt_req_node object
+ * batadv_tt_req_node_new() - search and possibly create a tt_req_node object
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node this request is being issued for
  *
@@ -2863,7 +2880,7 @@ unlock:
 }
 
 /**
- * batadv_tt_local_valid - verify that given tt entry is a valid one
+ * batadv_tt_local_valid() - verify that given tt entry is a valid one
  * @entry_ptr: to be checked local tt entry
  * @data_ptr: not used but definition required to satisfy the callback prototype
  *
@@ -2897,7 +2914,7 @@ static bool batadv_tt_global_valid(const void *entry_ptr,
 }
 
 /**
- * batadv_tt_tvlv_generate - fill the tvlv buff with the tt entries from the
+ * batadv_tt_tvlv_generate() - fill the tvlv buff with the tt entries from the
  *  specified tt hash
  * @bat_priv: the bat priv with all the soft interface information
  * @hash: hash table containing the tt entries
@@ -2948,7 +2965,7 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_global_check_crc - check if all the CRCs are correct
+ * batadv_tt_global_check_crc() - check if all the CRCs are correct
  * @orig_node: originator for which the CRCs have to be checked
  * @tt_vlan: pointer to the first tvlv VLAN entry
  * @num_vlan: number of tvlv VLAN entries
@@ -3005,7 +3022,7 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_tt_local_update_crc - update all the local CRCs
+ * batadv_tt_local_update_crc() - update all the local CRCs
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_tt_local_update_crc(struct batadv_priv *bat_priv)
@@ -3021,7 +3038,7 @@ static void batadv_tt_local_update_crc(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_global_update_crc - update all the global CRCs for this orig_node
+ * batadv_tt_global_update_crc() - update all the global CRCs for this orig_node
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the orig_node for which the CRCs have to be updated
  */
@@ -3048,7 +3065,7 @@ static void batadv_tt_global_update_crc(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_send_tt_request - send a TT Request message to a given node
+ * batadv_send_tt_request() - send a TT Request message to a given node
  * @bat_priv: the bat priv with all the soft interface information
  * @dst_orig_node: the destination of the message
  * @ttvn: the version number that the source of the message is looking for
@@ -3137,7 +3154,7 @@ out:
 }
 
 /**
- * batadv_send_other_tt_response - send reply to tt request concerning another
+ * batadv_send_other_tt_response() - send reply to tt request concerning another
  *  node's translation table
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_data: tt data containing the tt request information
@@ -3270,8 +3287,8 @@ out:
 }
 
 /**
- * batadv_send_my_tt_response - send reply to tt request concerning this node's
- *  translation table
+ * batadv_send_my_tt_response() - send reply to tt request concerning this
+ *  node's translation table
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_data: tt data containing the tt request information
  * @req_src: mac address of tt request sender
@@ -3388,7 +3405,7 @@ out:
 }
 
 /**
- * batadv_send_tt_response - send reply to tt request
+ * batadv_send_tt_response() - send reply to tt request
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_data: tt data containing the tt request information
  * @req_src: mac address of tt request sender
@@ -3484,7 +3501,7 @@ static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_is_my_client - check if a client is served by the local node
+ * batadv_is_my_client() - check if a client is served by the local node
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client to check
  * @vid: VLAN identifier
@@ -3514,7 +3531,7 @@ out:
 }
 
 /**
- * batadv_handle_tt_response - process incoming tt reply
+ * batadv_handle_tt_response() - process incoming tt reply
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_data: tt data containing the tt request information
  * @resp_src: mac address of tt reply sender
@@ -3607,7 +3624,7 @@ static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_check_roam_count - check if a client has roamed too frequently
+ * batadv_tt_check_roam_count() - check if a client has roamed too frequently
  * @bat_priv: the bat priv with all the soft interface information
  * @client: mac address of the roaming client
  *
@@ -3662,7 +3679,7 @@ unlock:
 }
 
 /**
- * batadv_send_roam_adv - send a roaming advertisement message
+ * batadv_send_roam_adv() - send a roaming advertisement message
  * @bat_priv: the bat priv with all the soft interface information
  * @client: mac address of the roaming client
  * @vid: VLAN identifier
@@ -3727,6 +3744,10 @@ static void batadv_tt_purge(struct work_struct *work)
                           msecs_to_jiffies(BATADV_TT_WORK_PERIOD));
 }
 
+/**
+ * batadv_tt_free() - Free translation table of soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_tt_free(struct batadv_priv *bat_priv)
 {
        batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_TT, 1);
@@ -3744,7 +3765,7 @@ void batadv_tt_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_local_set_flags - set or unset the specified flags on the local
+ * batadv_tt_local_set_flags() - set or unset the specified flags on the local
  *  table and possibly count them in the TT size
  * @bat_priv: the bat priv with all the soft interface information
  * @flags: the flag to switch
@@ -3830,7 +3851,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_local_commit_changes_nolock - commit all pending local tt changes
+ * batadv_tt_local_commit_changes_nolock() - commit all pending local tt changes
  *  which have been queued in the time since the last commit
  * @bat_priv: the bat priv with all the soft interface information
  *
@@ -3863,7 +3884,7 @@ static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_local_commit_changes - commit all pending local tt changes which
+ * batadv_tt_local_commit_changes() - commit all pending local tt changes which
  *  have been queued in the time since the last commit
  * @bat_priv: the bat priv with all the soft interface information
  */
@@ -3874,6 +3895,15 @@ void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv)
        spin_unlock_bh(&bat_priv->tt.commit_lock);
 }
 
+/**
+ * batadv_is_ap_isolated() - Check if packet from upper layer should be dropped
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: source mac address of packet
+ * @dst: destination mac address of packet
+ * @vid: vlan id of packet
+ *
+ * Return: true when src+dst(+vid) pair should be isolated, false otherwise
+ */
 bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, u8 *src, u8 *dst,
                           unsigned short vid)
 {
@@ -3909,7 +3939,7 @@ vlan_put:
 }
 
 /**
- * batadv_tt_update_orig - update global translation table with new tt
+ * batadv_tt_update_orig() - update global translation table with new tt
  *  information received via ogms
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the orig_node of the ogm
@@ -3994,7 +4024,7 @@ request_table:
 }
 
 /**
- * batadv_tt_global_client_is_roaming - check if a client is marked as roaming
+ * batadv_tt_global_client_is_roaming() - check if a client is marked as roaming
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client to check
  * @vid: VLAN identifier
@@ -4020,7 +4050,7 @@ out:
 }
 
 /**
- * batadv_tt_local_client_is_roaming - tells whether the client is roaming
+ * batadv_tt_local_client_is_roaming() - tells whether the client is roaming
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the local client to query
  * @vid: VLAN identifier
@@ -4045,6 +4075,15 @@ out:
        return ret;
 }
 
+/**
+ * batadv_tt_add_temporary_global_entry() - Add temporary entry to global TT
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which the temporary entry should be associated with
+ * @addr: mac address of the client
+ * @vid: VLAN id of the new temporary global translation table
+ *
+ * Return: true when temporary tt entry could be added, false otherwise
+ */
 bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
                                          struct batadv_orig_node *orig_node,
                                          const unsigned char *addr,
@@ -4069,7 +4108,7 @@ bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_resize_to_mtu - resize the local translation table fit the
+ * batadv_tt_local_resize_to_mtu() - resize the local translation table fit the
  *  maximum packet size that can be transported through the mesh
  * @soft_iface: netdev struct of the mesh interface
  *
@@ -4110,7 +4149,7 @@ void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface)
 }
 
 /**
- * batadv_tt_tvlv_ogm_handler_v1 - process incoming tt tvlv container
+ * batadv_tt_tvlv_ogm_handler_v1() - process incoming tt tvlv container
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node of the ogm
  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -4149,7 +4188,7 @@ static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_tvlv_unicast_handler_v1 - process incoming (unicast) tt tvlv
+ * batadv_tt_tvlv_unicast_handler_v1() - process incoming (unicast) tt tvlv
  *  container
  * @bat_priv: the bat priv with all the soft interface information
  * @src: mac address of tt tvlv sender
@@ -4231,7 +4270,8 @@ static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_roam_tvlv_unicast_handler_v1 - process incoming tt roam tvlv container
+ * batadv_roam_tvlv_unicast_handler_v1() - process incoming tt roam tvlv
+ *  container
  * @bat_priv: the bat priv with all the soft interface information
  * @src: mac address of tt tvlv sender
  * @dst: mac address of tt tvlv recipient
@@ -4281,7 +4321,7 @@ out:
 }
 
 /**
- * batadv_tt_init - initialise the translation table internals
+ * batadv_tt_init() - initialise the translation table internals
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 on success or negative error number in case of failure.
@@ -4317,7 +4357,7 @@ int batadv_tt_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_global_is_isolated - check if a client is marked as isolated
+ * batadv_tt_global_is_isolated() - check if a client is marked as isolated
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client
  * @vid: the identifier of the VLAN where this client is connected
@@ -4343,7 +4383,7 @@ bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_cache_init - Initialize tt memory object cache
+ * batadv_tt_cache_init() - Initialize tt memory object cache
  *
  * Return: 0 on success or negative error number in case of failure.
  */
@@ -4412,7 +4452,7 @@ err_tt_tl_destroy:
 }
 
 /**
- * batadv_tt_cache_destroy - Destroy tt memory object cache
+ * batadv_tt_cache_destroy() - Destroy tt memory object cache
  */
 void batadv_tt_cache_destroy(void)
 {
index 411d586191da619669e5bd1a17b85b8b46eb4dfb..8d9e3abec2c84e2d0f5453d06c1d59e41fc1d48e 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
index 1d9e267caec92801354e6f7947151e1e3dabf40a..5ffcb45ac6ffab4cf51298a64c31470a51e23e08 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -19,7 +20,7 @@
 
 #include <linux/byteorder/generic.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/kernel.h>
 #include <linux/kref.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
 #include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 #include "tvlv.h"
 
 /**
- * batadv_tvlv_handler_release - release tvlv handler from lists and queue for
+ * batadv_tvlv_handler_release() - release tvlv handler from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the tvlv
  */
@@ -55,7 +56,7 @@ static void batadv_tvlv_handler_release(struct kref *ref)
 }
 
 /**
- * batadv_tvlv_handler_put - decrement the tvlv container refcounter and
+ * batadv_tvlv_handler_put() - decrement the tvlv container refcounter and
  *  possibly release it
  * @tvlv_handler: the tvlv handler to free
  */
@@ -65,7 +66,7 @@ static void batadv_tvlv_handler_put(struct batadv_tvlv_handler *tvlv_handler)
 }
 
 /**
- * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
+ * batadv_tvlv_handler_get() - retrieve tvlv handler from the tvlv handler list
  *  based on the provided type and version (both need to match)
  * @bat_priv: the bat priv with all the soft interface information
  * @type: tvlv handler type to look for
@@ -99,7 +100,7 @@ batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
 }
 
 /**
- * batadv_tvlv_container_release - release tvlv from lists and free
+ * batadv_tvlv_container_release() - release tvlv from lists and free
  * @ref: kref pointer of the tvlv
  */
 static void batadv_tvlv_container_release(struct kref *ref)
@@ -111,7 +112,7 @@ static void batadv_tvlv_container_release(struct kref *ref)
 }
 
 /**
- * batadv_tvlv_container_put - decrement the tvlv container refcounter and
+ * batadv_tvlv_container_put() - decrement the tvlv container refcounter and
  *  possibly release it
  * @tvlv: the tvlv container to free
  */
@@ -121,7 +122,7 @@ static void batadv_tvlv_container_put(struct batadv_tvlv_container *tvlv)
 }
 
 /**
- * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
+ * batadv_tvlv_container_get() - retrieve tvlv container from the tvlv container
  *  list based on the provided type and version (both need to match)
  * @bat_priv: the bat priv with all the soft interface information
  * @type: tvlv container type to look for
@@ -155,7 +156,7 @@ batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
 }
 
 /**
- * batadv_tvlv_container_list_size - calculate the size of the tvlv container
+ * batadv_tvlv_container_list_size() - calculate the size of the tvlv container
  *  list entries
  * @bat_priv: the bat priv with all the soft interface information
  *
@@ -180,8 +181,8 @@ static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
- *  list
+ * batadv_tvlv_container_remove() - remove tvlv container from the tvlv
+ *  container list
  * @bat_priv: the bat priv with all the soft interface information
  * @tvlv: the to be removed tvlv container
  *
@@ -204,7 +205,7 @@ static void batadv_tvlv_container_remove(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_container_unregister - unregister tvlv container based on the
+ * batadv_tvlv_container_unregister() - unregister tvlv container based on the
  *  provided type and version (both need to match)
  * @bat_priv: the bat priv with all the soft interface information
  * @type: tvlv container type to unregister
@@ -222,7 +223,7 @@ void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_container_register - register tvlv type, version and content
+ * batadv_tvlv_container_register() - register tvlv type, version and content
  *  to be propagated with each (primary interface) OGM
  * @bat_priv: the bat priv with all the soft interface information
  * @type: tvlv container type
@@ -267,7 +268,7 @@ void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
+ * batadv_tvlv_realloc_packet_buff() - reallocate packet buffer to accommodate
  *  requested packet size
  * @packet_buff: packet buffer
  * @packet_buff_len: packet buffer size
@@ -300,7 +301,7 @@ static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
 }
 
 /**
- * batadv_tvlv_container_ogm_append - append tvlv container content to given
+ * batadv_tvlv_container_ogm_append() - append tvlv container content to given
  *  OGM packet buffer
  * @bat_priv: the bat priv with all the soft interface information
  * @packet_buff: ogm packet buffer
@@ -353,7 +354,7 @@ end:
 }
 
 /**
- * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
+ * batadv_tvlv_call_handler() - parse the given tvlv buffer to call the
  *  appropriate handlers
  * @bat_priv: the bat priv with all the soft interface information
  * @tvlv_handler: tvlv callback function handling the tvlv content
@@ -407,7 +408,7 @@ static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
+ * batadv_tvlv_containers_process() - parse the given tvlv buffer to call the
  *  appropriate handlers
  * @bat_priv: the bat priv with all the soft interface information
  * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
@@ -474,7 +475,7 @@ int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
+ * batadv_tvlv_ogm_receive() - process an incoming ogm and call the appropriate
  *  handlers
  * @bat_priv: the bat priv with all the soft interface information
  * @batadv_ogm_packet: ogm packet containing the tvlv containers
@@ -501,7 +502,7 @@ void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_handler_register - register tvlv handler based on the provided
+ * batadv_tvlv_handler_register() - register tvlv handler based on the provided
  *  type and version (both need to match) for ogm tvlv payload and/or unicast
  *  payload
  * @bat_priv: the bat priv with all the soft interface information
@@ -556,7 +557,7 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
+ * batadv_tvlv_handler_unregister() - unregister tvlv handler based on the
  *  provided type and version (both need to match)
  * @bat_priv: the bat priv with all the soft interface information
  * @type: tvlv handler type to be unregistered
@@ -579,7 +580,7 @@ void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
+ * batadv_tvlv_unicast_send() - send a unicast packet with tvlv payload to the
  *  specified host
  * @bat_priv: the bat priv with all the soft interface information
  * @src: source mac address of the unicast packet
index 4d01400ada30bd03abc583e5c22c717ddd355428..a74df33f446d57473ba9bc633534ebafd8c3c3d3 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
index a62795868794103d7e712ba91def5997dc3a5779..bb1578410e0cfa390a496299e3c0ed73925e8410 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
 #include <linux/types.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
-#include "packet.h"
-
 struct seq_file;
 
 #ifdef CONFIG_BATMAN_ADV_DAT
@@ -54,13 +54,15 @@ struct seq_file;
 
 /**
  * enum batadv_dhcp_recipient - dhcp destination
- * @BATADV_DHCP_NO: packet is not a dhcp message
- * @BATADV_DHCP_TO_SERVER: dhcp message is directed to a server
- * @BATADV_DHCP_TO_CLIENT: dhcp message is directed to a client
  */
 enum batadv_dhcp_recipient {
+       /** @BATADV_DHCP_NO: packet is not a dhcp message */
        BATADV_DHCP_NO = 0,
+
+       /** @BATADV_DHCP_TO_SERVER: dhcp message is directed to a server */
        BATADV_DHCP_TO_SERVER,
+
+       /** @BATADV_DHCP_TO_CLIENT: dhcp message is directed to a client */
        BATADV_DHCP_TO_CLIENT,
 };
 
@@ -78,196 +80,274 @@ enum batadv_dhcp_recipient {
 
 /**
  * struct batadv_hard_iface_bat_iv - per hard-interface B.A.T.M.A.N. IV data
- * @ogm_buff: buffer holding the OGM packet
- * @ogm_buff_len: length of the OGM packet buffer
- * @ogm_seqno: OGM sequence number - used to identify each OGM
  */
 struct batadv_hard_iface_bat_iv {
+       /** @ogm_buff: buffer holding the OGM packet */
        unsigned char *ogm_buff;
+
+       /** @ogm_buff_len: length of the OGM packet buffer */
        int ogm_buff_len;
+
+       /** @ogm_seqno: OGM sequence number - used to identify each OGM */
        atomic_t ogm_seqno;
 };
 
 /**
  * enum batadv_v_hard_iface_flags - interface flags useful to B.A.T.M.A.N. V
- * @BATADV_FULL_DUPLEX: tells if the connection over this link is full-duplex
- * @BATADV_WARNING_DEFAULT: tells whether we have warned the user that no
- *  throughput data is available for this interface and that default values are
- *  assumed.
  */
 enum batadv_v_hard_iface_flags {
+       /**
+        * @BATADV_FULL_DUPLEX: tells if the connection over this link is
+        *  full-duplex
+        */
        BATADV_FULL_DUPLEX      = BIT(0),
+
+       /**
+        * @BATADV_WARNING_DEFAULT: tells whether we have warned the user that
+        *  no throughput data is available for this interface and that default
+        *  values are assumed.
+        */
        BATADV_WARNING_DEFAULT  = BIT(1),
 };
 
 /**
  * struct batadv_hard_iface_bat_v - per hard-interface B.A.T.M.A.N. V data
- * @elp_interval: time interval between two ELP transmissions
- * @elp_seqno: current ELP sequence number
- * @elp_skb: base skb containing the ELP message to send
- * @elp_wq: workqueue used to schedule ELP transmissions
- * @throughput_override: throughput override to disable link auto-detection
- * @flags: interface specific flags
  */
 struct batadv_hard_iface_bat_v {
+       /** @elp_interval: time interval between two ELP transmissions */
        atomic_t elp_interval;
+
+       /** @elp_seqno: current ELP sequence number */
        atomic_t elp_seqno;
+
+       /** @elp_skb: base skb containing the ELP message to send */
        struct sk_buff *elp_skb;
+
+       /** @elp_wq: workqueue used to schedule ELP transmissions */
        struct delayed_work elp_wq;
+
+       /**
+        * @throughput_override: throughput override to disable link
+        *  auto-detection
+        */
        atomic_t throughput_override;
+
+       /** @flags: interface specific flags */
        u8 flags;
 };
 
 /**
  * enum batadv_hard_iface_wifi_flags - Flags describing the wifi configuration
  *  of a batadv_hard_iface
- * @BATADV_HARDIF_WIFI_WEXT_DIRECT: it is a wext wifi device
- * @BATADV_HARDIF_WIFI_CFG80211_DIRECT: it is a cfg80211 wifi device
- * @BATADV_HARDIF_WIFI_WEXT_INDIRECT: link device is a wext wifi device
- * @BATADV_HARDIF_WIFI_CFG80211_INDIRECT: link device is a cfg80211 wifi device
  */
 enum batadv_hard_iface_wifi_flags {
+       /** @BATADV_HARDIF_WIFI_WEXT_DIRECT: it is a wext wifi device */
        BATADV_HARDIF_WIFI_WEXT_DIRECT = BIT(0),
+
+       /** @BATADV_HARDIF_WIFI_CFG80211_DIRECT: it is a cfg80211 wifi device */
        BATADV_HARDIF_WIFI_CFG80211_DIRECT = BIT(1),
+
+       /**
+        * @BATADV_HARDIF_WIFI_WEXT_INDIRECT: link device is a wext wifi device
+        */
        BATADV_HARDIF_WIFI_WEXT_INDIRECT = BIT(2),
+
+       /**
+        * @BATADV_HARDIF_WIFI_CFG80211_INDIRECT: link device is a cfg80211 wifi
+        * device
+        */
        BATADV_HARDIF_WIFI_CFG80211_INDIRECT = BIT(3),
 };
 
 /**
  * struct batadv_hard_iface - network device known to batman-adv
- * @list: list node for batadv_hardif_list
- * @if_num: identificator of the interface
- * @if_status: status of the interface for batman-adv
- * @num_bcasts: number of payload re-broadcasts on this interface (ARQ)
- * @wifi_flags: flags whether this is (directly or indirectly) a wifi interface
- * @net_dev: pointer to the net_device
- * @hardif_obj: kobject of the per interface sysfs "mesh" directory
- * @refcount: number of contexts the object is used
- * @batman_adv_ptype: packet type describing packets that should be processed by
- *  batman-adv for this interface
- * @soft_iface: the batman-adv interface which uses this network interface
- * @rcu: struct used for freeing in an RCU-safe manner
- * @bat_iv: per hard-interface B.A.T.M.A.N. IV data
- * @bat_v: per hard-interface B.A.T.M.A.N. V data
- * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
- * @neigh_list: list of unique single hop neighbors via this interface
- * @neigh_list_lock: lock protecting neigh_list
  */
 struct batadv_hard_iface {
+       /** @list: list node for batadv_hardif_list */
        struct list_head list;
+
+       /** @if_num: identificator of the interface */
        s16 if_num;
+
+       /** @if_status: status of the interface for batman-adv */
        char if_status;
+
+       /**
+        * @num_bcasts: number of payload re-broadcasts on this interface (ARQ)
+        */
        u8 num_bcasts;
+
+       /**
+        * @wifi_flags: flags whether this is (directly or indirectly) a wifi
+        *  interface
+        */
        u32 wifi_flags;
+
+       /** @net_dev: pointer to the net_device */
        struct net_device *net_dev;
+
+       /** @hardif_obj: kobject of the per interface sysfs "mesh" directory */
        struct kobject *hardif_obj;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /**
+        * @batman_adv_ptype: packet type describing packets that should be
+        * processed by batman-adv for this interface
+        */
        struct packet_type batman_adv_ptype;
+
+       /**
+        * @soft_iface: the batman-adv interface which uses this network
+        *  interface
+        */
        struct net_device *soft_iface;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
+
+       /** @bat_iv: per hard-interface B.A.T.M.A.N. IV data */
        struct batadv_hard_iface_bat_iv bat_iv;
+
 #ifdef CONFIG_BATMAN_ADV_BATMAN_V
+       /** @bat_v: per hard-interface B.A.T.M.A.N. V data */
        struct batadv_hard_iface_bat_v bat_v;
 #endif
+
+       /**
+        * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
+        */
        struct dentry *debug_dir;
+
+       /**
+        * @neigh_list: list of unique single hop neighbors via this interface
+        */
        struct hlist_head neigh_list;
-       /* neigh_list_lock protects: neigh_list */
+
+       /** @neigh_list_lock: lock protecting neigh_list */
        spinlock_t neigh_list_lock;
 };
 
 /**
  * struct batadv_orig_ifinfo - originator info per outgoing interface
- * @list: list node for orig_node::ifinfo_list
- * @if_outgoing: pointer to outgoing hard-interface
- * @router: router that should be used to reach this originator
- * @last_real_seqno: last and best known sequence number
- * @last_ttl: ttl of last received packet
- * @last_seqno_forwarded: seqno of the OGM which was forwarded last
- * @batman_seqno_reset: time when the batman seqno window was reset
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_orig_ifinfo {
+       /** @list: list node for &batadv_orig_node.ifinfo_list */
        struct hlist_node list;
+
+       /** @if_outgoing: pointer to outgoing hard-interface */
        struct batadv_hard_iface *if_outgoing;
-       struct batadv_neigh_node __rcu *router; /* rcu protected pointer */
+
+       /** @router: router that should be used to reach this originator */
+       struct batadv_neigh_node __rcu *router;
+
+       /** @last_real_seqno: last and best known sequence number */
        u32 last_real_seqno;
+
+       /** @last_ttl: ttl of last received packet */
        u8 last_ttl;
+
+       /** @last_seqno_forwarded: seqno of the OGM which was forwarded last */
        u32 last_seqno_forwarded;
+
+       /** @batman_seqno_reset: time when the batman seqno window was reset */
        unsigned long batman_seqno_reset;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
 };
 
 /**
  * struct batadv_frag_table_entry - head in the fragment buffer table
- * @fragment_list: head of list with fragments
- * @lock: lock to protect the list of fragments
- * @timestamp: time (jiffie) of last received fragment
- * @seqno: sequence number of the fragments in the list
- * @size: accumulated size of packets in list
- * @total_size: expected size of the assembled packet
  */
 struct batadv_frag_table_entry {
+       /** @fragment_list: head of list with fragments */
        struct hlist_head fragment_list;
-       spinlock_t lock; /* protects fragment_list */
+
+       /** @lock: lock to protect the list of fragments */
+       spinlock_t lock;
+
+       /** @timestamp: time (jiffie) of last received fragment */
        unsigned long timestamp;
+
+       /** @seqno: sequence number of the fragments in the list */
        u16 seqno;
+
+       /** @size: accumulated size of packets in list */
        u16 size;
+
+       /** @total_size: expected size of the assembled packet */
        u16 total_size;
 };
 
 /**
  * struct batadv_frag_list_entry - entry in a list of fragments
- * @list: list node information
- * @skb: fragment
- * @no: fragment number in the set
  */
 struct batadv_frag_list_entry {
+       /** @list: list node information */
        struct hlist_node list;
+
+       /** @skb: fragment */
        struct sk_buff *skb;
+
+       /** @no: fragment number in the set */
        u8 no;
 };
 
 /**
  * struct batadv_vlan_tt - VLAN specific TT attributes
- * @crc: CRC32 checksum of the entries belonging to this vlan
- * @num_entries: number of TT entries for this VLAN
  */
 struct batadv_vlan_tt {
+       /** @crc: CRC32 checksum of the entries belonging to this vlan */
        u32 crc;
+
+       /** @num_entries: number of TT entries for this VLAN */
        atomic_t num_entries;
 };
 
 /**
  * struct batadv_orig_node_vlan - VLAN specific data per orig_node
- * @vid: the VLAN identifier
- * @tt: VLAN specific TT attributes
- * @list: list node for orig_node::vlan_list
- * @refcount: number of context where this object is currently in use
- * @rcu: struct used for freeing in a RCU-safe manner
  */
 struct batadv_orig_node_vlan {
+       /** @vid: the VLAN identifier */
        unsigned short vid;
+
+       /** @tt: VLAN specific TT attributes */
        struct batadv_vlan_tt tt;
+
+       /** @list: list node for &batadv_orig_node.vlan_list */
        struct hlist_node list;
+
+       /**
+        * @refcount: number of context where this object is currently in use
+        */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in a RCU-safe manner */
        struct rcu_head rcu;
 };
 
 /**
  * struct batadv_orig_bat_iv - B.A.T.M.A.N. IV private orig_node members
- * @bcast_own: set of bitfields (one per hard-interface) where each one counts
- * the number of our OGMs this orig_node rebroadcasted "back" to us  (relative
- * to last_real_seqno). Every bitfield is BATADV_TQ_LOCAL_WINDOW_SIZE bits long.
- * @bcast_own_sum: sum of bcast_own
- * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
- *  neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
  */
 struct batadv_orig_bat_iv {
+       /**
+        * @bcast_own: set of bitfields (one per hard-interface) where each one
+        * counts the number of our OGMs this orig_node rebroadcasted "back" to
+        * us  (relative to last_real_seqno). Every bitfield is
+        * BATADV_TQ_LOCAL_WINDOW_SIZE bits long.
+        */
        unsigned long *bcast_own;
+
+       /** @bcast_own_sum: sum of bcast_own */
        u8 *bcast_own_sum;
-       /* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
+
+       /**
+        * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
         * neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
         */
        spinlock_t ogm_cnt_lock;
@@ -275,130 +355,205 @@ struct batadv_orig_bat_iv {
 
 /**
  * struct batadv_orig_node - structure for orig_list maintaining nodes of mesh
- * @orig: originator ethernet address
- * @ifinfo_list: list for routers per outgoing interface
- * @last_bonding_candidate: pointer to last ifinfo of last used router
- * @dat_addr: address of the orig node in the distributed hash
- * @last_seen: time when last packet from this node was received
- * @bcast_seqno_reset: time when the broadcast seqno window was reset
- * @mcast_handler_lock: synchronizes mcast-capability and -flag changes
- * @mcast_flags: multicast flags announced by the orig node
- * @mcast_want_all_unsnoopables_node: a list node for the
- *  mcast.want_all_unsnoopables list
- * @mcast_want_all_ipv4_node: a list node for the mcast.want_all_ipv4 list
- * @mcast_want_all_ipv6_node: a list node for the mcast.want_all_ipv6 list
- * @capabilities: announced capabilities of this originator
- * @capa_initialized: bitfield to remember whether a capability was initialized
- * @last_ttvn: last seen translation table version number
- * @tt_buff: last tt changeset this node received from the orig node
- * @tt_buff_len: length of the last tt changeset this node received from the
- *  orig node
- * @tt_buff_lock: lock that protects tt_buff and tt_buff_len
- * @tt_lock: prevents from updating the table while reading it. Table update is
- *  made up by two operations (data structure update and metdata -CRC/TTVN-
- *  recalculation) and they have to be executed atomically in order to avoid
- *  another thread to read the table/metadata between those.
- * @bcast_bits: bitfield containing the info which payload broadcast originated
- *  from this orig node this host already has seen (relative to
- *  last_bcast_seqno)
- * @last_bcast_seqno: last broadcast sequence number received by this host
- * @neigh_list: list of potential next hop neighbor towards this orig node
- * @neigh_list_lock: lock protecting neigh_list and router
- * @hash_entry: hlist node for batadv_priv::orig_hash
- * @bat_priv: pointer to soft_iface this orig node belongs to
- * @bcast_seqno_lock: lock protecting bcast_bits & last_bcast_seqno
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
- * @in_coding_list: list of nodes this orig can hear
- * @out_coding_list: list of nodes that can hear this orig
- * @in_coding_list_lock: protects in_coding_list
- * @out_coding_list_lock: protects out_coding_list
- * @fragments: array with heads for fragment chains
- * @vlan_list: a list of orig_node_vlan structs, one per VLAN served by the
- *  originator represented by this object
- * @vlan_list_lock: lock protecting vlan_list
- * @bat_iv: B.A.T.M.A.N. IV private structure
  */
 struct batadv_orig_node {
+       /** @orig: originator ethernet address */
        u8 orig[ETH_ALEN];
+
+       /** @ifinfo_list: list for routers per outgoing interface */
        struct hlist_head ifinfo_list;
+
+       /**
+        * @last_bonding_candidate: pointer to last ifinfo of last used router
+        */
        struct batadv_orig_ifinfo *last_bonding_candidate;
+
 #ifdef CONFIG_BATMAN_ADV_DAT
+       /** @dat_addr: address of the orig node in the distributed hash */
        batadv_dat_addr_t dat_addr;
 #endif
+
+       /** @last_seen: time when last packet from this node was received */
        unsigned long last_seen;
+
+       /**
+        * @bcast_seqno_reset: time when the broadcast seqno window was reset
+        */
        unsigned long bcast_seqno_reset;
+
 #ifdef CONFIG_BATMAN_ADV_MCAST
-       /* synchronizes mcast tvlv specific orig changes */
+       /**
+        * @mcast_handler_lock: synchronizes mcast-capability and -flag changes
+        */
        spinlock_t mcast_handler_lock;
+
+       /** @mcast_flags: multicast flags announced by the orig node */
        u8 mcast_flags;
+
+       /**
+        * @mcast_want_all_unsnoopables_node: a list node for the
+        *  mcast.want_all_unsnoopables list
+        */
        struct hlist_node mcast_want_all_unsnoopables_node;
+
+       /**
+        * @mcast_want_all_ipv4_node: a list node for the mcast.want_all_ipv4
+        *  list
+        */
        struct hlist_node mcast_want_all_ipv4_node;
+       /**
+        * @mcast_want_all_ipv6_node: a list node for the mcast.want_all_ipv6
+        *  list
+        */
        struct hlist_node mcast_want_all_ipv6_node;
 #endif
+
+       /** @capabilities: announced capabilities of this originator */
        unsigned long capabilities;
+
+       /**
+        * @capa_initialized: bitfield to remember whether a capability was
+        *  initialized
+        */
        unsigned long capa_initialized;
+
+       /** @last_ttvn: last seen translation table version number */
        atomic_t last_ttvn;
+
+       /** @tt_buff: last tt changeset this node received from the orig node */
        unsigned char *tt_buff;
+
+       /**
+        * @tt_buff_len: length of the last tt changeset this node received
+        *  from the orig node
+        */
        s16 tt_buff_len;
-       spinlock_t tt_buff_lock; /* protects tt_buff & tt_buff_len */
-       /* prevents from changing the table while reading it */
+
+       /** @tt_buff_lock: lock that protects tt_buff and tt_buff_len */
+       spinlock_t tt_buff_lock;
+
+       /**
+        * @tt_lock: prevents from updating the table while reading it. Table
+        *  update is made up by two operations (data structure update and
+        *  metdata -CRC/TTVN-recalculation) and they have to be executed
+        *  atomically in order to avoid another thread to read the
+        *  table/metadata between those.
+        */
        spinlock_t tt_lock;
+
+       /**
+        * @bcast_bits: bitfield containing the info which payload broadcast
+        *  originated from this orig node this host already has seen (relative
+        *  to last_bcast_seqno)
+        */
        DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
+
+       /**
+        * @last_bcast_seqno: last broadcast sequence number received by this
+        *  host
+        */
        u32 last_bcast_seqno;
+
+       /**
+        * @neigh_list: list of potential next hop neighbor towards this orig
+        *  node
+        */
        struct hlist_head neigh_list;
-       /* neigh_list_lock protects: neigh_list, ifinfo_list,
-        * last_bonding_candidate and router
+
+       /**
+        * @neigh_list_lock: lock protecting neigh_list, ifinfo_list,
+        *  last_bonding_candidate and router
         */
        spinlock_t neigh_list_lock;
+
+       /** @hash_entry: hlist node for &batadv_priv.orig_hash */
        struct hlist_node hash_entry;
+
+       /** @bat_priv: pointer to soft_iface this orig node belongs to */
        struct batadv_priv *bat_priv;
-       /* bcast_seqno_lock protects: bcast_bits & last_bcast_seqno */
+
+       /** @bcast_seqno_lock: lock protecting bcast_bits & last_bcast_seqno */
        spinlock_t bcast_seqno_lock;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
+
 #ifdef CONFIG_BATMAN_ADV_NC
+       /** @in_coding_list: list of nodes this orig can hear */
        struct list_head in_coding_list;
+
+       /** @out_coding_list: list of nodes that can hear this orig */
        struct list_head out_coding_list;
-       spinlock_t in_coding_list_lock; /* Protects in_coding_list */
-       spinlock_t out_coding_list_lock; /* Protects out_coding_list */
+
+       /** @in_coding_list_lock: protects in_coding_list */
+       spinlock_t in_coding_list_lock;
+
+       /** @out_coding_list_lock: protects out_coding_list */
+       spinlock_t out_coding_list_lock;
 #endif
+
+       /** @fragments: array with heads for fragment chains */
        struct batadv_frag_table_entry fragments[BATADV_FRAG_BUFFER_COUNT];
+
+       /**
+        * @vlan_list: a list of orig_node_vlan structs, one per VLAN served by
+        *  the originator represented by this object
+        */
        struct hlist_head vlan_list;
-       spinlock_t vlan_list_lock; /* protects vlan_list */
+
+       /** @vlan_list_lock: lock protecting vlan_list */
+       spinlock_t vlan_list_lock;
+
+       /** @bat_iv: B.A.T.M.A.N. IV private structure */
        struct batadv_orig_bat_iv bat_iv;
 };
 
 /**
  * enum batadv_orig_capabilities - orig node capabilities
- * @BATADV_ORIG_CAPA_HAS_DAT: orig node has distributed arp table enabled
- * @BATADV_ORIG_CAPA_HAS_NC: orig node has network coding enabled
- * @BATADV_ORIG_CAPA_HAS_TT: orig node has tt capability
- * @BATADV_ORIG_CAPA_HAS_MCAST: orig node has some multicast capability
- *  (= orig node announces a tvlv of type BATADV_TVLV_MCAST)
  */
 enum batadv_orig_capabilities {
+       /**
+        * @BATADV_ORIG_CAPA_HAS_DAT: orig node has distributed arp table
+        *  enabled
+        */
        BATADV_ORIG_CAPA_HAS_DAT,
+
+       /** @BATADV_ORIG_CAPA_HAS_NC: orig node has network coding enabled */
        BATADV_ORIG_CAPA_HAS_NC,
+
+       /** @BATADV_ORIG_CAPA_HAS_TT: orig node has tt capability */
        BATADV_ORIG_CAPA_HAS_TT,
+
+       /**
+        * @BATADV_ORIG_CAPA_HAS_MCAST: orig node has some multicast capability
+        *  (= orig node announces a tvlv of type BATADV_TVLV_MCAST)
+        */
        BATADV_ORIG_CAPA_HAS_MCAST,
 };
 
 /**
  * struct batadv_gw_node - structure for orig nodes announcing gw capabilities
- * @list: list node for batadv_priv_gw::list
- * @orig_node: pointer to corresponding orig node
- * @bandwidth_down: advertised uplink download bandwidth
- * @bandwidth_up: advertised uplink upload bandwidth
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_gw_node {
+       /** @list: list node for &batadv_priv_gw.list */
        struct hlist_node list;
+
+       /** @orig_node: pointer to corresponding orig node */
        struct batadv_orig_node *orig_node;
+
+       /** @bandwidth_down: advertised uplink download bandwidth */
        u32 bandwidth_down;
+
+       /** @bandwidth_up: advertised uplink upload bandwidth */
        u32 bandwidth_up;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
 };
 
@@ -407,118 +562,161 @@ DECLARE_EWMA(throughput, 10, 8)
 /**
  * struct batadv_hardif_neigh_node_bat_v - B.A.T.M.A.N. V private neighbor
  *  information
- * @throughput: ewma link throughput towards this neighbor
- * @elp_interval: time interval between two ELP transmissions
- * @elp_latest_seqno: latest and best known ELP sequence number
- * @last_unicast_tx: when the last unicast packet has been sent to this neighbor
- * @metric_work: work queue callback item for metric update
  */
 struct batadv_hardif_neigh_node_bat_v {
+       /** @throughput: ewma link throughput towards this neighbor */
        struct ewma_throughput throughput;
+
+       /** @elp_interval: time interval between two ELP transmissions */
        u32 elp_interval;
+
+       /** @elp_latest_seqno: latest and best known ELP sequence number */
        u32 elp_latest_seqno;
+
+       /**
+        * @last_unicast_tx: when the last unicast packet has been sent to this
+        *  neighbor
+        */
        unsigned long last_unicast_tx;
+
+       /** @metric_work: work queue callback item for metric update */
        struct work_struct metric_work;
 };
 
 /**
  * struct batadv_hardif_neigh_node - unique neighbor per hard-interface
- * @list: list node for batadv_hard_iface::neigh_list
- * @addr: the MAC address of the neighboring interface
- * @orig: the address of the originator this neighbor node belongs to
- * @if_incoming: pointer to incoming hard-interface
- * @last_seen: when last packet via this neighbor was received
- * @bat_v: B.A.T.M.A.N. V private data
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in a RCU-safe manner
  */
 struct batadv_hardif_neigh_node {
+       /** @list: list node for &batadv_hard_iface.neigh_list */
        struct hlist_node list;
+
+       /** @addr: the MAC address of the neighboring interface */
        u8 addr[ETH_ALEN];
+
+       /**
+        * @orig: the address of the originator this neighbor node belongs to
+        */
        u8 orig[ETH_ALEN];
+
+       /** @if_incoming: pointer to incoming hard-interface */
        struct batadv_hard_iface *if_incoming;
+
+       /** @last_seen: when last packet via this neighbor was received */
        unsigned long last_seen;
+
 #ifdef CONFIG_BATMAN_ADV_BATMAN_V
+       /** @bat_v: B.A.T.M.A.N. V private data */
        struct batadv_hardif_neigh_node_bat_v bat_v;
 #endif
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in a RCU-safe manner */
        struct rcu_head rcu;
 };
 
 /**
  * struct batadv_neigh_node - structure for single hops neighbors
- * @list: list node for batadv_orig_node::neigh_list
- * @orig_node: pointer to corresponding orig_node
- * @addr: the MAC address of the neighboring interface
- * @ifinfo_list: list for routing metrics per outgoing interface
- * @ifinfo_lock: lock protecting private ifinfo members and list
- * @if_incoming: pointer to incoming hard-interface
- * @last_seen: when last packet via this neighbor was received
- * @hardif_neigh: hardif_neigh of this neighbor
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_neigh_node {
+       /** @list: list node for &batadv_orig_node.neigh_list */
        struct hlist_node list;
+
+       /** @orig_node: pointer to corresponding orig_node */
        struct batadv_orig_node *orig_node;
+
+       /** @addr: the MAC address of the neighboring interface */
        u8 addr[ETH_ALEN];
+
+       /** @ifinfo_list: list for routing metrics per outgoing interface */
        struct hlist_head ifinfo_list;
-       spinlock_t ifinfo_lock; /* protects ifinfo_list and its members */
+
+       /** @ifinfo_lock: lock protecting ifinfo_list and its members */
+       spinlock_t ifinfo_lock;
+
+       /** @if_incoming: pointer to incoming hard-interface */
        struct batadv_hard_iface *if_incoming;
+
+       /** @last_seen: when last packet via this neighbor was received */
        unsigned long last_seen;
+
+       /** @hardif_neigh: hardif_neigh of this neighbor */
        struct batadv_hardif_neigh_node *hardif_neigh;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
 };
 
 /**
  * struct batadv_neigh_ifinfo_bat_iv - neighbor information per outgoing
  *  interface for B.A.T.M.A.N. IV
- * @tq_recv: ring buffer of received TQ values from this neigh node
- * @tq_index: ring buffer index
- * @tq_avg: averaged tq of all tq values in the ring buffer (tq_recv)
- * @real_bits: bitfield containing the number of OGMs received from this neigh
- *  node (relative to orig_node->last_real_seqno)
- * @real_packet_count: counted result of real_bits
  */
 struct batadv_neigh_ifinfo_bat_iv {
+       /** @tq_recv: ring buffer of received TQ values from this neigh node */
        u8 tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
+
+       /** @tq_index: ring buffer index */
        u8 tq_index;
+
+       /**
+        * @tq_avg: averaged tq of all tq values in the ring buffer (tq_recv)
+        */
        u8 tq_avg;
+
+       /**
+        * @real_bits: bitfield containing the number of OGMs received from this
+        *  neigh node (relative to orig_node->last_real_seqno)
+        */
        DECLARE_BITMAP(real_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
+
+       /** @real_packet_count: counted result of real_bits */
        u8 real_packet_count;
 };
 
 /**
  * struct batadv_neigh_ifinfo_bat_v - neighbor information per outgoing
  *  interface for B.A.T.M.A.N. V
- * @throughput: last throughput metric received from originator via this neigh
- * @last_seqno: last sequence number known for this neighbor
  */
 struct batadv_neigh_ifinfo_bat_v {
+       /**
+        * @throughput: last throughput metric received from originator via this
+        *  neigh
+        */
        u32 throughput;
+
+       /** @last_seqno: last sequence number known for this neighbor */
        u32 last_seqno;
 };
 
 /**
  * struct batadv_neigh_ifinfo - neighbor information per outgoing interface
- * @list: list node for batadv_neigh_node::ifinfo_list
- * @if_outgoing: pointer to outgoing hard-interface
- * @bat_iv: B.A.T.M.A.N. IV private structure
- * @bat_v: B.A.T.M.A.N. V private data
- * @last_ttl: last received ttl from this neigh node
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in a RCU-safe manner
  */
 struct batadv_neigh_ifinfo {
+       /** @list: list node for &batadv_neigh_node.ifinfo_list */
        struct hlist_node list;
+
+       /** @if_outgoing: pointer to outgoing hard-interface */
        struct batadv_hard_iface *if_outgoing;
+
+       /** @bat_iv: B.A.T.M.A.N. IV private structure */
        struct batadv_neigh_ifinfo_bat_iv bat_iv;
+
 #ifdef CONFIG_BATMAN_ADV_BATMAN_V
+       /** @bat_v: B.A.T.M.A.N. V private data */
        struct batadv_neigh_ifinfo_bat_v bat_v;
 #endif
+
+       /** @last_ttl: last received ttl from this neigh node */
        u8 last_ttl;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in a RCU-safe manner */
        struct rcu_head rcu;
 };
 
@@ -526,148 +724,278 @@ struct batadv_neigh_ifinfo {
 
 /**
  * struct batadv_bcast_duplist_entry - structure for LAN broadcast suppression
- * @orig: mac address of orig node orginating the broadcast
- * @crc: crc32 checksum of broadcast payload
- * @entrytime: time when the broadcast packet was received
  */
 struct batadv_bcast_duplist_entry {
+       /** @orig: mac address of orig node originating the broadcast */
        u8 orig[ETH_ALEN];
+
+       /** @crc: crc32 checksum of broadcast payload */
        __be32 crc;
+
+       /** @entrytime: time when the broadcast packet was received */
        unsigned long entrytime;
 };
 #endif
 
 /**
  * enum batadv_counters - indices for traffic counters
- * @BATADV_CNT_TX: transmitted payload traffic packet counter
- * @BATADV_CNT_TX_BYTES: transmitted payload traffic bytes counter
- * @BATADV_CNT_TX_DROPPED: dropped transmission payload traffic packet counter
- * @BATADV_CNT_RX: received payload traffic packet counter
- * @BATADV_CNT_RX_BYTES: received payload traffic bytes counter
- * @BATADV_CNT_FORWARD: forwarded payload traffic packet counter
- * @BATADV_CNT_FORWARD_BYTES: forwarded payload traffic bytes counter
- * @BATADV_CNT_MGMT_TX: transmitted routing protocol traffic packet counter
- * @BATADV_CNT_MGMT_TX_BYTES: transmitted routing protocol traffic bytes counter
- * @BATADV_CNT_MGMT_RX: received routing protocol traffic packet counter
- * @BATADV_CNT_MGMT_RX_BYTES: received routing protocol traffic bytes counter
- * @BATADV_CNT_FRAG_TX: transmitted fragment traffic packet counter
- * @BATADV_CNT_FRAG_TX_BYTES: transmitted fragment traffic bytes counter
- * @BATADV_CNT_FRAG_RX: received fragment traffic packet counter
- * @BATADV_CNT_FRAG_RX_BYTES: received fragment traffic bytes counter
- * @BATADV_CNT_FRAG_FWD: forwarded fragment traffic packet counter
- * @BATADV_CNT_FRAG_FWD_BYTES: forwarded fragment traffic bytes counter
- * @BATADV_CNT_TT_REQUEST_TX: transmitted tt req traffic packet counter
- * @BATADV_CNT_TT_REQUEST_RX: received tt req traffic packet counter
- * @BATADV_CNT_TT_RESPONSE_TX: transmitted tt resp traffic packet counter
- * @BATADV_CNT_TT_RESPONSE_RX: received tt resp traffic packet counter
- * @BATADV_CNT_TT_ROAM_ADV_TX: transmitted tt roam traffic packet counter
- * @BATADV_CNT_TT_ROAM_ADV_RX: received tt roam traffic packet counter
- * @BATADV_CNT_DAT_GET_TX: transmitted dht GET traffic packet counter
- * @BATADV_CNT_DAT_GET_RX: received dht GET traffic packet counter
- * @BATADV_CNT_DAT_PUT_TX: transmitted dht PUT traffic packet counter
- * @BATADV_CNT_DAT_PUT_RX: received dht PUT traffic packet counter
- * @BATADV_CNT_DAT_CACHED_REPLY_TX: transmitted dat cache reply traffic packet
- *  counter
- * @BATADV_CNT_NC_CODE: transmitted nc-combined traffic packet counter
- * @BATADV_CNT_NC_CODE_BYTES: transmitted nc-combined traffic bytes counter
- * @BATADV_CNT_NC_RECODE: transmitted nc-recombined traffic packet counter
- * @BATADV_CNT_NC_RECODE_BYTES: transmitted nc-recombined traffic bytes counter
- * @BATADV_CNT_NC_BUFFER: counter for packets buffered for later nc decoding
- * @BATADV_CNT_NC_DECODE: received and nc-decoded traffic packet counter
- * @BATADV_CNT_NC_DECODE_BYTES: received and nc-decoded traffic bytes counter
- * @BATADV_CNT_NC_DECODE_FAILED: received and decode-failed traffic packet
- *  counter
- * @BATADV_CNT_NC_SNIFFED: counter for nc-decoded packets received in promisc
- *  mode.
- * @BATADV_CNT_NUM: number of traffic counters
  */
 enum batadv_counters {
+       /** @BATADV_CNT_TX: transmitted payload traffic packet counter */
        BATADV_CNT_TX,
+
+       /** @BATADV_CNT_TX_BYTES: transmitted payload traffic bytes counter */
        BATADV_CNT_TX_BYTES,
+
+       /**
+        * @BATADV_CNT_TX_DROPPED: dropped transmission payload traffic packet
+        *  counter
+        */
        BATADV_CNT_TX_DROPPED,
+
+       /** @BATADV_CNT_RX: received payload traffic packet counter */
        BATADV_CNT_RX,
+
+       /** @BATADV_CNT_RX_BYTES: received payload traffic bytes counter */
        BATADV_CNT_RX_BYTES,
+
+       /** @BATADV_CNT_FORWARD: forwarded payload traffic packet counter */
        BATADV_CNT_FORWARD,
+
+       /**
+        * @BATADV_CNT_FORWARD_BYTES: forwarded payload traffic bytes counter
+        */
        BATADV_CNT_FORWARD_BYTES,
+
+       /**
+        * @BATADV_CNT_MGMT_TX: transmitted routing protocol traffic packet
+        *  counter
+        */
        BATADV_CNT_MGMT_TX,
+
+       /**
+        * @BATADV_CNT_MGMT_TX_BYTES: transmitted routing protocol traffic bytes
+        *  counter
+        */
        BATADV_CNT_MGMT_TX_BYTES,
+
+       /**
+        * @BATADV_CNT_MGMT_RX: received routing protocol traffic packet counter
+        */
        BATADV_CNT_MGMT_RX,
+
+       /**
+        * @BATADV_CNT_MGMT_RX_BYTES: received routing protocol traffic bytes
+        *  counter
+        */
        BATADV_CNT_MGMT_RX_BYTES,
+
+       /** @BATADV_CNT_FRAG_TX: transmitted fragment traffic packet counter */
        BATADV_CNT_FRAG_TX,
+
+       /**
+        * @BATADV_CNT_FRAG_TX_BYTES: transmitted fragment traffic bytes counter
+        */
        BATADV_CNT_FRAG_TX_BYTES,
+
+       /** @BATADV_CNT_FRAG_RX: received fragment traffic packet counter */
        BATADV_CNT_FRAG_RX,
+
+       /**
+        * @BATADV_CNT_FRAG_RX_BYTES: received fragment traffic bytes counter
+        */
        BATADV_CNT_FRAG_RX_BYTES,
+
+       /** @BATADV_CNT_FRAG_FWD: forwarded fragment traffic packet counter */
        BATADV_CNT_FRAG_FWD,
+
+       /**
+        * @BATADV_CNT_FRAG_FWD_BYTES: forwarded fragment traffic bytes counter
+        */
        BATADV_CNT_FRAG_FWD_BYTES,
+
+       /**
+        * @BATADV_CNT_TT_REQUEST_TX: transmitted tt req traffic packet counter
+        */
        BATADV_CNT_TT_REQUEST_TX,
+
+       /** @BATADV_CNT_TT_REQUEST_RX: received tt req traffic packet counter */
        BATADV_CNT_TT_REQUEST_RX,
+
+       /**
+        * @BATADV_CNT_TT_RESPONSE_TX: transmitted tt resp traffic packet
+        *  counter
+        */
        BATADV_CNT_TT_RESPONSE_TX,
+
+       /**
+        * @BATADV_CNT_TT_RESPONSE_RX: received tt resp traffic packet counter
+        */
        BATADV_CNT_TT_RESPONSE_RX,
+
+       /**
+        * @BATADV_CNT_TT_ROAM_ADV_TX: transmitted tt roam traffic packet
+        *  counter
+        */
        BATADV_CNT_TT_ROAM_ADV_TX,
+
+       /**
+        * @BATADV_CNT_TT_ROAM_ADV_RX: received tt roam traffic packet counter
+        */
        BATADV_CNT_TT_ROAM_ADV_RX,
+
 #ifdef CONFIG_BATMAN_ADV_DAT
+       /**
+        * @BATADV_CNT_DAT_GET_TX: transmitted dht GET traffic packet counter
+        */
        BATADV_CNT_DAT_GET_TX,
+
+       /** @BATADV_CNT_DAT_GET_RX: received dht GET traffic packet counter */
        BATADV_CNT_DAT_GET_RX,
+
+       /**
+        * @BATADV_CNT_DAT_PUT_TX: transmitted dht PUT traffic packet counter
+        */
        BATADV_CNT_DAT_PUT_TX,
+
+       /** @BATADV_CNT_DAT_PUT_RX: received dht PUT traffic packet counter */
        BATADV_CNT_DAT_PUT_RX,
+
+       /**
+        * @BATADV_CNT_DAT_CACHED_REPLY_TX: transmitted dat cache reply traffic
+        *  packet counter
+        */
        BATADV_CNT_DAT_CACHED_REPLY_TX,
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_NC
+       /**
+        * @BATADV_CNT_NC_CODE: transmitted nc-combined traffic packet counter
+        */
        BATADV_CNT_NC_CODE,
+
+       /**
+        * @BATADV_CNT_NC_CODE_BYTES: transmitted nc-combined traffic bytes
+        *  counter
+        */
        BATADV_CNT_NC_CODE_BYTES,
+
+       /**
+        * @BATADV_CNT_NC_RECODE: transmitted nc-recombined traffic packet
+        *  counter
+        */
        BATADV_CNT_NC_RECODE,
+
+       /**
+        * @BATADV_CNT_NC_RECODE_BYTES: transmitted nc-recombined traffic bytes
+        *  counter
+        */
        BATADV_CNT_NC_RECODE_BYTES,
+
+       /**
+        * @BATADV_CNT_NC_BUFFER: counter for packets buffered for later nc
+        *  decoding
+        */
        BATADV_CNT_NC_BUFFER,
+
+       /**
+        * @BATADV_CNT_NC_DECODE: received and nc-decoded traffic packet counter
+        */
        BATADV_CNT_NC_DECODE,
+
+       /**
+        * @BATADV_CNT_NC_DECODE_BYTES: received and nc-decoded traffic bytes
+        *  counter
+        */
        BATADV_CNT_NC_DECODE_BYTES,
+
+       /**
+        * @BATADV_CNT_NC_DECODE_FAILED: received and decode-failed traffic
+        *  packet counter
+        */
        BATADV_CNT_NC_DECODE_FAILED,
+
+       /**
+        * @BATADV_CNT_NC_SNIFFED: counter for nc-decoded packets received in
+        *  promisc mode.
+        */
        BATADV_CNT_NC_SNIFFED,
 #endif
+
+       /** @BATADV_CNT_NUM: number of traffic counters */
        BATADV_CNT_NUM,
 };
 
 /**
  * struct batadv_priv_tt - per mesh interface translation table data
- * @vn: translation table version number
- * @ogm_append_cnt: counter of number of OGMs containing the local tt diff
- * @local_changes: changes registered in an originator interval
- * @changes_list: tracks tt local changes within an originator interval
- * @local_hash: local translation table hash table
- * @global_hash: global translation table hash table
- * @req_list: list of pending & unanswered tt_requests
- * @roam_list: list of the last roaming events of each client limiting the
- *  number of roaming events to avoid route flapping
- * @changes_list_lock: lock protecting changes_list
- * @req_list_lock: lock protecting req_list
- * @roam_list_lock: lock protecting roam_list
- * @last_changeset: last tt changeset this host has generated
- * @last_changeset_len: length of last tt changeset this host has generated
- * @last_changeset_lock: lock protecting last_changeset & last_changeset_len
- * @commit_lock: prevents from executing a local TT commit while reading the
- *  local table. The local TT commit is made up by two operations (data
- *  structure update and metdata -CRC/TTVN- recalculation) and they have to be
- *  executed atomically in order to avoid another thread to read the
- *  table/metadata between those.
- * @work: work queue callback item for translation table purging
  */
 struct batadv_priv_tt {
+       /** @vn: translation table version number */
        atomic_t vn;
+
+       /**
+        * @ogm_append_cnt: counter of number of OGMs containing the local tt
+        *  diff
+        */
        atomic_t ogm_append_cnt;
+
+       /** @local_changes: changes registered in an originator interval */
        atomic_t local_changes;
+
+       /**
+        * @changes_list: tracks tt local changes within an originator interval
+        */
        struct list_head changes_list;
+
+       /** @local_hash: local translation table hash table */
        struct batadv_hashtable *local_hash;
+
+       /** @global_hash: global translation table hash table */
        struct batadv_hashtable *global_hash;
+
+       /** @req_list: list of pending & unanswered tt_requests */
        struct hlist_head req_list;
+
+       /**
+        * @roam_list: list of the last roaming events of each client limiting
+        *  the number of roaming events to avoid route flapping
+        */
        struct list_head roam_list;
-       spinlock_t changes_list_lock; /* protects changes */
-       spinlock_t req_list_lock; /* protects req_list */
-       spinlock_t roam_list_lock; /* protects roam_list */
+
+       /** @changes_list_lock: lock protecting changes_list */
+       spinlock_t changes_list_lock;
+
+       /** @req_list_lock: lock protecting req_list */
+       spinlock_t req_list_lock;
+
+       /** @roam_list_lock: lock protecting roam_list */
+       spinlock_t roam_list_lock;
+
+       /** @last_changeset: last tt changeset this host has generated */
        unsigned char *last_changeset;
+
+       /**
+        * @last_changeset_len: length of last tt changeset this host has
+        *  generated
+        */
        s16 last_changeset_len;
-       /* protects last_changeset & last_changeset_len */
+
+       /**
+        * @last_changeset_lock: lock protecting last_changeset &
+        *  last_changeset_len
+        */
        spinlock_t last_changeset_lock;
-       /* prevents from executing a commit while reading the table */
+
+       /**
+        * @commit_lock: prevents from executing a local TT commit while reading
+        *  the local table. The local TT commit is made up by two operations
+        *  (data structure update and metadata -CRC/TTVN- recalculation) and
+        *  they have to be executed atomically in order to avoid another thread
+        *  to read the table/metadata between those.
+        */
        spinlock_t commit_lock;
+
+       /** @work: work queue callback item for translation table purging */
        struct delayed_work work;
 };
 
@@ -675,31 +1003,57 @@ struct batadv_priv_tt {
 
 /**
  * struct batadv_priv_bla - per mesh interface bridge loope avoidance data
- * @num_requests: number of bla requests in flight
- * @claim_hash: hash table containing mesh nodes this host has claimed
- * @backbone_hash: hash table containing all detected backbone gateways
- * @loopdetect_addr: MAC address used for own loopdetection frames
- * @loopdetect_lasttime: time when the loopdetection frames were sent
- * @loopdetect_next: how many periods to wait for the next loopdetect process
- * @bcast_duplist: recently received broadcast packets array (for broadcast
- *  duplicate suppression)
- * @bcast_duplist_curr: index of last broadcast packet added to bcast_duplist
- * @bcast_duplist_lock: lock protecting bcast_duplist & bcast_duplist_curr
- * @claim_dest: local claim data (e.g. claim group)
- * @work: work queue callback item for cleanups & bla announcements
  */
 struct batadv_priv_bla {
+       /** @num_requests: number of bla requests in flight */
        atomic_t num_requests;
+
+       /**
+        * @claim_hash: hash table containing mesh nodes this host has claimed
+        */
        struct batadv_hashtable *claim_hash;
+
+       /**
+        * @backbone_hash: hash table containing all detected backbone gateways
+        */
        struct batadv_hashtable *backbone_hash;
+
+       /** @loopdetect_addr: MAC address used for own loopdetection frames */
        u8 loopdetect_addr[ETH_ALEN];
+
+       /**
+        * @loopdetect_lasttime: time when the loopdetection frames were sent
+        */
        unsigned long loopdetect_lasttime;
+
+       /**
+        * @loopdetect_next: how many periods to wait for the next loopdetect
+        *  process
+        */
        atomic_t loopdetect_next;
+
+       /**
+        * @bcast_duplist: recently received broadcast packets array (for
+        *  broadcast duplicate suppression)
+        */
        struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
+
+       /**
+        * @bcast_duplist_curr: index of last broadcast packet added to
+        *  bcast_duplist
+        */
        int bcast_duplist_curr;
-       /* protects bcast_duplist & bcast_duplist_curr */
+
+       /**
+        * @bcast_duplist_lock: lock protecting bcast_duplist &
+        *  bcast_duplist_curr
+        */
        spinlock_t bcast_duplist_lock;
+
+       /** @claim_dest: local claim data (e.g. claim group) */
        struct batadv_bla_claim_dst claim_dest;
+
+       /** @work: work queue callback item for cleanups & bla announcements */
        struct delayed_work work;
 };
 #endif
@@ -708,68 +1062,94 @@ struct batadv_priv_bla {
 
 /**
  * struct batadv_priv_debug_log - debug logging data
- * @log_buff: buffer holding the logs (ring bufer)
- * @log_start: index of next character to read
- * @log_end: index of next character to write
- * @lock: lock protecting log_buff, log_start & log_end
- * @queue_wait: log reader's wait queue
  */
 struct batadv_priv_debug_log {
+       /** @log_buff: buffer holding the logs (ring buffer) */
        char log_buff[BATADV_LOG_BUF_LEN];
+
+       /** @log_start: index of next character to read */
        unsigned long log_start;
+
+       /** @log_end: index of next character to write */
        unsigned long log_end;
-       spinlock_t lock; /* protects log_buff, log_start and log_end */
+
+       /** @lock: lock protecting log_buff, log_start & log_end */
+       spinlock_t lock;
+
+       /** @queue_wait: log reader's wait queue */
        wait_queue_head_t queue_wait;
 };
 #endif
 
 /**
  * struct batadv_priv_gw - per mesh interface gateway data
- * @gateway_list: list of available gateway nodes
- * @list_lock: lock protecting gateway_list & curr_gw
- * @curr_gw: pointer to currently selected gateway node
- * @mode: gateway operation: off, client or server (see batadv_gw_modes)
- * @sel_class: gateway selection class (applies if gw_mode client)
- * @bandwidth_down: advertised uplink download bandwidth (if gw_mode server)
- * @bandwidth_up: advertised uplink upload bandwidth (if gw_mode server)
- * @reselect: bool indicating a gateway re-selection is in progress
  */
 struct batadv_priv_gw {
+       /** @gateway_list: list of available gateway nodes */
        struct hlist_head gateway_list;
-       spinlock_t list_lock; /* protects gateway_list & curr_gw */
-       struct batadv_gw_node __rcu *curr_gw;  /* rcu protected pointer */
+
+       /** @list_lock: lock protecting gateway_list & curr_gw */
+       spinlock_t list_lock;
+
+       /** @curr_gw: pointer to currently selected gateway node */
+       struct batadv_gw_node __rcu *curr_gw;
+
+       /**
+        * @mode: gateway operation: off, client or server (see batadv_gw_modes)
+        */
        atomic_t mode;
+
+       /** @sel_class: gateway selection class (applies if gw_mode client) */
        atomic_t sel_class;
+
+       /**
+        * @bandwidth_down: advertised uplink download bandwidth (if gw_mode
+        *  server)
+        */
        atomic_t bandwidth_down;
+
+       /**
+        * @bandwidth_up: advertised uplink upload bandwidth (if gw_mode server)
+        */
        atomic_t bandwidth_up;
+
+       /** @reselect: bool indicating a gateway re-selection is in progress */
        atomic_t reselect;
 };
 
 /**
  * struct batadv_priv_tvlv - per mesh interface tvlv data
- * @container_list: list of registered tvlv containers to be sent with each OGM
- * @handler_list: list of the various tvlv content handlers
- * @container_list_lock: protects tvlv container list access
- * @handler_list_lock: protects handler list access
  */
 struct batadv_priv_tvlv {
+       /**
+        * @container_list: list of registered tvlv containers to be sent with
+        *  each OGM
+        */
        struct hlist_head container_list;
+
+       /** @handler_list: list of the various tvlv content handlers */
        struct hlist_head handler_list;
-       spinlock_t container_list_lock; /* protects container_list */
-       spinlock_t handler_list_lock; /* protects handler_list */
+
+       /** @container_list_lock: protects tvlv container list access */
+       spinlock_t container_list_lock;
+
+       /** @handler_list_lock: protects handler list access */
+       spinlock_t handler_list_lock;
 };
 
 #ifdef CONFIG_BATMAN_ADV_DAT
 
 /**
  * struct batadv_priv_dat - per mesh interface DAT private data
- * @addr: node DAT address
- * @hash: hashtable representing the local ARP cache
- * @work: work queue callback item for cache purging
  */
 struct batadv_priv_dat {
+       /** @addr: node DAT address */
        batadv_dat_addr_t addr;
+
+       /** @hash: hashtable representing the local ARP cache */
        struct batadv_hashtable *hash;
+
+       /** @work: work queue callback item for cache purging */
        struct delayed_work work;
 };
 #endif
@@ -777,375 +1157,582 @@ struct batadv_priv_dat {
 #ifdef CONFIG_BATMAN_ADV_MCAST
 /**
  * struct batadv_mcast_querier_state - IGMP/MLD querier state when bridged
- * @exists: whether a querier exists in the mesh
- * @shadowing: if a querier exists, whether it is potentially shadowing
- *  multicast listeners (i.e. querier is behind our own bridge segment)
  */
 struct batadv_mcast_querier_state {
+       /** @exists: whether a querier exists in the mesh */
        bool exists;
+
+       /**
+        * @shadowing: if a querier exists, whether it is potentially shadowing
+        *  multicast listeners (i.e. querier is behind our own bridge segment)
+        */
        bool shadowing;
 };
 
 /**
  * struct batadv_priv_mcast - per mesh interface mcast data
- * @mla_list: list of multicast addresses we are currently announcing via TT
- * @want_all_unsnoopables_list: a list of orig_nodes wanting all unsnoopable
- *  multicast traffic
- * @want_all_ipv4_list: a list of orig_nodes wanting all IPv4 multicast traffic
- * @want_all_ipv6_list: a list of orig_nodes wanting all IPv6 multicast traffic
- * @querier_ipv4: the current state of an IGMP querier in the mesh
- * @querier_ipv6: the current state of an MLD querier in the mesh
- * @flags: the flags we have last sent in our mcast tvlv
- * @enabled: whether the multicast tvlv is currently enabled
- * @bridged: whether the soft interface has a bridge on top
- * @num_disabled: number of nodes that have no mcast tvlv
- * @num_want_all_unsnoopables: number of nodes wanting unsnoopable IP traffic
- * @num_want_all_ipv4: counter for items in want_all_ipv4_list
- * @num_want_all_ipv6: counter for items in want_all_ipv6_list
- * @want_lists_lock: lock for protecting modifications to mcast want lists
- *  (traversals are rcu-locked)
- * @work: work queue callback item for multicast TT and TVLV updates
  */
 struct batadv_priv_mcast {
+       /**
+        * @mla_list: list of multicast addresses we are currently announcing
+        *  via TT
+        */
        struct hlist_head mla_list; /* see __batadv_mcast_mla_update() */
+
+       /**
+        * @want_all_unsnoopables_list: a list of orig_nodes wanting all
+        *  unsnoopable multicast traffic
+        */
        struct hlist_head want_all_unsnoopables_list;
+
+       /**
+        * @want_all_ipv4_list: a list of orig_nodes wanting all IPv4 multicast
+        *  traffic
+        */
        struct hlist_head want_all_ipv4_list;
+
+       /**
+        * @want_all_ipv6_list: a list of orig_nodes wanting all IPv6 multicast
+        *  traffic
+        */
        struct hlist_head want_all_ipv6_list;
+
+       /** @querier_ipv4: the current state of an IGMP querier in the mesh */
        struct batadv_mcast_querier_state querier_ipv4;
+
+       /** @querier_ipv6: the current state of an MLD querier in the mesh */
        struct batadv_mcast_querier_state querier_ipv6;
+
+       /** @flags: the flags we have last sent in our mcast tvlv */
        u8 flags;
+
+       /** @enabled: whether the multicast tvlv is currently enabled */
        bool enabled;
+
+       /** @bridged: whether the soft interface has a bridge on top */
        bool bridged;
+
+       /** @num_disabled: number of nodes that have no mcast tvlv */
        atomic_t num_disabled;
+
+       /**
+        * @num_want_all_unsnoopables: number of nodes wanting unsnoopable IP
+        *  traffic
+        */
        atomic_t num_want_all_unsnoopables;
+
+       /** @num_want_all_ipv4: counter for items in want_all_ipv4_list */
        atomic_t num_want_all_ipv4;
+
+       /** @num_want_all_ipv6: counter for items in want_all_ipv6_list */
        atomic_t num_want_all_ipv6;
-       /* protects want_all_{unsnoopables,ipv4,ipv6}_list */
+
+       /**
+        * @want_lists_lock: lock for protecting modifications to the mcast
+        *  want_all_{unsnoopables,ipv4,ipv6}_list (traversals are rcu-locked)
+        */
        spinlock_t want_lists_lock;
+
+       /** @work: work queue callback item for multicast TT and TVLV updates */
        struct delayed_work work;
 };
 #endif
 
 /**
  * struct batadv_priv_nc - per mesh interface network coding private data
- * @work: work queue callback item for cleanup
- * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
- * @min_tq: only consider neighbors for encoding if neigh_tq > min_tq
- * @max_fwd_delay: maximum packet forward delay to allow coding of packets
- * @max_buffer_time: buffer time for sniffed packets used to decoding
- * @timestamp_fwd_flush: timestamp of last forward packet queue flush
- * @timestamp_sniffed_purge: timestamp of last sniffed packet queue purge
- * @coding_hash: Hash table used to buffer skbs while waiting for another
- *  incoming skb to code it with. Skbs are added to the buffer just before being
- *  forwarded in routing.c
- * @decoding_hash: Hash table used to buffer skbs that might be needed to decode
- *  a received coded skb. The buffer is used for 1) skbs arriving on the
- *  soft-interface; 2) skbs overheard on the hard-interface; and 3) skbs
- *  forwarded by batman-adv.
  */
 struct batadv_priv_nc {
+       /** @work: work queue callback item for cleanup */
        struct delayed_work work;
+
+       /**
+        * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
+        */
        struct dentry *debug_dir;
+
+       /**
+        * @min_tq: only consider neighbors for encoding if neigh_tq > min_tq
+        */
        u8 min_tq;
+
+       /**
+        * @max_fwd_delay: maximum packet forward delay to allow coding of
+        *  packets
+        */
        u32 max_fwd_delay;
+
+       /**
+        * @max_buffer_time: buffer time for sniffed packets used for decoding
+        */
        u32 max_buffer_time;
+
+       /**
+        * @timestamp_fwd_flush: timestamp of last forward packet queue flush
+        */
        unsigned long timestamp_fwd_flush;
+
+       /**
+        * @timestamp_sniffed_purge: timestamp of last sniffed packet queue
+        *  purge
+        */
        unsigned long timestamp_sniffed_purge;
+
+       /**
+        * @coding_hash: Hash table used to buffer skbs while waiting for
+        *  another incoming skb to code it with. Skbs are added to the buffer
+        *  just before being forwarded in routing.c
+        */
        struct batadv_hashtable *coding_hash;
+
+       /**
+        * @decoding_hash: Hash table used to buffer skbs that might be needed
+        *  to decode a received coded skb. The buffer is used for 1) skbs
+        *  arriving on the soft-interface; 2) skbs overheard on the
+        *  hard-interface; and 3) skbs forwarded by batman-adv.
+        */
        struct batadv_hashtable *decoding_hash;
 };
 
 /**
  * struct batadv_tp_unacked - unacked packet meta-information
- * @seqno: seqno of the unacked packet
- * @len: length of the packet
- * @list: list node for batadv_tp_vars::unacked_list
  *
  * This struct is supposed to represent a buffer unacked packet. However, since
  * the purpose of the TP meter is to count the traffic only, there is no need to
  * store the entire sk_buff, the starting offset and the length are enough
  */
 struct batadv_tp_unacked {
+       /** @seqno: seqno of the unacked packet */
        u32 seqno;
+
+       /** @len: length of the packet */
        u16 len;
+
+       /** @list: list node for &batadv_tp_vars.unacked_list */
        struct list_head list;
 };
 
 /**
  * enum batadv_tp_meter_role - Modus in tp meter session
- * @BATADV_TP_RECEIVER: Initialized as receiver
- * @BATADV_TP_SENDER: Initialized as sender
  */
 enum batadv_tp_meter_role {
+       /** @BATADV_TP_RECEIVER: Initialized as receiver */
        BATADV_TP_RECEIVER,
+
+       /** @BATADV_TP_SENDER: Initialized as sender */
        BATADV_TP_SENDER
 };
 
 /**
  * struct batadv_tp_vars - tp meter private variables per session
- * @list: list node for bat_priv::tp_list
- * @timer: timer for ack (receiver) and retry (sender)
- * @bat_priv: pointer to the mesh object
- * @start_time: start time in jiffies
- * @other_end: mac address of remote
- * @role: receiver/sender modi
- * @sending: sending binary semaphore: 1 if sending, 0 is not
- * @reason: reason for a stopped session
- * @finish_work: work item for the finishing procedure
- * @test_length: test length in milliseconds
- * @session: TP session identifier
- * @icmp_uid: local ICMP "socket" index
- * @dec_cwnd: decimal part of the cwnd used during linear growth
- * @cwnd: current size of the congestion window
- * @cwnd_lock: lock do protect @cwnd & @dec_cwnd
- * @ss_threshold: Slow Start threshold. Once cwnd exceeds this value the
- *  connection switches to the Congestion Avoidance state
- * @last_acked: last acked byte
- * @last_sent: last sent byte, not yet acked
- * @tot_sent: amount of data sent/ACKed so far
- * @dup_acks: duplicate ACKs counter
- * @fast_recovery: true if in Fast Recovery mode
- * @recover: last sent seqno when entering Fast Recovery
- * @rto: sender timeout
- * @srtt: smoothed RTT scaled by 2^3
- * @rttvar: RTT variation scaled by 2^2
- * @more_bytes: waiting queue anchor when waiting for more ack/retry timeout
- * @prerandom_offset: offset inside the prerandom buffer
- * @prerandom_lock: spinlock protecting access to prerandom_offset
- * @last_recv: last in-order received packet
- * @unacked_list: list of unacked packets (meta-info only)
- * @unacked_lock: protect unacked_list
- * @last_recv_time: time time (jiffies) a msg was received
- * @refcount: number of context where the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_tp_vars {
+       /** @list: list node for &bat_priv.tp_list */
        struct hlist_node list;
+
+       /** @timer: timer for ack (receiver) and retry (sender) */
        struct timer_list timer;
+
+       /** @bat_priv: pointer to the mesh object */
        struct batadv_priv *bat_priv;
+
+       /** @start_time: start time in jiffies */
        unsigned long start_time;
+
+       /** @other_end: mac address of remote */
        u8 other_end[ETH_ALEN];
+
+       /** @role: receiver/sender modi */
        enum batadv_tp_meter_role role;
+
+       /** @sending: sending binary semaphore: 1 if sending, 0 if not */
        atomic_t sending;
+
+       /** @reason: reason for a stopped session */
        enum batadv_tp_meter_reason reason;
+
+       /** @finish_work: work item for the finishing procedure */
        struct delayed_work finish_work;
+
+       /** @test_length: test length in milliseconds */
        u32 test_length;
+
+       /** @session: TP session identifier */
        u8 session[2];
+
+       /** @icmp_uid: local ICMP "socket" index */
        u8 icmp_uid;
 
        /* sender variables */
+
+       /** @dec_cwnd: decimal part of the cwnd used during linear growth */
        u16 dec_cwnd;
+
+       /** @cwnd: current size of the congestion window */
        u32 cwnd;
-       spinlock_t cwnd_lock; /* Protects cwnd & dec_cwnd */
+
+       /** @cwnd_lock: lock to protect @cwnd & @dec_cwnd */
+       spinlock_t cwnd_lock;
+
+       /**
+        * @ss_threshold: Slow Start threshold. Once cwnd exceeds this value the
+        *  connection switches to the Congestion Avoidance state
+        */
        u32 ss_threshold;
+
+       /** @last_acked: last acked byte */
        atomic_t last_acked;
+
+       /** @last_sent: last sent byte, not yet acked */
        u32 last_sent;
+
+       /** @tot_sent: amount of data sent/ACKed so far */
        atomic64_t tot_sent;
+
+       /** @dup_acks: duplicate ACKs counter */
        atomic_t dup_acks;
+
+       /** @fast_recovery: true if in Fast Recovery mode */
        bool fast_recovery;
+
+       /** @recover: last sent seqno when entering Fast Recovery */
        u32 recover;
+
+       /** @rto: sender timeout */
        u32 rto;
+
+       /** @srtt: smoothed RTT scaled by 2^3 */
        u32 srtt;
+
+       /** @rttvar: RTT variation scaled by 2^2 */
        u32 rttvar;
+
+       /**
+        * @more_bytes: waiting queue anchor when waiting for more ack/retry
+        *  timeout
+        */
        wait_queue_head_t more_bytes;
+
+       /** @prerandom_offset: offset inside the prerandom buffer */
        u32 prerandom_offset;
-       spinlock_t prerandom_lock; /* Protects prerandom_offset */
+
+       /** @prerandom_lock: spinlock protecting access to prerandom_offset */
+       spinlock_t prerandom_lock;
 
        /* receiver variables */
+
+       /** @last_recv: last in-order received packet */
        u32 last_recv;
+
+       /** @unacked_list: list of unacked packets (meta-info only) */
        struct list_head unacked_list;
-       spinlock_t unacked_lock; /* Protects unacked_list */
+
+       /** @unacked_lock: protect unacked_list */
+       spinlock_t unacked_lock;
+
+       /** @last_recv_time: time (in jiffies) a msg was received */
        unsigned long last_recv_time;
+
+       /** @refcount: number of contexts where the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
 };
 
 /**
  * struct batadv_softif_vlan - per VLAN attributes set
- * @bat_priv: pointer to the mesh object
- * @vid: VLAN identifier
- * @kobj: kobject for sysfs vlan subdirectory
- * @ap_isolation: AP isolation state
- * @tt: TT private attributes (VLAN specific)
- * @list: list node for bat_priv::softif_vlan_list
- * @refcount: number of context where this object is currently in use
- * @rcu: struct used for freeing in a RCU-safe manner
  */
 struct batadv_softif_vlan {
+       /** @bat_priv: pointer to the mesh object */
        struct batadv_priv *bat_priv;
+
+       /** @vid: VLAN identifier */
        unsigned short vid;
+
+       /** @kobj: kobject for sysfs vlan subdirectory */
        struct kobject *kobj;
+
+       /** @ap_isolation: AP isolation state */
        atomic_t ap_isolation;          /* boolean */
+
+       /** @tt: TT private attributes (VLAN specific) */
        struct batadv_vlan_tt tt;
+
+       /** @list: list node for &bat_priv.softif_vlan_list */
        struct hlist_node list;
+
+       /**
+        * @refcount: number of context where this object is currently in use
+        */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in a RCU-safe manner */
        struct rcu_head rcu;
 };
 
 /**
  * struct batadv_priv_bat_v - B.A.T.M.A.N. V per soft-interface private data
- * @ogm_buff: buffer holding the OGM packet
- * @ogm_buff_len: length of the OGM packet buffer
- * @ogm_seqno: OGM sequence number - used to identify each OGM
- * @ogm_wq: workqueue used to schedule OGM transmissions
  */
 struct batadv_priv_bat_v {
+       /** @ogm_buff: buffer holding the OGM packet */
        unsigned char *ogm_buff;
+
+       /** @ogm_buff_len: length of the OGM packet buffer */
        int ogm_buff_len;
+
+       /** @ogm_seqno: OGM sequence number - used to identify each OGM */
        atomic_t ogm_seqno;
+
+       /** @ogm_wq: workqueue used to schedule OGM transmissions */
        struct delayed_work ogm_wq;
 };
 
 /**
  * struct batadv_priv - per mesh interface data
- * @mesh_state: current status of the mesh (inactive/active/deactivating)
- * @soft_iface: net device which holds this struct as private data
- * @bat_counters: mesh internal traffic statistic counters (see batadv_counters)
- * @aggregated_ogms: bool indicating whether OGM aggregation is enabled
- * @bonding: bool indicating whether traffic bonding is enabled
- * @fragmentation: bool indicating whether traffic fragmentation is enabled
- * @packet_size_max: max packet size that can be transmitted via
- *  multiple fragmented skbs or a single frame if fragmentation is disabled
- * @frag_seqno: incremental counter to identify chains of egress fragments
- * @bridge_loop_avoidance: bool indicating whether bridge loop avoidance is
- *  enabled
- * @distributed_arp_table: bool indicating whether distributed ARP table is
- *  enabled
- * @multicast_mode: Enable or disable multicast optimizations on this node's
- *  sender/originating side
- * @orig_interval: OGM broadcast interval in milliseconds
- * @hop_penalty: penalty which will be applied to an OGM's tq-field on every hop
- * @log_level: configured log level (see batadv_dbg_level)
- * @isolation_mark: the skb->mark value used to match packets for AP isolation
- * @isolation_mark_mask: bitmask identifying the bits in skb->mark to be used
- *  for the isolation mark
- * @bcast_seqno: last sent broadcast packet sequence number
- * @bcast_queue_left: number of remaining buffered broadcast packet slots
- * @batman_queue_left: number of remaining OGM packet slots
- * @num_ifaces: number of interfaces assigned to this mesh interface
- * @mesh_obj: kobject for sysfs mesh subdirectory
- * @debug_dir: dentry for debugfs batman-adv subdirectory
- * @forw_bat_list: list of aggregated OGMs that will be forwarded
- * @forw_bcast_list: list of broadcast packets that will be rebroadcasted
- * @tp_list: list of tp sessions
- * @tp_num: number of currently active tp sessions
- * @orig_hash: hash table containing mesh participants (orig nodes)
- * @forw_bat_list_lock: lock protecting forw_bat_list
- * @forw_bcast_list_lock: lock protecting forw_bcast_list
- * @tp_list_lock: spinlock protecting @tp_list
- * @orig_work: work queue callback item for orig node purging
- * @primary_if: one of the hard-interfaces assigned to this mesh interface
- *  becomes the primary interface
- * @algo_ops: routing algorithm used by this mesh interface
- * @softif_vlan_list: a list of softif_vlan structs, one per VLAN created on top
- *  of the mesh interface represented by this object
- * @softif_vlan_list_lock: lock protecting softif_vlan_list
- * @bla: bridge loope avoidance data
- * @debug_log: holding debug logging relevant data
- * @gw: gateway data
- * @tt: translation table data
- * @tvlv: type-version-length-value data
- * @dat: distributed arp table data
- * @mcast: multicast data
- * @network_coding: bool indicating whether network coding is enabled
- * @nc: network coding data
- * @bat_v: B.A.T.M.A.N. V per soft-interface private data
  */
 struct batadv_priv {
+       /**
+        * @mesh_state: current status of the mesh
+        *  (inactive/active/deactivating)
+        */
        atomic_t mesh_state;
+
+       /** @soft_iface: net device which holds this struct as private data */
        struct net_device *soft_iface;
+
+       /**
+        * @bat_counters: mesh internal traffic statistic counters (see
+        *  batadv_counters)
+        */
        u64 __percpu *bat_counters; /* Per cpu counters */
+
+       /**
+        * @aggregated_ogms: bool indicating whether OGM aggregation is enabled
+        */
        atomic_t aggregated_ogms;
+
+       /** @bonding: bool indicating whether traffic bonding is enabled */
        atomic_t bonding;
+
+       /**
+        * @fragmentation: bool indicating whether traffic fragmentation is
+        *  enabled
+        */
        atomic_t fragmentation;
+
+       /**
+        * @packet_size_max: max packet size that can be transmitted via
+        *  multiple fragmented skbs or a single frame if fragmentation is
+        *  disabled
+        */
        atomic_t packet_size_max;
+
+       /**
+        * @frag_seqno: incremental counter to identify chains of egress
+        *  fragments
+        */
        atomic_t frag_seqno;
+
 #ifdef CONFIG_BATMAN_ADV_BLA
+       /**
+        * @bridge_loop_avoidance: bool indicating whether bridge loop
+        *  avoidance is enabled
+        */
        atomic_t bridge_loop_avoidance;
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_DAT
+       /**
+        * @distributed_arp_table: bool indicating whether distributed ARP table
+        *  is enabled
+        */
        atomic_t distributed_arp_table;
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_MCAST
+       /**
+        * @multicast_mode: Enable or disable multicast optimizations on this
+        *  node's sender/originating side
+        */
        atomic_t multicast_mode;
 #endif
+
+       /** @orig_interval: OGM broadcast interval in milliseconds */
        atomic_t orig_interval;
+
+       /**
+        * @hop_penalty: penalty which will be applied to an OGM's tq-field on
+        *  every hop
+        */
        atomic_t hop_penalty;
+
 #ifdef CONFIG_BATMAN_ADV_DEBUG
+       /** @log_level: configured log level (see batadv_dbg_level) */
        atomic_t log_level;
 #endif
+
+       /**
+        * @isolation_mark: the skb->mark value used to match packets for AP
+        *  isolation
+        */
        u32 isolation_mark;
+
+       /**
+        * @isolation_mark_mask: bitmask identifying the bits in skb->mark to be
+        *  used for the isolation mark
+        */
        u32 isolation_mark_mask;
+
+       /** @bcast_seqno: last sent broadcast packet sequence number */
        atomic_t bcast_seqno;
+
+       /**
+        * @bcast_queue_left: number of remaining buffered broadcast packet
+        *  slots
+        */
        atomic_t bcast_queue_left;
+
+       /** @batman_queue_left: number of remaining OGM packet slots */
        atomic_t batman_queue_left;
+
+       /** @num_ifaces: number of interfaces assigned to this mesh interface */
        char num_ifaces;
+
+       /** @mesh_obj: kobject for sysfs mesh subdirectory */
        struct kobject *mesh_obj;
+
+       /** @debug_dir: dentry for debugfs batman-adv subdirectory */
        struct dentry *debug_dir;
+
+       /** @forw_bat_list: list of aggregated OGMs that will be forwarded */
        struct hlist_head forw_bat_list;
+
+       /**
+        * @forw_bcast_list: list of broadcast packets that will be
+        *  rebroadcasted
+        */
        struct hlist_head forw_bcast_list;
+
+       /** @tp_list: list of tp sessions */
        struct hlist_head tp_list;
+
+       /** @orig_hash: hash table containing mesh participants (orig nodes) */
        struct batadv_hashtable *orig_hash;
-       spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
-       spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */
-       spinlock_t tp_list_lock; /* protects tp_list */
+
+       /** @forw_bat_list_lock: lock protecting forw_bat_list */
+       spinlock_t forw_bat_list_lock;
+
+       /** @forw_bcast_list_lock: lock protecting forw_bcast_list */
+       spinlock_t forw_bcast_list_lock;
+
+       /** @tp_list_lock: spinlock protecting @tp_list */
+       spinlock_t tp_list_lock;
+
+       /** @tp_num: number of currently active tp sessions */
        atomic_t tp_num;
+
+       /** @orig_work: work queue callback item for orig node purging */
        struct delayed_work orig_work;
+
+       /**
+        * @primary_if: one of the hard-interfaces assigned to this mesh
+        *  interface becomes the primary interface
+        */
        struct batadv_hard_iface __rcu *primary_if;  /* rcu protected pointer */
+
+       /** @algo_ops: routing algorithm used by this mesh interface */
        struct batadv_algo_ops *algo_ops;
+
+       /**
+        * @softif_vlan_list: a list of softif_vlan structs, one per VLAN
+        *  created on top of the mesh interface represented by this object
+        */
        struct hlist_head softif_vlan_list;
-       spinlock_t softif_vlan_list_lock; /* protects softif_vlan_list */
+
+       /** @softif_vlan_list_lock: lock protecting softif_vlan_list */
+       spinlock_t softif_vlan_list_lock;
+
 #ifdef CONFIG_BATMAN_ADV_BLA
+       /** @bla: bridge loop avoidance data */
        struct batadv_priv_bla bla;
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_DEBUG
+       /** @debug_log: holding debug logging relevant data */
        struct batadv_priv_debug_log *debug_log;
 #endif
+
+       /** @gw: gateway data */
        struct batadv_priv_gw gw;
+
+       /** @tt: translation table data */
        struct batadv_priv_tt tt;
+
+       /** @tvlv: type-version-length-value data */
        struct batadv_priv_tvlv tvlv;
+
 #ifdef CONFIG_BATMAN_ADV_DAT
+       /** @dat: distributed arp table data */
        struct batadv_priv_dat dat;
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_MCAST
+       /** @mcast: multicast data */
        struct batadv_priv_mcast mcast;
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_NC
+       /**
+        * @network_coding: bool indicating whether network coding is enabled
+        */
        atomic_t network_coding;
+
+       /** @nc: network coding data */
        struct batadv_priv_nc nc;
 #endif /* CONFIG_BATMAN_ADV_NC */
+
 #ifdef CONFIG_BATMAN_ADV_BATMAN_V
+       /** @bat_v: B.A.T.M.A.N. V per soft-interface private data */
        struct batadv_priv_bat_v bat_v;
 #endif
 };
 
 /**
  * struct batadv_socket_client - layer2 icmp socket client data
- * @queue_list: packet queue for packets destined for this socket client
- * @queue_len: number of packets in the packet queue (queue_list)
- * @index: socket client's index in the batadv_socket_client_hash
- * @lock: lock protecting queue_list, queue_len & index
- * @queue_wait: socket client's wait queue
- * @bat_priv: pointer to soft_iface this client belongs to
  */
 struct batadv_socket_client {
+       /**
+        * @queue_list: packet queue for packets destined for this socket client
+        */
        struct list_head queue_list;
+
+       /** @queue_len: number of packets in the packet queue (queue_list) */
        unsigned int queue_len;
+
+       /** @index: socket client's index in the batadv_socket_client_hash */
        unsigned char index;
-       spinlock_t lock; /* protects queue_list, queue_len & index */
+
+       /** @lock: lock protecting queue_list, queue_len & index */
+       spinlock_t lock;
+
+       /** @queue_wait: socket client's wait queue */
        wait_queue_head_t queue_wait;
+
+       /** @bat_priv: pointer to soft_iface this client belongs to */
        struct batadv_priv *bat_priv;
 };
 
 /**
  * struct batadv_socket_packet - layer2 icmp packet for socket client
- * @list: list node for batadv_socket_client::queue_list
- * @icmp_len: size of the layer2 icmp packet
- * @icmp_packet: layer2 icmp packet
  */
 struct batadv_socket_packet {
+       /** @list: list node for &batadv_socket_client.queue_list */
        struct list_head list;
+
+       /** @icmp_len: size of the layer2 icmp packet */
        size_t icmp_len;
+
+       /** @icmp_packet: layer2 icmp packet */
        u8 icmp_packet[BATADV_ICMP_MAX_PACKET_SIZE];
 };
 
@@ -1153,312 +1740,432 @@ struct batadv_socket_packet {
 
 /**
  * struct batadv_bla_backbone_gw - batman-adv gateway bridged into the LAN
- * @orig: originator address of backbone node (mac address of primary iface)
- * @vid: vlan id this gateway was detected on
- * @hash_entry: hlist node for batadv_priv_bla::backbone_hash
- * @bat_priv: pointer to soft_iface this backbone gateway belongs to
- * @lasttime: last time we heard of this backbone gw
- * @wait_periods: grace time for bridge forward delays and bla group forming at
- *  bootup phase - no bcast traffic is formwared until it has elapsed
- * @request_sent: if this bool is set to true we are out of sync with this
- *  backbone gateway - no bcast traffic is formwared until the situation was
- *  resolved
- * @crc: crc16 checksum over all claims
- * @crc_lock: lock protecting crc
- * @report_work: work struct for reporting detected loops
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_bla_backbone_gw {
+       /**
+        * @orig: originator address of backbone node (mac address of primary
+        *  iface)
+        */
        u8 orig[ETH_ALEN];
+
+       /** @vid: vlan id this gateway was detected on */
        unsigned short vid;
+
+       /** @hash_entry: hlist node for &batadv_priv_bla.backbone_hash */
        struct hlist_node hash_entry;
+
+       /** @bat_priv: pointer to soft_iface this backbone gateway belongs to */
        struct batadv_priv *bat_priv;
+
+       /** @lasttime: last time we heard of this backbone gw */
        unsigned long lasttime;
+
+       /**
+        * @wait_periods: grace time for bridge forward delays and bla group
+        *  forming at bootup phase - no bcast traffic is forwarded until it has
+        *  elapsed
+        */
        atomic_t wait_periods;
+
+       /**
+        * @request_sent: if this bool is set to true we are out of sync with
+        *  this backbone gateway - no bcast traffic is forwarded until the
+        *  situation was resolved
+        */
        atomic_t request_sent;
+
+       /** @crc: crc16 checksum over all claims */
        u16 crc;
-       spinlock_t crc_lock; /* protects crc */
+
+       /** @crc_lock: lock protecting crc */
+       spinlock_t crc_lock;
+
+       /** @report_work: work struct for reporting detected loops */
        struct work_struct report_work;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
 };
 
 /**
  * struct batadv_bla_claim - claimed non-mesh client structure
- * @addr: mac address of claimed non-mesh client
- * @vid: vlan id this client was detected on
- * @backbone_gw: pointer to backbone gw claiming this client
- * @backbone_lock: lock protecting backbone_gw pointer
- * @lasttime: last time we heard of claim (locals only)
- * @hash_entry: hlist node for batadv_priv_bla::claim_hash
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_bla_claim {
+       /** @addr: mac address of claimed non-mesh client */
        u8 addr[ETH_ALEN];
+
+       /** @vid: vlan id this client was detected on */
        unsigned short vid;
+
+       /** @backbone_gw: pointer to backbone gw claiming this client */
        struct batadv_bla_backbone_gw *backbone_gw;
-       spinlock_t backbone_lock; /* protects backbone_gw */
+
+       /** @backbone_lock: lock protecting backbone_gw pointer */
+       spinlock_t backbone_lock;
+
+       /** @lasttime: last time we heard of claim (locals only) */
        unsigned long lasttime;
+
+       /** @hash_entry: hlist node for &batadv_priv_bla.claim_hash */
        struct hlist_node hash_entry;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
 };
 #endif
 
 /**
  * struct batadv_tt_common_entry - tt local & tt global common data
- * @addr: mac address of non-mesh client
- * @vid: VLAN identifier
- * @hash_entry: hlist node for batadv_priv_tt::local_hash or for
- *  batadv_priv_tt::global_hash
- * @flags: various state handling flags (see batadv_tt_client_flags)
- * @added_at: timestamp used for purging stale tt common entries
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_tt_common_entry {
+       /** @addr: mac address of non-mesh client */
        u8 addr[ETH_ALEN];
+
+       /** @vid: VLAN identifier */
        unsigned short vid;
+
+       /**
+        * @hash_entry: hlist node for &batadv_priv_tt.local_hash or for
+        *  &batadv_priv_tt.global_hash
+        */
        struct hlist_node hash_entry;
+
+       /** @flags: various state handling flags (see batadv_tt_client_flags) */
        u16 flags;
+
+       /** @added_at: timestamp used for purging stale tt common entries */
        unsigned long added_at;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
 };
 
 /**
  * struct batadv_tt_local_entry - translation table local entry data
- * @common: general translation table data
- * @last_seen: timestamp used for purging stale tt local entries
- * @vlan: soft-interface vlan of the entry
  */
 struct batadv_tt_local_entry {
+       /** @common: general translation table data */
        struct batadv_tt_common_entry common;
+
+       /** @last_seen: timestamp used for purging stale tt local entries */
        unsigned long last_seen;
+
+       /** @vlan: soft-interface vlan of the entry */
        struct batadv_softif_vlan *vlan;
 };
 
 /**
  * struct batadv_tt_global_entry - translation table global entry data
- * @common: general translation table data
- * @orig_list: list of orig nodes announcing this non-mesh client
- * @orig_list_count: number of items in the orig_list
- * @list_lock: lock protecting orig_list
- * @roam_at: time at which TT_GLOBAL_ROAM was set
  */
 struct batadv_tt_global_entry {
+       /** @common: general translation table data */
        struct batadv_tt_common_entry common;
+
+       /** @orig_list: list of orig nodes announcing this non-mesh client */
        struct hlist_head orig_list;
+
+       /** @orig_list_count: number of items in the orig_list */
        atomic_t orig_list_count;
-       spinlock_t list_lock;   /* protects orig_list */
+
+       /** @list_lock: lock protecting orig_list */
+       spinlock_t list_lock;
+
+       /** @roam_at: time at which TT_GLOBAL_ROAM was set */
        unsigned long roam_at;
 };
 
 /**
  * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client
- * @orig_node: pointer to orig node announcing this non-mesh client
- * @ttvn: translation table version number which added the non-mesh client
- * @flags: per orig entry TT sync flags
- * @list: list node for batadv_tt_global_entry::orig_list
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_tt_orig_list_entry {
+       /** @orig_node: pointer to orig node announcing this non-mesh client */
        struct batadv_orig_node *orig_node;
+
+       /**
+        * @ttvn: translation table version number which added the non-mesh
+        *  client
+        */
        u8 ttvn;
+
+       /** @flags: per orig entry TT sync flags */
        u8 flags;
+
+       /** @list: list node for &batadv_tt_global_entry.orig_list */
        struct hlist_node list;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
 };
 
 /**
  * struct batadv_tt_change_node - structure for tt changes occurred
- * @list: list node for batadv_priv_tt::changes_list
- * @change: holds the actual translation table diff data
  */
 struct batadv_tt_change_node {
+       /** @list: list node for &batadv_priv_tt.changes_list */
        struct list_head list;
+
+       /** @change: holds the actual translation table diff data */
        struct batadv_tvlv_tt_change change;
 };
 
 /**
  * struct batadv_tt_req_node - data to keep track of the tt requests in flight
- * @addr: mac address address of the originator this request was sent to
- * @issued_at: timestamp used for purging stale tt requests
- * @refcount: number of contexts the object is used by
- * @list: list node for batadv_priv_tt::req_list
  */
 struct batadv_tt_req_node {
+       /**
+        * @addr: mac address of the originator this request was sent to
+        */
        u8 addr[ETH_ALEN];
+
+       /** @issued_at: timestamp used for purging stale tt requests */
        unsigned long issued_at;
+
+       /** @refcount: number of contexts the object is used by */
        struct kref refcount;
+
+       /** @list: list node for &batadv_priv_tt.req_list */
        struct hlist_node list;
 };
 
 /**
  * struct batadv_tt_roam_node - roaming client data
- * @addr: mac address of the client in the roaming phase
- * @counter: number of allowed roaming events per client within a single
- *  OGM interval (changes are committed with each OGM)
- * @first_time: timestamp used for purging stale roaming node entries
- * @list: list node for batadv_priv_tt::roam_list
  */
 struct batadv_tt_roam_node {
+       /** @addr: mac address of the client in the roaming phase */
        u8 addr[ETH_ALEN];
+
+       /**
+        * @counter: number of allowed roaming events per client within a single
+        * OGM interval (changes are committed with each OGM)
+        */
        atomic_t counter;
+
+       /**
+        * @first_time: timestamp used for purging stale roaming node entries
+        */
        unsigned long first_time;
+
+       /** @list: list node for &batadv_priv_tt.roam_list */
        struct list_head list;
 };
 
 /**
  * struct batadv_nc_node - network coding node
- * @list: next and prev pointer for the list handling
- * @addr: the node's mac address
- * @refcount: number of contexts the object is used by
- * @rcu: struct used for freeing in an RCU-safe manner
- * @orig_node: pointer to corresponding orig node struct
- * @last_seen: timestamp of last ogm received from this node
  */
 struct batadv_nc_node {
+       /** @list: next and prev pointer for the list handling */
        struct list_head list;
+
+       /** @addr: the node's mac address */
        u8 addr[ETH_ALEN];
+
+       /** @refcount: number of contexts the object is used by */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
+
+       /** @orig_node: pointer to corresponding orig node struct */
        struct batadv_orig_node *orig_node;
+
+       /** @last_seen: timestamp of last ogm received from this node */
        unsigned long last_seen;
 };
 
 /**
  * struct batadv_nc_path - network coding path
- * @hash_entry: next and prev pointer for the list handling
- * @rcu: struct used for freeing in an RCU-safe manner
- * @refcount: number of contexts the object is used by
- * @packet_list: list of buffered packets for this path
- * @packet_list_lock: access lock for packet list
- * @next_hop: next hop (destination) of path
- * @prev_hop: previous hop (source) of path
- * @last_valid: timestamp for last validation of path
  */
 struct batadv_nc_path {
+       /** @hash_entry: next and prev pointer for the list handling */
        struct hlist_node hash_entry;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
+
+       /** @refcount: number of contexts the object is used by */
        struct kref refcount;
+
+       /** @packet_list: list of buffered packets for this path */
        struct list_head packet_list;
-       spinlock_t packet_list_lock; /* Protects packet_list */
+
+       /** @packet_list_lock: access lock for packet list */
+       spinlock_t packet_list_lock;
+
+       /** @next_hop: next hop (destination) of path */
        u8 next_hop[ETH_ALEN];
+
+       /** @prev_hop: previous hop (source) of path */
        u8 prev_hop[ETH_ALEN];
+
+       /** @last_valid: timestamp for last validation of path */
        unsigned long last_valid;
 };
 
 /**
  * struct batadv_nc_packet - network coding packet used when coding and
  *  decoding packets
- * @list: next and prev pointer for the list handling
- * @packet_id: crc32 checksum of skb data
- * @timestamp: field containing the info when the packet was added to path
- * @neigh_node: pointer to original next hop neighbor of skb
- * @skb: skb which can be encoded or used for decoding
- * @nc_path: pointer to path this nc packet is attached to
  */
 struct batadv_nc_packet {
+       /** @list: next and prev pointer for the list handling */
        struct list_head list;
+
+       /** @packet_id: crc32 checksum of skb data */
        __be32 packet_id;
+
+       /**
+        * @timestamp: field containing the info when the packet was added to
+        *  path
+        */
        unsigned long timestamp;
+
+       /** @neigh_node: pointer to original next hop neighbor of skb */
        struct batadv_neigh_node *neigh_node;
+
+       /** @skb: skb which can be encoded or used for decoding */
        struct sk_buff *skb;
+
+       /** @nc_path: pointer to path this nc packet is attached to */
        struct batadv_nc_path *nc_path;
 };
 
 /**
  * struct batadv_skb_cb - control buffer structure used to store private data
  *  relevant to batman-adv in the skb->cb buffer in skbs.
- * @decoded: Marks a skb as decoded, which is checked when searching for coding
- *  opportunities in network-coding.c
- * @num_bcasts: Counter for broadcast packet retransmissions
  */
 struct batadv_skb_cb {
+       /**
+        * @decoded: Marks a skb as decoded, which is checked when searching for
+        *  coding opportunities in network-coding.c
+        */
        bool decoded;
+
+       /** @num_bcasts: Counter for broadcast packet retransmissions */
        unsigned int num_bcasts;
 };
 
 /**
  * struct batadv_forw_packet - structure for bcast packets to be sent/forwarded
- * @list: list node for batadv_priv::forw_{bat,bcast}_list
- * @cleanup_list: list node for purging functions
- * @send_time: execution time for delayed_work (packet sending)
- * @own: bool for locally generated packets (local OGMs are re-scheduled after
- *  sending)
- * @skb: bcast packet's skb buffer
- * @packet_len: size of aggregated OGM packet inside the skb buffer
- * @direct_link_flags: direct link flags for aggregated OGM packets
- * @num_packets: counter for aggregated OGMv1 packets
- * @delayed_work: work queue callback item for packet sending
- * @if_incoming: pointer to incoming hard-iface or primary iface if
- *  locally generated packet
- * @if_outgoing: packet where the packet should be sent to, or NULL if
- *  unspecified
- * @queue_left: The queue (counter) this packet was applied to
  */
 struct batadv_forw_packet {
+       /**
+        * @list: list node for &batadv_priv.forw.bcast_list and
+        *  &batadv_priv.forw.bat_list
+        */
        struct hlist_node list;
+
+       /** @cleanup_list: list node for purging functions */
        struct hlist_node cleanup_list;
+
+       /** @send_time: execution time for delayed_work (packet sending) */
        unsigned long send_time;
+
+       /**
+        * @own: bool for locally generated packets (local OGMs are re-scheduled
+        * after sending)
+        */
        u8 own;
+
+       /** @skb: bcast packet's skb buffer */
        struct sk_buff *skb;
+
+       /** @packet_len: size of aggregated OGM packet inside the skb buffer */
        u16 packet_len;
+
+       /** @direct_link_flags: direct link flags for aggregated OGM packets */
        u32 direct_link_flags;
+
+       /** @num_packets: counter for aggregated OGMv1 packets */
        u8 num_packets;
+
+       /** @delayed_work: work queue callback item for packet sending */
        struct delayed_work delayed_work;
+
+       /**
+        * @if_incoming: pointer to incoming hard-iface or primary iface if
+        *  locally generated packet
+        */
        struct batadv_hard_iface *if_incoming;
+
+       /**
+        * @if_outgoing: packet where the packet should be sent to, or NULL if
+        *  unspecified
+        */
        struct batadv_hard_iface *if_outgoing;
+
+       /** @queue_left: The queue (counter) this packet was applied to */
        atomic_t *queue_left;
 };
 
 /**
  * struct batadv_algo_iface_ops - mesh algorithm callbacks (interface specific)
- * @activate: start routing mechanisms when hard-interface is brought up
- *  (optional)
- * @enable: init routing info when hard-interface is enabled
- * @disable: de-init routing info when hard-interface is disabled
- * @update_mac: (re-)init mac addresses of the protocol information
- *  belonging to this hard-interface
- * @primary_set: called when primary interface is selected / changed
  */
 struct batadv_algo_iface_ops {
+       /**
+        * @activate: start routing mechanisms when hard-interface is brought up
+        *  (optional)
+        */
        void (*activate)(struct batadv_hard_iface *hard_iface);
+
+       /** @enable: init routing info when hard-interface is enabled */
        int (*enable)(struct batadv_hard_iface *hard_iface);
+
+       /** @disable: de-init routing info when hard-interface is disabled */
        void (*disable)(struct batadv_hard_iface *hard_iface);
+
+       /**
+        * @update_mac: (re-)init mac addresses of the protocol information
+        *  belonging to this hard-interface
+        */
        void (*update_mac)(struct batadv_hard_iface *hard_iface);
+
+       /** @primary_set: called when primary interface is selected / changed */
        void (*primary_set)(struct batadv_hard_iface *hard_iface);
 };
 
 /**
  * struct batadv_algo_neigh_ops - mesh algorithm callbacks (neighbour specific)
- * @hardif_init: called on creation of single hop entry
- *  (optional)
- * @cmp: compare the metrics of two neighbors for their respective outgoing
- *  interfaces
- * @is_similar_or_better: check if neigh1 is equally similar or better than
- *  neigh2 for their respective outgoing interface from the metric prospective
- * @print: print the single hop neighbor list (optional)
- * @dump: dump neighbors to a netlink socket (optional)
  */
 struct batadv_algo_neigh_ops {
+       /** @hardif_init: called on creation of single hop entry (optional) */
        void (*hardif_init)(struct batadv_hardif_neigh_node *neigh);
+
+       /**
+        * @cmp: compare the metrics of two neighbors for their respective
+        *  outgoing interfaces
+        */
        int (*cmp)(struct batadv_neigh_node *neigh1,
                   struct batadv_hard_iface *if_outgoing1,
                   struct batadv_neigh_node *neigh2,
                   struct batadv_hard_iface *if_outgoing2);
+
+       /**
+        * @is_similar_or_better: check if neigh1 is equally similar or better
+        *  than neigh2 for their respective outgoing interface from the metric
+        *  perspective
+        */
        bool (*is_similar_or_better)(struct batadv_neigh_node *neigh1,
                                     struct batadv_hard_iface *if_outgoing1,
                                     struct batadv_neigh_node *neigh2,
                                     struct batadv_hard_iface *if_outgoing2);
+
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+       /** @print: print the single hop neighbor list (optional) */
        void (*print)(struct batadv_priv *priv, struct seq_file *seq);
 #endif
+
+       /** @dump: dump neighbors to a netlink socket (optional) */
        void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
                     struct batadv_priv *priv,
                     struct batadv_hard_iface *hard_iface);
@@ -1466,24 +2173,36 @@ struct batadv_algo_neigh_ops {
 
 /**
  * struct batadv_algo_orig_ops - mesh algorithm callbacks (originator specific)
- * @free: free the resources allocated by the routing algorithm for an orig_node
- *  object (optional)
- * @add_if: ask the routing algorithm to apply the needed changes to the
- *  orig_node due to a new hard-interface being added into the mesh (optional)
- * @del_if: ask the routing algorithm to apply the needed changes to the
- *  orig_node due to an hard-interface being removed from the mesh (optional)
- * @print: print the originator table (optional)
- * @dump: dump originators to a netlink socket (optional)
  */
 struct batadv_algo_orig_ops {
+       /**
+        * @free: free the resources allocated by the routing algorithm for an
+        *  orig_node object (optional)
+        */
        void (*free)(struct batadv_orig_node *orig_node);
+
+       /**
+        * @add_if: ask the routing algorithm to apply the needed changes to the
+        *  orig_node due to a new hard-interface being added into the mesh
+        *  (optional)
+        */
        int (*add_if)(struct batadv_orig_node *orig_node, int max_if_num);
+
+       /**
+        * @del_if: ask the routing algorithm to apply the needed changes to the
+        *  orig_node due to a hard-interface being removed from the mesh
+        *  (optional)
+        */
        int (*del_if)(struct batadv_orig_node *orig_node, int max_if_num,
                      int del_if_num);
+
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+       /** @print: print the originator table (optional) */
        void (*print)(struct batadv_priv *priv, struct seq_file *seq,
                      struct batadv_hard_iface *hard_iface);
 #endif
+
+       /** @dump: dump originators to a netlink socket (optional) */
        void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
                     struct batadv_priv *priv,
                     struct batadv_hard_iface *hard_iface);
@@ -1491,158 +2210,213 @@ struct batadv_algo_orig_ops {
 
 /**
  * struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific)
- * @init_sel_class: initialize GW selection class (optional)
- * @store_sel_class: parse and stores a new GW selection class (optional)
- * @show_sel_class: prints the current GW selection class (optional)
- * @get_best_gw_node: select the best GW from the list of available nodes
- *  (optional)
- * @is_eligible: check if a newly discovered GW is a potential candidate for
- *  the election as best GW (optional)
- * @print: print the gateway table (optional)
- * @dump: dump gateways to a netlink socket (optional)
  */
 struct batadv_algo_gw_ops {
+       /** @init_sel_class: initialize GW selection class (optional) */
        void (*init_sel_class)(struct batadv_priv *bat_priv);
+
+       /**
+        * @store_sel_class: parse and stores a new GW selection class
+        *  (optional)
+        */
        ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff,
                                   size_t count);
+
+       /** @show_sel_class: prints the current GW selection class (optional) */
        ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff);
+
+       /**
+        * @get_best_gw_node: select the best GW from the list of available
+        *  nodes (optional)
+        */
        struct batadv_gw_node *(*get_best_gw_node)
                (struct batadv_priv *bat_priv);
+
+       /**
+        * @is_eligible: check if a newly discovered GW is a potential candidate
+        *  for the election as best GW (optional)
+        */
        bool (*is_eligible)(struct batadv_priv *bat_priv,
                            struct batadv_orig_node *curr_gw_orig,
                            struct batadv_orig_node *orig_node);
+
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+       /** @print: print the gateway table (optional) */
        void (*print)(struct batadv_priv *bat_priv, struct seq_file *seq);
 #endif
+
+       /** @dump: dump gateways to a netlink socket (optional) */
        void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
                     struct batadv_priv *priv);
 };
 
 /**
  * struct batadv_algo_ops - mesh algorithm callbacks
- * @list: list node for the batadv_algo_list
- * @name: name of the algorithm
- * @iface: callbacks related to interface handling
- * @neigh: callbacks related to neighbors handling
- * @orig: callbacks related to originators handling
- * @gw: callbacks related to GW mode
  */
 struct batadv_algo_ops {
+       /** @list: list node for the batadv_algo_list */
        struct hlist_node list;
+
+       /** @name: name of the algorithm */
        char *name;
+
+       /** @iface: callbacks related to interface handling */
        struct batadv_algo_iface_ops iface;
+
+       /** @neigh: callbacks related to neighbors handling */
        struct batadv_algo_neigh_ops neigh;
+
+       /** @orig: callbacks related to originators handling */
        struct batadv_algo_orig_ops orig;
+
+       /** @gw: callbacks related to GW mode */
        struct batadv_algo_gw_ops gw;
 };
 
 /**
  * struct batadv_dat_entry - it is a single entry of batman-adv ARP backend. It
  * is used to stored ARP entries needed for the global DAT cache
- * @ip: the IPv4 corresponding to this DAT/ARP entry
- * @mac_addr: the MAC address associated to the stored IPv4
- * @vid: the vlan ID associated to this entry
- * @last_update: time in jiffies when this entry was refreshed last time
- * @hash_entry: hlist node for batadv_priv_dat::hash
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_dat_entry {
+       /** @ip: the IPv4 corresponding to this DAT/ARP entry */
        __be32 ip;
+
+       /** @mac_addr: the MAC address associated to the stored IPv4 */
        u8 mac_addr[ETH_ALEN];
+
+       /** @vid: the vlan ID associated to this entry */
        unsigned short vid;
+
+       /**
+        * @last_update: time in jiffies when this entry was refreshed last time
+        */
        unsigned long last_update;
+
+       /** @hash_entry: hlist node for &batadv_priv_dat.hash */
        struct hlist_node hash_entry;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
 };
 
 /**
  * struct batadv_hw_addr - a list entry for a MAC address
- * @list: list node for the linking of entries
- * @addr: the MAC address of this list entry
  */
 struct batadv_hw_addr {
+       /** @list: list node for the linking of entries */
        struct hlist_node list;
+
+       /** @addr: the MAC address of this list entry */
        unsigned char addr[ETH_ALEN];
 };
 
 /**
  * struct batadv_dat_candidate - candidate destination for DAT operations
- * @type: the type of the selected candidate. It can one of the following:
- *       - BATADV_DAT_CANDIDATE_NOT_FOUND
- *       - BATADV_DAT_CANDIDATE_ORIG
- * @orig_node: if type is BATADV_DAT_CANDIDATE_ORIG this field points to the
- *            corresponding originator node structure
  */
 struct batadv_dat_candidate {
+       /**
+        * @type: the type of the selected candidate. It can be one of the
+        *  following:
+        *        - BATADV_DAT_CANDIDATE_NOT_FOUND
+        *        - BATADV_DAT_CANDIDATE_ORIG
+        */
        int type;
+
+       /**
+        * @orig_node: if type is BATADV_DAT_CANDIDATE_ORIG this field points to
+        * the corresponding originator node structure
+        */
        struct batadv_orig_node *orig_node;
 };
 
 /**
  * struct batadv_tvlv_container - container for tvlv appended to OGMs
- * @list: hlist node for batadv_priv_tvlv::container_list
- * @tvlv_hdr: tvlv header information needed to construct the tvlv
- * @refcount: number of contexts the object is used
  */
 struct batadv_tvlv_container {
+       /** @list: hlist node for &batadv_priv_tvlv.container_list */
        struct hlist_node list;
+
+       /** @tvlv_hdr: tvlv header information needed to construct the tvlv */
        struct batadv_tvlv_hdr tvlv_hdr;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
 };
 
 /**
  * struct batadv_tvlv_handler - handler for specific tvlv type and version
- * @list: hlist node for batadv_priv_tvlv::handler_list
- * @ogm_handler: handler callback which is given the tvlv payload to process on
- *  incoming OGM packets
- * @unicast_handler: handler callback which is given the tvlv payload to process
- *  on incoming unicast tvlv packets
- * @type: tvlv type this handler feels responsible for
- * @version: tvlv version this handler feels responsible for
- * @flags: tvlv handler flags
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_tvlv_handler {
+       /** @list: hlist node for &batadv_priv_tvlv.handler_list */
        struct hlist_node list;
+
+       /**
+        * @ogm_handler: handler callback which is given the tvlv payload to
+        *  process on incoming OGM packets
+        */
        void (*ogm_handler)(struct batadv_priv *bat_priv,
                            struct batadv_orig_node *orig,
                            u8 flags, void *tvlv_value, u16 tvlv_value_len);
+
+       /**
+        * @unicast_handler: handler callback which is given the tvlv payload to
+        *  process on incoming unicast tvlv packets
+        */
        int (*unicast_handler)(struct batadv_priv *bat_priv,
                               u8 *src, u8 *dst,
                               void *tvlv_value, u16 tvlv_value_len);
+
+       /** @type: tvlv type this handler feels responsible for */
        u8 type;
+
+       /** @version: tvlv version this handler feels responsible for */
        u8 version;
+
+       /** @flags: tvlv handler flags */
        u8 flags;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
 };
 
 /**
  * enum batadv_tvlv_handler_flags - tvlv handler flags definitions
- * @BATADV_TVLV_HANDLER_OGM_CIFNOTFND: tvlv ogm processing function will call
- *  this handler even if its type was not found (with no data)
- * @BATADV_TVLV_HANDLER_OGM_CALLED: interval tvlv handling flag - the API marks
- *  a handler as being called, so it won't be called if the
- *  BATADV_TVLV_HANDLER_OGM_CIFNOTFND flag was set
  */
 enum batadv_tvlv_handler_flags {
+       /**
+        * @BATADV_TVLV_HANDLER_OGM_CIFNOTFND: tvlv ogm processing function
+        *  will call this handler even if its type was not found (with no data)
+        */
        BATADV_TVLV_HANDLER_OGM_CIFNOTFND = BIT(1),
+
+       /**
+        * @BATADV_TVLV_HANDLER_OGM_CALLED: interval tvlv handling flag - the
+        *  API marks a handler as being called, so it won't be called if the
+        *  BATADV_TVLV_HANDLER_OGM_CIFNOTFND flag was set
+        */
        BATADV_TVLV_HANDLER_OGM_CALLED = BIT(2),
 };
 
 /**
  * struct batadv_store_mesh_work - Work queue item to detach add/del interface
  *  from sysfs locks
- * @net_dev: netdevice to add/remove to/from batman-adv soft-interface
- * @soft_iface_name: name of soft-interface to modify
- * @work: work queue item
  */
 struct batadv_store_mesh_work {
+       /**
+        * @net_dev: netdevice to add/remove to/from batman-adv soft-interface
+        */
        struct net_device *net_dev;
+
+       /** @soft_iface_name: name of soft-interface to modify */
        char soft_iface_name[IFNAMSIZ];
+
+       /** @work: work queue item */
        struct work_struct work;
 };
 
index 91e3ba28070647bc93960c1d729aa200b666b249..f044202346c63c02355ba1d4599acd0f0c299348 100644 (file)
@@ -766,43 +766,39 @@ static int __init bt_init(void)
                return err;
 
        err = sock_register(&bt_sock_family_ops);
-       if (err < 0) {
-               bt_sysfs_cleanup();
-               return err;
-       }
+       if (err)
+               goto cleanup_sysfs;
 
        BT_INFO("HCI device and connection manager initialized");
 
        err = hci_sock_init();
-       if (err < 0)
-               goto error;
+       if (err)
+               goto unregister_socket;
 
        err = l2cap_init();
-       if (err < 0)
-               goto sock_err;
+       if (err)
+               goto cleanup_socket;
 
        err = sco_init();
-       if (err < 0) {
-               l2cap_exit();
-               goto sock_err;
-       }
+       if (err)
+               goto cleanup_cap;
 
        err = mgmt_init();
-       if (err < 0) {
-               sco_exit();
-               l2cap_exit();
-               goto sock_err;
-       }
+       if (err)
+               goto cleanup_sco;
 
        return 0;
 
-sock_err:
+cleanup_sco:
+       sco_exit();
+cleanup_cap:
+       l2cap_exit();
+cleanup_socket:
        hci_sock_cleanup();
-
-error:
+unregister_socket:
        sock_unregister(PF_BLUETOOTH);
+cleanup_sysfs:
        bt_sysfs_cleanup();
-
        return err;
 }
 
index 63df63ebfb249a440a63137358529956bc616ffd..57403bd567d0680f7e890b6e3ddcf7f83f9511fe 100644 (file)
@@ -88,6 +88,9 @@ static int __name ## _show(struct seq_file *f, void *ptr)                   \
        return 0;                                                             \
 }                                                                            \
                                                                              \
+DEFINE_SHOW_ATTRIBUTE(__name)
+
+#define DEFINE_SHOW_ATTRIBUTE(__name)                                        \
 static int __name ## _open(struct inode *inode, struct file *file)           \
 {                                                                            \
        return single_open(file, __name ## _show, inode->i_private);          \
@@ -106,37 +109,16 @@ static int features_show(struct seq_file *f, void *ptr)
        u8 p;
 
        hci_dev_lock(hdev);
-       for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
-               seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
-                          "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
-                          hdev->features[p][0], hdev->features[p][1],
-                          hdev->features[p][2], hdev->features[p][3],
-                          hdev->features[p][4], hdev->features[p][5],
-                          hdev->features[p][6], hdev->features[p][7]);
-       }
+       for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++)
+               seq_printf(f, "%2u: %8ph\n", p, hdev->features[p]);
        if (lmp_le_capable(hdev))
-               seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
-                          "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
-                          hdev->le_features[0], hdev->le_features[1],
-                          hdev->le_features[2], hdev->le_features[3],
-                          hdev->le_features[4], hdev->le_features[5],
-                          hdev->le_features[6], hdev->le_features[7]);
+               seq_printf(f, "LE: %8ph\n", hdev->le_features);
        hci_dev_unlock(hdev);
 
        return 0;
 }
 
-static int features_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, features_show, inode->i_private);
-}
-
-static const struct file_operations features_fops = {
-       .open           = features_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(features);
 
 static int device_id_show(struct seq_file *f, void *ptr)
 {
@@ -150,17 +132,7 @@ static int device_id_show(struct seq_file *f, void *ptr)
        return 0;
 }
 
-static int device_id_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, device_id_show, inode->i_private);
-}
-
-static const struct file_operations device_id_fops = {
-       .open           = device_id_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(device_id);
 
 static int device_list_show(struct seq_file *f, void *ptr)
 {
@@ -180,17 +152,7 @@ static int device_list_show(struct seq_file *f, void *ptr)
        return 0;
 }
 
-static int device_list_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, device_list_show, inode->i_private);
-}
-
-static const struct file_operations device_list_fops = {
-       .open           = device_list_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(device_list);
 
 static int blacklist_show(struct seq_file *f, void *p)
 {
@@ -205,17 +167,7 @@ static int blacklist_show(struct seq_file *f, void *p)
        return 0;
 }
 
-static int blacklist_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, blacklist_show, inode->i_private);
-}
-
-static const struct file_operations blacklist_fops = {
-       .open           = blacklist_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(blacklist);
 
 static int uuids_show(struct seq_file *f, void *p)
 {
@@ -240,17 +192,7 @@ static int uuids_show(struct seq_file *f, void *p)
        return 0;
 }
 
-static int uuids_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, uuids_show, inode->i_private);
-}
-
-static const struct file_operations uuids_fops = {
-       .open           = uuids_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(uuids);
 
 static int remote_oob_show(struct seq_file *f, void *ptr)
 {
@@ -269,17 +211,7 @@ static int remote_oob_show(struct seq_file *f, void *ptr)
        return 0;
 }
 
-static int remote_oob_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, remote_oob_show, inode->i_private);
-}
-
-static const struct file_operations remote_oob_fops = {
-       .open           = remote_oob_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(remote_oob);
 
 static int conn_info_min_age_set(void *data, u64 val)
 {
@@ -443,17 +375,7 @@ static int inquiry_cache_show(struct seq_file *f, void *p)
        return 0;
 }
 
-static int inquiry_cache_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, inquiry_cache_show, inode->i_private);
-}
-
-static const struct file_operations inquiry_cache_fops = {
-       .open           = inquiry_cache_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(inquiry_cache);
 
 static int link_keys_show(struct seq_file *f, void *ptr)
 {
@@ -469,17 +391,7 @@ static int link_keys_show(struct seq_file *f, void *ptr)
        return 0;
 }
 
-static int link_keys_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, link_keys_show, inode->i_private);
-}
-
-static const struct file_operations link_keys_fops = {
-       .open           = link_keys_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(link_keys);
 
 static int dev_class_show(struct seq_file *f, void *ptr)
 {
@@ -493,17 +405,7 @@ static int dev_class_show(struct seq_file *f, void *ptr)
        return 0;
 }
 
-static int dev_class_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, dev_class_show, inode->i_private);
-}
-
-static const struct file_operations dev_class_fops = {
-       .open           = dev_class_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(dev_class);
 
 static int voice_setting_get(void *data, u64 *val)
 {
@@ -692,17 +594,7 @@ static int identity_show(struct seq_file *f, void *p)
        return 0;
 }
 
-static int identity_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, identity_show, inode->i_private);
-}
-
-static const struct file_operations identity_fops = {
-       .open           = identity_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(identity);
 
 static int rpa_timeout_set(void *data, u64 val)
 {
@@ -746,17 +638,7 @@ static int random_address_show(struct seq_file *f, void *p)
        return 0;
 }
 
-static int random_address_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, random_address_show, inode->i_private);
-}
-
-static const struct file_operations random_address_fops = {
-       .open           = random_address_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(random_address);
 
 static int static_address_show(struct seq_file *f, void *p)
 {
@@ -769,17 +651,7 @@ static int static_address_show(struct seq_file *f, void *p)
        return 0;
 }
 
-static int static_address_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, static_address_show, inode->i_private);
-}
-
-static const struct file_operations static_address_fops = {
-       .open           = static_address_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(static_address);
 
 static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
@@ -841,17 +713,7 @@ static int white_list_show(struct seq_file *f, void *ptr)
        return 0;
 }
 
-static int white_list_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, white_list_show, inode->i_private);
-}
-
-static const struct file_operations white_list_fops = {
-       .open           = white_list_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(white_list);
 
 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
 {
@@ -869,18 +731,7 @@ static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
        return 0;
 }
 
-static int identity_resolving_keys_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, identity_resolving_keys_show,
-                          inode->i_private);
-}
-
-static const struct file_operations identity_resolving_keys_fops = {
-       .open           = identity_resolving_keys_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(identity_resolving_keys);
 
 static int long_term_keys_show(struct seq_file *f, void *ptr)
 {
@@ -898,17 +749,7 @@ static int long_term_keys_show(struct seq_file *f, void *ptr)
        return 0;
 }
 
-static int long_term_keys_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, long_term_keys_show, inode->i_private);
-}
-
-static const struct file_operations long_term_keys_fops = {
-       .open           = long_term_keys_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(long_term_keys);
 
 static int conn_min_interval_set(void *data, u64 val)
 {
index abc0f3224dd1910e95bb00e7692acaca844e4361..3394e679167380191e9505880a265062fe904043 100644 (file)
@@ -919,6 +919,43 @@ static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
        return true;
 }
 
+static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
+{
+       /* If there is no connection we are OK to advertise. */
+       if (hci_conn_num(hdev, LE_LINK) == 0)
+               return true;
+
+       /* Check le_states if there is any connection in slave role. */
+       if (hdev->conn_hash.le_num_slave > 0) {
+               /* Slave connection state and non connectable mode bit 20. */
+               if (!connectable && !(hdev->le_states[2] & 0x10))
+                       return false;
+
+               /* Slave connection state and connectable mode bit 38
+                * and scannable bit 21.
+                */
+               if (connectable && (!(hdev->le_states[4] & 0x01) ||
+                                   !(hdev->le_states[2] & 0x40)))
+                       return false;
+       }
+
+       /* Check le_states if there is any connection in master role. */
+       if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
+               /* Master connection state and non connectable mode bit 18. */
+               if (!connectable && !(hdev->le_states[2] & 0x02))
+                       return false;
+
+               /* Master connection state and connectable mode bit 35 and
+                * scannable 19.
+                */
+               if (connectable && (!(hdev->le_states[4] & 0x10) ||
+                                   !(hdev->le_states[2] & 0x08)))
+                       return false;
+       }
+
+       return true;
+}
+
 void __hci_req_enable_advertising(struct hci_request *req)
 {
        struct hci_dev *hdev = req->hdev;
@@ -927,7 +964,15 @@ void __hci_req_enable_advertising(struct hci_request *req)
        bool connectable;
        u32 flags;
 
-       if (hci_conn_num(hdev, LE_LINK) > 0)
+       flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
+
+       /* If the "connectable" instance flag was not set, then choose between
+        * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
+        */
+       connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
+                     mgmt_get_connectable(hdev);
+
+       if (!is_advertising_allowed(hdev, connectable))
                return;
 
        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
@@ -940,14 +985,6 @@ void __hci_req_enable_advertising(struct hci_request *req)
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);
 
-       flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
-
-       /* If the "connectable" instance flag was not set, then choose between
-        * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
-        */
-       connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
-                     mgmt_get_connectable(hdev);
-
        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
@@ -1985,13 +2022,6 @@ unlock:
        hci_dev_unlock(hdev);
 }
 
-static void disable_advertising(struct hci_request *req)
-{
-       u8 enable = 0x00;
-
-       hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
-}
-
 static int active_scan(struct hci_request *req, unsigned long opt)
 {
        uint16_t interval = opt;
@@ -2017,7 +2047,7 @@ static int active_scan(struct hci_request *req, unsigned long opt)
                cancel_adv_timeout(hdev);
                hci_dev_unlock(hdev);
 
-               disable_advertising(req);
+               __hci_req_disable_advertising(req);
        }
 
        /* If controller is scanning, it means the background scanning is
index d0ef0a8e8831920cb86fc767eb0d756bf89feb46..015f465c514b28564c9e91eec40dc041b765fe25 100644 (file)
@@ -1262,19 +1262,20 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev,
        struct net_bridge *br = netdev_priv(dev);
        int err;
 
+       err = register_netdevice(dev);
+       if (err)
+               return err;
+
        if (tb[IFLA_ADDRESS]) {
                spin_lock_bh(&br->lock);
                br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
                spin_unlock_bh(&br->lock);
        }
 
-       err = register_netdevice(dev);
-       if (err)
-               return err;
-
        err = br_changelink(dev, tb, data, extack);
        if (err)
-               unregister_netdevice(dev);
+               br_dev_delete(dev, NULL);
+
        return err;
 }
 
index 723f25eed8ea0de05cb554ec105d6048585ea0be..b1be0dcfba6bb6e83a9ccbc2294da4772d88f16c 100644 (file)
@@ -272,10 +272,7 @@ static ssize_t group_addr_show(struct device *d,
                               struct device_attribute *attr, char *buf)
 {
        struct net_bridge *br = to_bridge(d);
-       return sprintf(buf, "%x:%x:%x:%x:%x:%x\n",
-                      br->group_addr[0], br->group_addr[1],
-                      br->group_addr[2], br->group_addr[3],
-                      br->group_addr[4], br->group_addr[5]);
+       return sprintf(buf, "%pM\n", br->group_addr);
 }
 
 static ssize_t group_addr_store(struct device *d,
@@ -284,14 +281,11 @@ static ssize_t group_addr_store(struct device *d,
 {
        struct net_bridge *br = to_bridge(d);
        u8 new_addr[6];
-       int i;
 
        if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
                return -EPERM;
 
-       if (sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
-                  &new_addr[0], &new_addr[1], &new_addr[2],
-                  &new_addr[3], &new_addr[4], &new_addr[5]) != 6)
+       if (!mac_pton(buf, new_addr))
                return -EINVAL;
 
        if (!is_link_local_ether_addr(new_addr))
@@ -306,8 +300,7 @@ static ssize_t group_addr_store(struct device *d,
                return restart_syscall();
 
        spin_lock_bh(&br->lock);
-       for (i = 0; i < 6; i++)
-               br->group_addr[i] = new_addr[i];
+       ether_addr_copy(br->group_addr, new_addr);
        spin_unlock_bh(&br->lock);
 
        br->group_addr_set = true;
index b0eee49a2489e203c81f64b091c99ba001567380..2eb66c0d9cdbae57896f46fc90e398ffdf8186b0 100644 (file)
@@ -1542,6 +1542,23 @@ void dev_disable_lro(struct net_device *dev)
 }
 EXPORT_SYMBOL(dev_disable_lro);
 
+/**
+ *     dev_disable_gro_hw - disable HW Generic Receive Offload on a device
+ *     @dev: device
+ *
+ *     Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
+ *     called under RTNL.  This is needed if Generic XDP is installed on
+ *     the device.
+ */
+static void dev_disable_gro_hw(struct net_device *dev)
+{
+       dev->wanted_features &= ~NETIF_F_GRO_HW;
+       netdev_update_features(dev);
+
+       if (unlikely(dev->features & NETIF_F_GRO_HW))
+               netdev_WARN(dev, "failed to disable GRO_HW!\n");
+}
+
 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
                                   struct net_device *dev)
 {
@@ -3042,7 +3059,7 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
 }
 EXPORT_SYMBOL(skb_csum_hwoffload_help);
 
-static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
 {
        netdev_features_t features;
 
@@ -3066,9 +3083,6 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
                    __skb_linearize(skb))
                        goto out_kfree_skb;
 
-               if (validate_xmit_xfrm(skb, features))
-                       goto out_kfree_skb;
-
                /* If packet is not checksummed and device does not
                 * support checksumming for this protocol, complete
                 * checksumming here.
@@ -3085,6 +3099,8 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
                }
        }
 
+       skb = validate_xmit_xfrm(skb, features, again);
+
        return skb;
 
 out_kfree_skb:
@@ -3094,7 +3110,7 @@ out_null:
        return NULL;
 }
 
-struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
 {
        struct sk_buff *next, *head = NULL, *tail;
 
@@ -3105,7 +3121,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
                /* in case skb wont be segmented, point to itself */
                skb->prev = skb;
 
-               skb = validate_xmit_skb(skb, dev);
+               skb = validate_xmit_skb(skb, dev, again);
                if (!skb)
                        continue;
 
@@ -3432,6 +3448,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
        struct netdev_queue *txq;
        struct Qdisc *q;
        int rc = -ENOMEM;
+       bool again = false;
 
        skb_reset_mac_header(skb);
 
@@ -3493,7 +3510,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
                                     XMIT_RECURSION_LIMIT))
                                goto recursion_alert;
 
-                       skb = validate_xmit_skb(skb, dev);
+                       skb = validate_xmit_skb(skb, dev, &again);
                        if (!skb)
                                goto out;
 
@@ -3920,7 +3937,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
                                     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
                                     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
                        goto do_drop;
-               if (troom > 0 && __skb_linearize(skb))
+               if (skb_linearize(skb))
                        goto do_drop;
        }
 
@@ -4177,6 +4194,8 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
                                spin_unlock(root_lock);
                }
        }
+
+       xfrm_dev_backlog(sd);
 }
 
 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
@@ -4564,6 +4583,7 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
                } else if (new && !old) {
                        static_key_slow_inc(&generic_xdp_needed);
                        dev_disable_lro(dev);
+                       dev_disable_gro_hw(dev);
                }
                break;
 
@@ -7424,6 +7444,18 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
                features &= ~dev->gso_partial_features;
        }
 
+       if (!(features & NETIF_F_RXCSUM)) {
+               /* NETIF_F_GRO_HW implies doing RXCSUM since every packet
+                * successfully merged by hardware must also have the
+                * checksum verified by hardware.  If the user does not
+                * want to enable RXCSUM, logically, we should disable GRO_HW.
+                */
+               if (features & NETIF_F_GRO_HW) {
+                       netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
+                       features &= ~NETIF_F_GRO_HW;
+               }
+       }
+
        return features;
 }
 
@@ -8845,6 +8877,9 @@ static int __init net_dev_init(void)
 
                skb_queue_head_init(&sd->input_pkt_queue);
                skb_queue_head_init(&sd->process_queue);
+#ifdef CONFIG_XFRM_OFFLOAD
+               skb_queue_head_init(&sd->xfrm_backlog);
+#endif
                INIT_LIST_HEAD(&sd->poll_list);
                sd->output_queue_tailp = &sd->output_queue;
 #ifdef CONFIG_RPS
index f8fcf450a36e604fe4c91dcccb4fd2a4a35ef1df..50a79203043b5900584a2272bc168b4767a7e8ff 100644 (file)
@@ -73,6 +73,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
        [NETIF_F_LLTX_BIT] =             "tx-lockless",
        [NETIF_F_NETNS_LOCAL_BIT] =      "netns-local",
        [NETIF_F_GRO_BIT] =              "rx-gro",
+       [NETIF_F_GRO_HW_BIT] =           "rx-gro-hw",
        [NETIF_F_LRO_BIT] =              "rx-lro",
 
        [NETIF_F_TSO_BIT] =              "tx-tcp-segmentation",
index cc75488d3653710894ba6ce3846d9ca4e4712298..02db7b122a7328a07d1ee75eec5580039f168b7c 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/tcp.h>
 #include <net/flow_dissector.h>
 #include <scsi/fc/fc_fcoe.h>
+#include <uapi/linux/batadv_packet.h>
 
 static void dissector_set_key(struct flow_dissector *flow_dissector,
                              enum flow_dissector_key_id key_id)
@@ -437,6 +438,57 @@ __skb_flow_dissect_gre(const struct sk_buff *skb,
        return FLOW_DISSECT_RET_PROTO_AGAIN;
 }
 
+/**
+ * __skb_flow_dissect_batadv() - dissect batman-adv header
+ * @skb: sk_buff to with the batman-adv header
+ * @key_control: flow dissectors control key
+ * @data: raw buffer pointer to the packet, if NULL use skb->data
+ * @p_proto: pointer used to update the protocol to process next
+ * @p_nhoff: pointer used to update inner network header offset
+ * @hlen: packet header length
+ * @flags: any combination of FLOW_DISSECTOR_F_*
+ *
+ * ETH_P_BATMAN packets are tried to be dissected. Only
+ * &struct batadv_unicast packets are actually processed because they contain an
+ * inner ethernet header and are usually followed by actual network header. This
+ * allows the flow dissector to continue processing the packet.
+ *
+ * Return: FLOW_DISSECT_RET_PROTO_AGAIN when &struct batadv_unicast was found,
+ *  FLOW_DISSECT_RET_OUT_GOOD when dissector should stop after encapsulation,
+ *  otherwise FLOW_DISSECT_RET_OUT_BAD
+ */
+static enum flow_dissect_ret
+__skb_flow_dissect_batadv(const struct sk_buff *skb,
+                         struct flow_dissector_key_control *key_control,
+                         void *data, __be16 *p_proto, int *p_nhoff, int hlen,
+                         unsigned int flags)
+{
+       struct {
+               struct batadv_unicast_packet batadv_unicast;
+               struct ethhdr eth;
+       } *hdr, _hdr;
+
+       hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr), data, hlen,
+                                  &_hdr);
+       if (!hdr)
+               return FLOW_DISSECT_RET_OUT_BAD;
+
+       if (hdr->batadv_unicast.version != BATADV_COMPAT_VERSION)
+               return FLOW_DISSECT_RET_OUT_BAD;
+
+       if (hdr->batadv_unicast.packet_type != BATADV_UNICAST)
+               return FLOW_DISSECT_RET_OUT_BAD;
+
+       *p_proto = hdr->eth.h_proto;
+       *p_nhoff += sizeof(*hdr);
+
+       key_control->flags |= FLOW_DIS_ENCAPSULATION;
+       if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
+               return FLOW_DISSECT_RET_OUT_GOOD;
+
+       return FLOW_DISSECT_RET_PROTO_AGAIN;
+}
+
 static void
 __skb_flow_dissect_tcp(const struct sk_buff *skb,
                       struct flow_dissector *flow_dissector,
@@ -815,6 +867,11 @@ proto_again:
                                               nhoff, hlen);
                break;
 
+       case htons(ETH_P_BATMAN):
+               fdret = __skb_flow_dissect_batadv(skb, key_control, data,
+                                                 &proto, &nhoff, hlen, flags);
+               break;
+
        default:
                fdret = FLOW_DISSECT_RET_OUT_BAD;
                break;
index b797832565d34ccefb374ee87f9cbc460779a484..60a71be75aea063b418a48ade2a1e1c7804ab35c 100644 (file)
@@ -267,7 +267,7 @@ struct net *get_net_ns_by_id(struct net *net, int id)
        spin_lock_bh(&net->nsid_lock);
        peer = idr_find(&net->netns_ids, id);
        if (peer)
-               get_net(peer);
+               peer = maybe_get_net(peer);
        spin_unlock_bh(&net->nsid_lock);
        rcu_read_unlock();
 
index a592ca025fc46bf4ff26c900d273b1ae12afa334..00b0757830e2c3224ebdb696e4eb8cfc36d87602 100644 (file)
@@ -1178,7 +1178,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
        u32 d_off;
 
        if (!num_frags)
-               return 0;
+               goto release;
 
        if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
                return -EINVAL;
@@ -1238,6 +1238,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
        __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
        skb_shinfo(skb)->nr_frags = new_frags;
 
+release:
        skb_zcopy_clear(skb, false);
        return 0;
 }
@@ -3654,7 +3655,9 @@ normal:
 
                skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
                                              SKBTX_SHARED_FRAG;
-               if (skb_zerocopy_clone(nskb, head_skb, GFP_ATOMIC))
+
+               if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
+                   skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
                        goto err;
 
                while (pos < offset + len) {
@@ -3668,6 +3671,11 @@ normal:
 
                                BUG_ON(!nfrags);
 
+                               if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
+                                   skb_zerocopy_clone(nskb, frag_skb,
+                                                      GFP_ATOMIC))
+                                       goto err;
+
                                list_skb = list_skb->next;
                        }
 
@@ -3679,9 +3687,6 @@ normal:
                                goto err;
                        }
 
-                       if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
-                               goto err;
-
                        *nskb_frag = *frag;
                        __skb_frag_ref(nskb_frag);
                        size = skb_frag_size(nskb_frag);
index c0b5b2f17412ec3ac3d4b1cf400fb2fbcabf086f..72d14b221784b54dfcf8a5f145e59ee1cbe13830 100644 (file)
 static DEFINE_MUTEX(proto_list_mutex);
 static LIST_HEAD(proto_list);
 
+static void sock_inuse_add(struct net *net, int val);
+
 /**
  * sk_ns_capable - General socket capability test
  * @sk: Socket to use a capability on or through
@@ -1531,8 +1533,11 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
                sk->sk_kern_sock = kern;
                sock_lock_init(sk);
                sk->sk_net_refcnt = kern ? 0 : 1;
-               if (likely(sk->sk_net_refcnt))
+               if (likely(sk->sk_net_refcnt)) {
                        get_net(net);
+                       sock_inuse_add(net, 1);
+               }
+
                sock_net_set(sk, net);
                refcount_set(&sk->sk_wmem_alloc, 1);
 
@@ -1595,6 +1600,9 @@ void sk_destruct(struct sock *sk)
 
 static void __sk_free(struct sock *sk)
 {
+       if (likely(sk->sk_net_refcnt))
+               sock_inuse_add(sock_net(sk), -1);
+
        if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
                sock_diag_broadcast_destroy(sk);
        else
@@ -1716,6 +1724,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                newsk->sk_priority = 0;
                newsk->sk_incoming_cpu = raw_smp_processor_id();
                atomic64_set(&newsk->sk_cookie, 0);
+               if (likely(newsk->sk_net_refcnt))
+                       sock_inuse_add(sock_net(newsk), 1);
 
                /*
                 * Before updating sk_refcnt, we must commit prior changes to memory
@@ -3045,7 +3055,7 @@ static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
 
 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
 {
-       __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
+       __this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
 }
 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
 
@@ -3055,21 +3065,50 @@ int sock_prot_inuse_get(struct net *net, struct proto *prot)
        int res = 0;
 
        for_each_possible_cpu(cpu)
-               res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
+               res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];
 
        return res >= 0 ? res : 0;
 }
 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
 
+static void sock_inuse_add(struct net *net, int val)
+{
+       this_cpu_add(*net->core.sock_inuse, val);
+}
+
+int sock_inuse_get(struct net *net)
+{
+       int cpu, res = 0;
+
+       for_each_possible_cpu(cpu)
+               res += *per_cpu_ptr(net->core.sock_inuse, cpu);
+
+       return res;
+}
+
+EXPORT_SYMBOL_GPL(sock_inuse_get);
+
 static int __net_init sock_inuse_init_net(struct net *net)
 {
-       net->core.inuse = alloc_percpu(struct prot_inuse);
-       return net->core.inuse ? 0 : -ENOMEM;
+       net->core.prot_inuse = alloc_percpu(struct prot_inuse);
+       if (net->core.prot_inuse == NULL)
+               return -ENOMEM;
+
+       net->core.sock_inuse = alloc_percpu(int);
+       if (net->core.sock_inuse == NULL)
+               goto out;
+
+       return 0;
+
+out:
+       free_percpu(net->core.prot_inuse);
+       return -ENOMEM;
 }
 
 static void __net_exit sock_inuse_exit_net(struct net *net)
 {
-       free_percpu(net->core.inuse);
+       free_percpu(net->core.prot_inuse);
+       free_percpu(net->core.sock_inuse);
 }
 
 static struct pernet_operations net_inuse_ops = {
@@ -3112,6 +3151,10 @@ static inline void assign_proto_idx(struct proto *prot)
 static inline void release_proto_idx(struct proto *prot)
 {
 }
+
+static void sock_inuse_add(struct net *net, int val)
+{
+}
 #endif
 
 static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
index 9d43c1f4027408f3a2176767da0dd425938ba652..7a75a1d3568bc3e422b5c35f9f5d7883fda2e6cc 100644 (file)
@@ -110,7 +110,7 @@ void dccp_set_state(struct sock *sk, const int state)
        /* Change state AFTER socket is unhashed to avoid closed
         * socket sitting in hash tables.
         */
-       sk->sk_state = state;
+       inet_sk_set_state(sk, state);
 }
 
 EXPORT_SYMBOL_GPL(dccp_set_state);
index f00499a469271fb2165c8ca25fe3b4538e45a01e..bab98a4fedadef30fe3e6b38a88bbc98f7b906c9 100644 (file)
 #endif
 #include <net/l3mdev.h>
 
+#include <trace/events/sock.h>
 
 /* The inetsw table contains everything that inet_create needs to
  * build a new socket.
@@ -1220,6 +1221,19 @@ int inet_sk_rebuild_header(struct sock *sk)
 }
 EXPORT_SYMBOL(inet_sk_rebuild_header);
 
+void inet_sk_set_state(struct sock *sk, int state)
+{
+       trace_inet_sock_set_state(sk, sk->sk_state, state);
+       sk->sk_state = state;
+}
+EXPORT_SYMBOL(inet_sk_set_state);
+
+void inet_sk_state_store(struct sock *sk, int newstate)
+{
+       trace_inet_sock_set_state(sk, sk->sk_state, newstate);
+       smp_store_release(&sk->sk_state, newstate);
+}
+
 struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                                 netdev_features_t features)
 {
index d57aa64fa7c7d44307e6ecf9bf401c0a8ea21b87..6f00e43120a86028c91f71bf710c643c0bd8470c 100644 (file)
@@ -121,14 +121,32 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
 static void esp_output_done(struct crypto_async_request *base, int err)
 {
        struct sk_buff *skb = base->data;
+       struct xfrm_offload *xo = xfrm_offload(skb);
        void *tmp;
-       struct dst_entry *dst = skb_dst(skb);
-       struct xfrm_state *x = dst->xfrm;
+       struct xfrm_state *x;
+
+       if (xo && (xo->flags & XFRM_DEV_RESUME))
+               x = skb->sp->xvec[skb->sp->len - 1];
+       else
+               x = skb_dst(skb)->xfrm;
 
        tmp = ESP_SKB_CB(skb)->tmp;
        esp_ssg_unref(x, tmp);
        kfree(tmp);
-       xfrm_output_resume(skb, err);
+
+       if (xo && (xo->flags & XFRM_DEV_RESUME)) {
+               if (err) {
+                       XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+                       kfree_skb(skb);
+                       return;
+               }
+
+               skb_push(skb, skb->data - skb_mac_header(skb));
+               secpath_reset(skb);
+               xfrm_dev_resume(skb);
+       } else {
+               xfrm_output_resume(skb, err);
+       }
 }
 
 /* Move ESP header back into place. */
@@ -825,17 +843,13 @@ static int esp_init_aead(struct xfrm_state *x)
        char aead_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_aead *aead;
        int err;
-       u32 mask = 0;
 
        err = -ENAMETOOLONG;
        if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
                     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
                goto error;
 
-       if (x->xso.offload_handle)
-               mask |= CRYPTO_ALG_ASYNC;
-
-       aead = crypto_alloc_aead(aead_name, 0, mask);
+       aead = crypto_alloc_aead(aead_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;
@@ -865,7 +879,6 @@ static int esp_init_authenc(struct xfrm_state *x)
        char authenc_name[CRYPTO_MAX_ALG_NAME];
        unsigned int keylen;
        int err;
-       u32 mask = 0;
 
        err = -EINVAL;
        if (!x->ealg)
@@ -891,10 +904,7 @@ static int esp_init_authenc(struct xfrm_state *x)
                        goto error;
        }
 
-       if (x->xso.offload_handle)
-               mask |= CRYPTO_ALG_ASYNC;
-
-       aead = crypto_alloc_aead(authenc_name, 0, mask);
+       aead = crypto_alloc_aead(authenc_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;
index f8b918c766b0af1e572ed895dcf2435af92016a9..c359f3cfeec39c238397b6dfd7e374f6704e7ebd 100644 (file)
@@ -108,75 +108,36 @@ static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
 static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
 {
-       __u32 seq;
-       int err = 0;
-       struct sk_buff *skb2;
        struct xfrm_state *x;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
-       struct sk_buff *segs = ERR_PTR(-EINVAL);
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
 
        if (!xo)
-               goto out;
-
-       seq = xo->seq.low;
+               return ERR_PTR(-EINVAL);
 
        x = skb->sp->xvec[skb->sp->len - 1];
        aead = x->data;
        esph = ip_esp_hdr(skb);
 
        if (esph->spi != x->id.spi)
-               goto out;
+               return ERR_PTR(-EINVAL);
 
        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
-               goto out;
+               return ERR_PTR(-EINVAL);
 
        __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
 
        skb->encap_hdr_csum = 1;
 
-       if (!(features & NETIF_F_HW_ESP))
+       if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
+           (x->xso.dev != skb->dev))
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
 
-       segs = x->outer_mode->gso_segment(x, skb, esp_features);
-       if (IS_ERR_OR_NULL(segs))
-               goto out;
-
-       __skb_pull(skb, skb->data - skb_mac_header(skb));
-
-       skb2 = segs;
-       do {
-               struct sk_buff *nskb = skb2->next;
-
-               xo = xfrm_offload(skb2);
-               xo->flags |= XFRM_GSO_SEGMENT;
-               xo->seq.low = seq;
-               xo->seq.hi = xfrm_replay_seqhi(x, seq);
+       xo->flags |= XFRM_GSO_SEGMENT;
 
-               if(!(features & NETIF_F_HW_ESP))
-                       xo->flags |= CRYPTO_FALLBACK;
-
-               x->outer_mode->xmit(x, skb2);
-
-               err = x->type_offload->xmit(x, skb2, esp_features);
-               if (err) {
-                       kfree_skb_list(segs);
-                       return ERR_PTR(err);
-               }
-
-               if (!skb_is_gso(skb2))
-                       seq++;
-               else
-                       seq += skb_shinfo(skb2)->gso_segs;
-
-               skb_push(skb2, skb2->mac_len);
-               skb2 = nskb;
-       } while (skb2);
-
-out:
-       return segs;
+       return x->outer_mode->gso_segment(x, skb, esp_features);
 }
 
 static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
@@ -203,6 +164,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_
        struct crypto_aead *aead;
        struct esp_info esp;
        bool hw_offload = true;
+       __u32 seq;
 
        esp.inplace = true;
 
@@ -241,23 +203,30 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_
                        return esp.nfrags;
        }
 
+       seq = xo->seq.low;
+
        esph = esp.esph;
        esph->spi = x->id.spi;
 
        skb_push(skb, -skb_network_offset(skb));
 
        if (xo->flags & XFRM_GSO_SEGMENT) {
-               esph->seq_no = htonl(xo->seq.low);
-       } else {
-               ip_hdr(skb)->tot_len = htons(skb->len);
-               ip_send_check(ip_hdr(skb));
+               esph->seq_no = htonl(seq);
+
+               if (!skb_is_gso(skb))
+                       xo->seq.low++;
+               else
+                       xo->seq.low += skb_shinfo(skb)->gso_segs;
        }
 
+       esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));
+
+       ip_hdr(skb)->tot_len = htons(skb->len);
+       ip_send_check(ip_hdr(skb));
+
        if (hw_offload)
                return 0;
 
-       esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
-
        err = esp_output_tail(x, skb, &esp);
        if (err)
                return err;
index f52d27a422c37298b2ad0c1dbba0e5307f1a46b6..08259d078b1ca821c581aeb34251c79a9aba8c8d 100644 (file)
@@ -1298,14 +1298,19 @@ err_table_hash_alloc:
 
 static void ip_fib_net_exit(struct net *net)
 {
-       unsigned int i;
+       int i;
 
        rtnl_lock();
 #ifdef CONFIG_IP_MULTIPLE_TABLES
        RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
        RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
 #endif
-       for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
+       /* Destroy the tables in reverse order to guarantee that the
+        * local table, ID 255, is destroyed before the main table, ID
+        * 254. This is necessary as the local table may contain
+        * references to data contained in the main table.
+        */
+       for (i = FIB_TABLE_HASHSZ - 1; i >= 0; i--) {
                struct hlist_head *head = &net->ipv4.fib_table_hash[i];
                struct hlist_node *tmp;
                struct fib_table *tb;
index f04d944f8abe0bfbb840837bb35d28fe6d8d25d0..c586597da20dbb0e46eb0f693fd65bccfc8f3633 100644 (file)
@@ -698,7 +698,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
 
        nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
                int type = nla_type(nla);
-               u32 val;
+               u32 fi_val, val;
 
                if (!type)
                        continue;
@@ -715,7 +715,11 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
                        val = nla_get_u32(nla);
                }
 
-               if (fi->fib_metrics->metrics[type - 1] != val)
+               fi_val = fi->fib_metrics->metrics[type - 1];
+               if (type == RTAX_FEATURES)
+                       fi_val &= ~DST_FEATURE_ECN_CA;
+
+               if (fi_val != val)
                        return false;
        }
 
index 4ca46dc08e631c360cba851f166c6d2bcc0c983c..12410ec6f7f7ae528e1282134a1b9117006f9f33 100644 (file)
@@ -685,7 +685,7 @@ static void reqsk_timer_handler(struct timer_list *t)
        int max_retries, thresh;
        u8 defer_accept;
 
-       if (sk_state_load(sk_listener) != TCP_LISTEN)
+       if (inet_sk_state_load(sk_listener) != TCP_LISTEN)
                goto drop;
 
        max_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
@@ -783,7 +783,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
        if (newsk) {
                struct inet_connection_sock *newicsk = inet_csk(newsk);
 
-               newsk->sk_state = TCP_SYN_RECV;
+               inet_sk_set_state(newsk, TCP_SYN_RECV);
                newicsk->icsk_bind_hash = NULL;
 
                inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
@@ -877,7 +877,7 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
         * It is OK, because this socket enters to hash table only
         * after validation is complete.
         */
-       sk_state_store(sk, TCP_LISTEN);
+       inet_sk_state_store(sk, TCP_LISTEN);
        if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
                inet->inet_sport = htons(inet->inet_num);
 
@@ -888,7 +888,7 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
                        return 0;
        }
 
-       sk->sk_state = TCP_CLOSE;
+       inet_sk_set_state(sk, TCP_CLOSE);
        return err;
 }
 EXPORT_SYMBOL_GPL(inet_csk_listen_start);
index f6f58108b4c5e4d5e393ee52cd50ca6e471b8ffe..37b7da0b975dc29754b471351a7d6d05b60de95a 100644 (file)
@@ -544,7 +544,7 @@ bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
        } else {
                percpu_counter_inc(sk->sk_prot->orphan_count);
-               sk->sk_state = TCP_CLOSE;
+               inet_sk_set_state(sk, TCP_CLOSE);
                sock_set_flag(sk, SOCK_DEAD);
                inet_csk_destroy_sock(sk);
        }
index fd4d6e96da7e953b8bb24758a5a691051a3e1e6a..b61f2285816d230baf54a604641837aa4f0018f2 100644 (file)
@@ -313,9 +313,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                                return PACKET_REJECT;
 
                        md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
-                       if (!md)
-                               return PACKET_REJECT;
-
                        memcpy(md, pkt_md, sizeof(*md));
                        md->version = ver;
 
@@ -434,11 +431,13 @@ static int gre_rcv(struct sk_buff *skb)
                     tpi.proto == htons(ETH_P_ERSPAN2))) {
                if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
                        return 0;
+               goto out;
        }
 
        if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
                return 0;
 
+out:
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 drop:
        kfree_skb(skb);
@@ -1332,6 +1331,7 @@ static const struct net_device_ops erspan_netdev_ops = {
 static void ipgre_tap_setup(struct net_device *dev)
 {
        ether_setup(dev);
+       dev->max_mtu = 0;
        dev->netdev_ops = &gre_tap_netdev_ops;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
index c470fec9062fef97b30d4410b1efd2e40d2e0416..7ac583a2b9fe09cebbd17baceca2019e06ea9d3e 100644 (file)
 #include <asm/ioctls.h>
 #include <net/busy_poll.h>
 
-#include <trace/events/tcp.h>
-
 struct percpu_counter tcp_orphan_count;
 EXPORT_SYMBOL_GPL(tcp_orphan_count);
 
@@ -504,7 +502,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 
        sock_poll_wait(file, sk_sleep(sk), wait);
 
-       state = sk_state_load(sk);
+       state = inet_sk_state_load(sk);
        if (state == TCP_LISTEN)
                return inet_csk_listen_poll(sk);
 
@@ -1106,12 +1104,15 @@ static int linear_payload_sz(bool first_skb)
        return 0;
 }
 
-static int select_size(const struct sock *sk, bool sg, bool first_skb)
+static int select_size(const struct sock *sk, bool sg, bool first_skb, bool zc)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        int tmp = tp->mss_cache;
 
        if (sg) {
+               if (zc)
+                       return 0;
+
                if (sk_can_gso(sk)) {
                        tmp = linear_payload_sz(first_skb);
                } else {
@@ -1188,7 +1189,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
        int flags, err, copied = 0;
        int mss_now = 0, size_goal, copied_syn = 0;
        bool process_backlog = false;
-       bool sg;
+       bool sg, zc = false;
        long timeo;
 
        flags = msg->msg_flags;
@@ -1206,7 +1207,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
                        goto out_err;
                }
 
-               if (!(sk_check_csum_caps(sk) && sk->sk_route_caps & NETIF_F_SG))
+               zc = sk_check_csum_caps(sk) && sk->sk_route_caps & NETIF_F_SG;
+               if (!zc)
                        uarg->zerocopy = 0;
        }
 
@@ -1283,6 +1285,7 @@ restart:
 
                if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
                        bool first_skb;
+                       int linear;
 
 new_segment:
                        /* Allocate new segment. If the interface is SG,
@@ -1296,9 +1299,8 @@ new_segment:
                                goto restart;
                        }
                        first_skb = tcp_rtx_and_write_queues_empty(sk);
-                       skb = sk_stream_alloc_skb(sk,
-                                                 select_size(sk, sg, first_skb),
-                                                 sk->sk_allocation,
+                       linear = select_size(sk, sg, first_skb, zc);
+                       skb = sk_stream_alloc_skb(sk, linear, sk->sk_allocation,
                                                  first_skb);
                        if (!skb)
                                goto wait_for_memory;
@@ -1327,13 +1329,13 @@ new_segment:
                        copy = msg_data_left(msg);
 
                /* Where to copy to? */
-               if (skb_availroom(skb) > 0) {
+               if (skb_availroom(skb) > 0 && !zc) {
                        /* We have some space in skb head. Superb! */
                        copy = min_t(int, copy, skb_availroom(skb));
                        err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
                        if (err)
                                goto do_fault;
-               } else if (!uarg || !uarg->zerocopy) {
+               } else if (!zc) {
                        bool merge = true;
                        int i = skb_shinfo(skb)->nr_frags;
                        struct page_frag *pfrag = sk_page_frag(sk);
@@ -1373,8 +1375,10 @@ new_segment:
                        pfrag->offset += copy;
                } else {
                        err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
-                       if (err == -EMSGSIZE || err == -EEXIST)
+                       if (err == -EMSGSIZE || err == -EEXIST) {
+                               tcp_mark_push(tp, skb);
                                goto new_segment;
+                       }
                        if (err < 0)
                                goto do_error;
                        copy = err;
@@ -2040,8 +2044,6 @@ void tcp_set_state(struct sock *sk, int state)
 {
        int oldstate = sk->sk_state;
 
-       trace_tcp_set_state(sk, oldstate, state);
-
        switch (state) {
        case TCP_ESTABLISHED:
                if (oldstate != TCP_ESTABLISHED)
@@ -2065,7 +2067,7 @@ void tcp_set_state(struct sock *sk, int state)
        /* Change state AFTER socket is unhashed to avoid closed
         * socket sitting in hash tables.
         */
-       sk_state_store(sk, state);
+       inet_sk_state_store(sk, state);
 
 #ifdef STATE_TRACE
        SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
@@ -2920,7 +2922,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
        if (sk->sk_type != SOCK_STREAM)
                return;
 
-       info->tcpi_state = sk_state_load(sk);
+       info->tcpi_state = inet_sk_state_load(sk);
 
        /* Report meaningful fields for all TCP states, including listeners */
        rate = READ_ONCE(sk->sk_pacing_rate);
index abbf0edcf6c26f3b39dffe46949875bebbdc7f51..81148f7a2323cf119337927ba881237f83e61492 100644 (file)
@@ -24,7 +24,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
 {
        struct tcp_info *info = _info;
 
-       if (sk_state_load(sk) == TCP_LISTEN) {
+       if (inet_sk_state_load(sk) == TCP_LISTEN) {
                r->idiag_rqueue = sk->sk_ack_backlog;
                r->idiag_wqueue = sk->sk_max_ack_backlog;
        } else if (sk->sk_type == SOCK_STREAM) {
index 94e28350f4205b1a57809d5471d7e2ade51f5196..5d203248123e1dedc6410dcb5856fd78bab96eb8 100644 (file)
@@ -1911,7 +1911,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
        /* Clean up the MD5 key list, if any */
        if (tp->md5sig_info) {
                tcp_clear_md5_list(sk);
-               kfree_rcu(tp->md5sig_info, rcu);
+               kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
                tp->md5sig_info = NULL;
        }
 #endif
@@ -2281,7 +2281,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
                timer_expires = jiffies;
        }
 
-       state = sk_state_load(sk);
+       state = inet_sk_state_load(sk);
        if (state == TCP_LISTEN)
                rx_queue = sk->sk_ack_backlog;
        else
index 7d885a44dc9dcf515612148c241a6ed915be3966..8affc6d83d584ef6dec5b5c43d4edb0d7c4a1412 100644 (file)
@@ -105,18 +105,15 @@ static struct sk_buff *xfrm4_mode_tunnel_gso_segment(struct xfrm_state *x,
 {
        __skb_push(skb, skb->mac_len);
        return skb_mac_gso_segment(skb, features);
-
 }
 
 static void xfrm4_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
 {
        struct xfrm_offload *xo = xfrm_offload(skb);
 
-       if (xo->flags & XFRM_GSO_SEGMENT) {
-               skb->network_header = skb->network_header - x->props.header_len;
+       if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header = skb->network_header +
                                        sizeof(struct iphdr);
-       }
 
        skb_reset_mac_len(skb);
        pskb_pull(skb, skb->mac_len + x->props.header_len);
index c26f71234b9c01a82ec9d40423ee28957468ed46..c9441ca4539936486291147a47a84ef1b2ecf095 100644 (file)
@@ -210,7 +210,6 @@ lookup_protocol:
        np->mcast_hops  = IPV6_DEFAULT_MCASTHOPS;
        np->mc_loop     = 1;
        np->pmtudisc    = IPV6_PMTUDISC_WANT;
-       np->autoflowlabel = ip6_default_np_autolabel(net);
        np->repflow     = net->ipv6.sysctl.flowlabel_reflect;
        sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
 
index a902ff8f59be3ed7e1f28afc234a0e56eca4e684..7c888c6e53a97fdec1e1f0fa6f7800f08888fbf0 100644 (file)
@@ -141,14 +141,32 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
 static void esp_output_done(struct crypto_async_request *base, int err)
 {
        struct sk_buff *skb = base->data;
+       struct xfrm_offload *xo = xfrm_offload(skb);
        void *tmp;
-       struct dst_entry *dst = skb_dst(skb);
-       struct xfrm_state *x = dst->xfrm;
+       struct xfrm_state *x;
+
+       if (xo && (xo->flags & XFRM_DEV_RESUME))
+               x = skb->sp->xvec[skb->sp->len - 1];
+       else
+               x = skb_dst(skb)->xfrm;
 
        tmp = ESP_SKB_CB(skb)->tmp;
        esp_ssg_unref(x, tmp);
        kfree(tmp);
-       xfrm_output_resume(skb, err);
+
+       if (xo && (xo->flags & XFRM_DEV_RESUME)) {
+               if (err) {
+                       XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+                       kfree_skb(skb);
+                       return;
+               }
+
+               skb_push(skb, skb->data - skb_mac_header(skb));
+               secpath_reset(skb);
+               xfrm_dev_resume(skb);
+       } else {
+               xfrm_output_resume(skb, err);
+       }
 }
 
 /* Move ESP header back into place. */
@@ -734,17 +752,13 @@ static int esp_init_aead(struct xfrm_state *x)
        char aead_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_aead *aead;
        int err;
-       u32 mask = 0;
 
        err = -ENAMETOOLONG;
        if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
                     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
                goto error;
 
-       if (x->xso.offload_handle)
-               mask |= CRYPTO_ALG_ASYNC;
-
-       aead = crypto_alloc_aead(aead_name, 0, mask);
+       aead = crypto_alloc_aead(aead_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;
@@ -774,7 +788,6 @@ static int esp_init_authenc(struct xfrm_state *x)
        char authenc_name[CRYPTO_MAX_ALG_NAME];
        unsigned int keylen;
        int err;
-       u32 mask = 0;
 
        err = -EINVAL;
        if (!x->ealg)
@@ -800,10 +813,7 @@ static int esp_init_authenc(struct xfrm_state *x)
                        goto error;
        }
 
-       if (x->xso.offload_handle)
-               mask |= CRYPTO_ALG_ASYNC;
-
-       aead = crypto_alloc_aead(authenc_name, 0, mask);
+       aead = crypto_alloc_aead(authenc_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;
index 333a478aa1610441ce08e3ac82e92d3b48e3222d..0bb7d54cf2cb6c8696d954cc9a4a7356ed26e6fa 100644 (file)
@@ -135,75 +135,36 @@ static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
 static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
 {
-       __u32 seq;
-       int err = 0;
-       struct sk_buff *skb2;
        struct xfrm_state *x;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
-       struct sk_buff *segs = ERR_PTR(-EINVAL);
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
 
        if (!xo)
-               goto out;
-
-       seq = xo->seq.low;
+               return ERR_PTR(-EINVAL);
 
        x = skb->sp->xvec[skb->sp->len - 1];
        aead = x->data;
        esph = ip_esp_hdr(skb);
 
        if (esph->spi != x->id.spi)
-               goto out;
+               return ERR_PTR(-EINVAL);
 
        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
-               goto out;
+               return ERR_PTR(-EINVAL);
 
        __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
 
        skb->encap_hdr_csum = 1;
 
-       if (!(features & NETIF_F_HW_ESP))
+       if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
+           (x->xso.dev != skb->dev))
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
 
-       segs = x->outer_mode->gso_segment(x, skb, esp_features);
-       if (IS_ERR_OR_NULL(segs))
-               goto out;
-
-       __skb_pull(skb, skb->data - skb_mac_header(skb));
-
-       skb2 = segs;
-       do {
-               struct sk_buff *nskb = skb2->next;
-
-               xo = xfrm_offload(skb2);
-               xo->flags |= XFRM_GSO_SEGMENT;
-               xo->seq.low = seq;
-               xo->seq.hi = xfrm_replay_seqhi(x, seq);
-
-               if(!(features & NETIF_F_HW_ESP))
-                       xo->flags |= CRYPTO_FALLBACK;
-
-               x->outer_mode->xmit(x, skb2);
-
-               err = x->type_offload->xmit(x, skb2, esp_features);
-               if (err) {
-                       kfree_skb_list(segs);
-                       return ERR_PTR(err);
-               }
-
-               if (!skb_is_gso(skb2))
-                       seq++;
-               else
-                       seq += skb_shinfo(skb2)->gso_segs;
-
-               skb_push(skb2, skb2->mac_len);
-               skb2 = nskb;
-       } while (skb2);
+       xo->flags |= XFRM_GSO_SEGMENT;
 
-out:
-       return segs;
+       return x->outer_mode->gso_segment(x, skb, esp_features);
 }
 
 static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
@@ -222,6 +183,7 @@ static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
 
 static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_t features)
 {
+       int len;
        int err;
        int alen;
        int blksize;
@@ -230,6 +192,7 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features
        struct crypto_aead *aead;
        struct esp_info esp;
        bool hw_offload = true;
+       __u32 seq;
 
        esp.inplace = true;
 
@@ -265,28 +228,33 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features
                        return esp.nfrags;
        }
 
+       seq = xo->seq.low;
+
        esph = ip_esp_hdr(skb);
        esph->spi = x->id.spi;
 
        skb_push(skb, -skb_network_offset(skb));
 
        if (xo->flags & XFRM_GSO_SEGMENT) {
-               esph->seq_no = htonl(xo->seq.low);
-       } else {
-               int len;
-
-               len = skb->len - sizeof(struct ipv6hdr);
-               if (len > IPV6_MAXPLEN)
-                       len = 0;
+               esph->seq_no = htonl(seq);
 
-               ipv6_hdr(skb)->payload_len = htons(len);
+               if (!skb_is_gso(skb))
+                       xo->seq.low++;
+               else
+                       xo->seq.low += skb_shinfo(skb)->gso_segs;
        }
 
+       esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
+
+       len = skb->len - sizeof(struct ipv6hdr);
+       if (len > IPV6_MAXPLEN)
+               len = 0;
+
+       ipv6_hdr(skb)->payload_len = htons(len);
+
        if (hw_offload)
                return 0;
 
-       esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
-
        err = esp6_output_tail(x, skb, &esp);
        if (err)
                return err;
index 87b9892dfa235ecc7cfcdeade3505781d826fb5f..b345b7e484c5ec4316f1994e4cd96ddf83cb5e06 100644 (file)
@@ -507,12 +507,11 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
        struct ip6_tnl *tunnel;
        u8 ver;
 
-       ipv6h = ipv6_hdr(skb);
-       ershdr = (struct erspan_base_hdr *)skb->data;
-
        if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
                return PACKET_REJECT;
 
+       ipv6h = ipv6_hdr(skb);
+       ershdr = (struct erspan_base_hdr *)skb->data;
        ver = (ntohs(ershdr->ver_vlan) & VER_MASK) >> VER_OFFSET;
        tpi->key = cpu_to_be32(ntohs(ershdr->session_id) & ID_MASK);
 
@@ -551,8 +550,6 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
 
                        info = &tun_dst->u.tun_info;
                        md = ip_tunnel_info_opts(info);
-                       if (!md)
-                               return PACKET_REJECT;
 
                        memcpy(md, pkt_md, sizeof(*md));
                        md->version = ver;
@@ -603,12 +600,13 @@ static int gre_rcv(struct sk_buff *skb)
                     tpi.proto == htons(ETH_P_ERSPAN2))) {
                if (ip6erspan_rcv(skb, hdr_len, &tpi) == PACKET_RCVD)
                        return 0;
-               goto drop;
+               goto out;
        }
 
        if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD)
                return 0;
 
+out:
        icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
 drop:
        kfree_skb(skb);
@@ -1770,6 +1768,7 @@ static void ip6gre_tap_setup(struct net_device *dev)
 
        ether_setup(dev);
 
+       dev->max_mtu = 0;
        dev->netdev_ops = &ip6gre_tap_netdev_ops;
        dev->needs_free_netdev = true;
        dev->priv_destructor = ip6gre_dev_free;
index 176d74fb3b4d82223cd0cbd7b589ceca8c968c9e..bcdb615aed6eec7606a7a884a8514b13db4c3f66 100644 (file)
@@ -138,6 +138,14 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
                return ret;
        }
 
+#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
+       /* Policy lookup after SNAT yielded a new policy */
+       if (skb_dst(skb)->xfrm) {
+               IPCB(skb)->flags |= IPSKB_REROUTED;
+               return dst_output(net, sk, skb);
+       }
+#endif
+
        if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
            dst_allfrag(skb_dst(skb)) ||
            (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
@@ -166,6 +174,14 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
                            !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
 
+static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
+{
+       if (!np->autoflowlabel_set)
+               return ip6_default_np_autolabel(net);
+       else
+               return np->autoflowlabel;
+}
+
 /*
  * xmit an sk_buff (used by TCP, SCTP and DCCP)
  * Note : socket lock is not held for SYNACK packets, but might be modified
@@ -230,7 +246,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
                hlimit = ip6_dst_hoplimit(dst);
 
        ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
-                                                    np->autoflowlabel, fl6));
+                               ip6_autoflowlabel(net, np), fl6));
 
        hdr->payload_len = htons(seg_len);
        hdr->nexthdr = proto;
@@ -1626,7 +1642,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
 
        ip6_flow_hdr(hdr, v6_cork->tclass,
                     ip6_make_flowlabel(net, skb, fl6->flowlabel,
-                                       np->autoflowlabel, fl6));
+                                       ip6_autoflowlabel(net, np), fl6));
        hdr->hop_limit = v6_cork->hop_limit;
        hdr->nexthdr = proto;
        hdr->saddr = fl6->saddr;
index 6ff2f21ae3fc4a00690b67f07a0ccd24560b4924..8a4610e84e585f6a8f293fcbca62c54c01aa7ec4 100644 (file)
@@ -1126,8 +1126,13 @@ route_lookup:
                max_headroom += 8;
                mtu -= 8;
        }
-       if (mtu < IPV6_MIN_MTU)
-               mtu = IPV6_MIN_MTU;
+       if (skb->protocol == htons(ETH_P_IPV6)) {
+               if (mtu < IPV6_MIN_MTU)
+                       mtu = IPV6_MIN_MTU;
+       } else if (mtu < 576) {
+               mtu = 576;
+       }
+
        if (skb_dst(skb) && !t->parms.collect_md)
                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
        if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
index dbb74f3c57a77f6c0fd8c337b8e127246c2d9b4e..18caa9539e6d7ba0ae0a9a415633da5f242e91c4 100644 (file)
@@ -626,6 +626,7 @@ static void vti6_link_config(struct ip6_tnl *t)
 {
        struct net_device *dev = t->dev;
        struct __ip6_tnl_parm *p = &t->parms;
+       struct net_device *tdev = NULL;
 
        memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
        memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
@@ -638,6 +639,25 @@ static void vti6_link_config(struct ip6_tnl *t)
                dev->flags |= IFF_POINTOPOINT;
        else
                dev->flags &= ~IFF_POINTOPOINT;
+
+       if (p->flags & IP6_TNL_F_CAP_XMIT) {
+               int strict = (ipv6_addr_type(&p->raddr) &
+                             (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
+               struct rt6_info *rt = rt6_lookup(t->net,
+                                                &p->raddr, &p->laddr,
+                                                p->link, strict);
+
+               if (rt)
+                       tdev = rt->dst.dev;
+               ip6_rt_put(rt);
+       }
+
+       if (!tdev && p->link)
+               tdev = __dev_get_by_index(t->net, p->link);
+
+       if (tdev)
+               dev->mtu = max_t(int, tdev->mtu - dev->hard_header_len,
+                                IPV6_MIN_MTU);
 }
 
 /**
index b9404feabd7857fe0873fbc4f346d281f3600807..2d4680e0376f41deee6c999eadaf9409353e0b4a 100644 (file)
@@ -886,6 +886,7 @@ pref_skip_coa:
                break;
        case IPV6_AUTOFLOWLABEL:
                np->autoflowlabel = valbool;
+               np->autoflowlabel_set = 1;
                retv = 0;
                break;
        case IPV6_RECVFRAGSIZE:
index b3f4d19b3ca5c540d50407eab6e87b23dc0c0b35..2490280b33942b2fc7617d1b8a0783e42d01fc17 100644 (file)
@@ -2336,6 +2336,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
        }
 
        rt->dst.flags |= DST_HOST;
+       rt->dst.input = ip6_input;
        rt->dst.output  = ip6_output;
        rt->rt6i_gateway  = fl6->daddr;
        rt->rt6i_dst.addr = fl6->daddr;
@@ -4297,19 +4298,13 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
                if (!ipv6_addr_any(&fl6.saddr))
                        flags |= RT6_LOOKUP_F_HAS_SADDR;
 
-               if (!fibmatch)
-                       dst = ip6_route_input_lookup(net, dev, &fl6, flags);
-               else
-                       dst = ip6_route_lookup(net, &fl6, 0);
+               dst = ip6_route_input_lookup(net, dev, &fl6, flags);
 
                rcu_read_unlock();
        } else {
                fl6.flowi6_oif = oif;
 
-               if (!fibmatch)
-                       dst = ip6_route_output(net, NULL, &fl6);
-               else
-                       dst = ip6_route_lookup(net, &fl6, 0);
+               dst = ip6_route_output(net, NULL, &fl6);
        }
 
 
@@ -4326,6 +4321,14 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
                goto errout;
        }
 
+       if (fibmatch && rt->from) {
+               struct rt6_info *ort = rt->from;
+
+               dst_hold(&ort->dst);
+               ip6_rt_put(rt);
+               rt = ort;
+       }
+
        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb) {
                ip6_rt_put(rt);
index 7178476b3d2f64f01832fe3292c7dec849ec2265..aa12a26a96c6907a19f7759048649ec5a2384586 100644 (file)
@@ -1795,7 +1795,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
                timer_expires = jiffies;
        }
 
-       state = sk_state_load(sp);
+       state = inet_sk_state_load(sp);
        if (state == TCP_LISTEN)
                rx_queue = sp->sk_ack_backlog;
        else
index e66b94f465320e6abf49b91f41a6687f1f015e3d..4e12859bc2ee26ef25594d7b1d33fe61a2e66130 100644 (file)
@@ -105,17 +105,14 @@ static struct sk_buff *xfrm6_mode_tunnel_gso_segment(struct xfrm_state *x,
 {
        __skb_push(skb, skb->mac_len);
        return skb_mac_gso_segment(skb, features);
-
 }
 
 static void xfrm6_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
 {
        struct xfrm_offload *xo = xfrm_offload(skb);
 
-       if (xo->flags & XFRM_GSO_SEGMENT) {
-               skb->network_header = skb->network_header - x->props.header_len;
+       if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
-       }
 
        skb_reset_mac_len(skb);
        pskb_pull(skb, skb->mac_len + x->props.header_len);
index 115918ad8eca5ac27925d94aa17a91b9b2c17659..6ff64717da1edd324d3d6d9fafe2421b10d989e8 100644 (file)
@@ -792,7 +792,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
                        ptr += 2 + offset;
                }
        } else
-               ptr += session->offset;
+               ptr += session->peer_offset;
 
        offset = ptr - optr;
        if (!pskb_may_pull(skb, offset))
@@ -1785,6 +1785,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
                        session->lns_mode = cfg->lns_mode;
                        session->reorder_timeout = cfg->reorder_timeout;
                        session->offset = cfg->offset;
+                       session->peer_offset = cfg->peer_offset;
                        session->l2specific_type = cfg->l2specific_type;
                        session->l2specific_len = cfg->l2specific_len;
                        session->cookie_len = cfg->cookie_len;
index 9534e16965ccbd87a4e8245df7dc079bdb7931eb..c6fe7cc42a0586b199fc562665517d2f84c714c5 100644 (file)
@@ -59,7 +59,8 @@ struct l2tp_session_cfg {
        int                     debug;          /* bitmask of debug message
                                                 * categories */
        u16                     vlan_id;        /* VLAN pseudowire only */
-       u16                     offset;         /* offset to payload */
+       u16                     offset;         /* offset to tx payload */
+       u16                     peer_offset;    /* offset to rx payload */
        u16                     l2specific_len; /* Layer 2 specific length */
        u16                     l2specific_type; /* Layer 2 specific type */
        u8                      cookie[8];      /* optional cookie */
@@ -86,8 +87,14 @@ struct l2tp_session {
        int                     cookie_len;
        u8                      peer_cookie[8];
        int                     peer_cookie_len;
-       u16                     offset;         /* offset from end of L2TP header
-                                                  to beginning of data */
+       u16                     offset;         /* offset from end of L2TP
+                                                * header to beginning of
+                                                * tx data
+                                                */
+       u16                     peer_offset;    /* offset from end of L2TP
+                                                * header to beginning of
+                                                * rx data
+                                                */
        u16                     l2specific_len;
        u16                     l2specific_type;
        u16                     hdr_len;
index eb69411bcb47a99040256536ff4684aac98d35b4..4cc30b38aba49eceefc72ecc4f0352f6dfbac47d 100644 (file)
@@ -180,8 +180,9 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
                   session->lns_mode ? "LNS" : "LAC",
                   session->debug,
                   jiffies_to_msecs(session->reorder_timeout));
-       seq_printf(m, "   offset %hu l2specific %hu/%hu\n",
-                  session->offset, session->l2specific_type, session->l2specific_len);
+       seq_printf(m, "   offset %hu peer_offset %hu l2specific %hu/%hu\n",
+                  session->offset, session->peer_offset,
+                  session->l2specific_type, session->l2specific_len);
        if (session->cookie_len) {
                seq_printf(m, "   cookie %02x%02x%02x%02x",
                           session->cookie[0], session->cookie[1],
@@ -228,7 +229,8 @@ static int l2tp_dfs_seq_show(struct seq_file *m, void *v)
                seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
                seq_puts(m, "  SESSION ID, peer ID, PWTYPE\n");
                seq_puts(m, "   refcnt cnt\n");
-               seq_puts(m, "   offset OFFSET l2specific TYPE/LEN\n");
+               seq_puts(m, "   offset OFFSET peer_offset OFFSET");
+               seq_puts(m, " l2specific TYPE/LEN\n");
                seq_puts(m, "   [ cookie ]\n");
                seq_puts(m, "   [ peer cookie ]\n");
                seq_puts(m, "   config mtu/mru/rcvseq/sendseq/dataseq/lns debug reorderto\n");
index a1f24fb2be980b204f83de977c452f3027d1cfa8..d7d4d7a7a54d492121467178801e3e0921f0491f 100644 (file)
@@ -547,9 +547,25 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
        }
 
        if (tunnel->version > 2) {
-               if (info->attrs[L2TP_ATTR_OFFSET])
+               if (info->attrs[L2TP_ATTR_PEER_OFFSET]) {
+                       struct nlattr *peer_offset;
+
+                       peer_offset = info->attrs[L2TP_ATTR_PEER_OFFSET];
+                       cfg.peer_offset = nla_get_u16(peer_offset);
+               }
+
+               if (info->attrs[L2TP_ATTR_OFFSET]) {
                        cfg.offset = nla_get_u16(info->attrs[L2TP_ATTR_OFFSET]);
 
+                       /* in order to maintain compatibility with older
+                        * versions where offset was used for both tx and
+                        * rx side, update rx side with offset if peer_offset
+                        * is not provided by userspace
+                        */
+                       if (!info->attrs[L2TP_ATTR_PEER_OFFSET])
+                               cfg.peer_offset = cfg.offset;
+               }
+
                if (info->attrs[L2TP_ATTR_DATA_SEQ])
                        cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]);
 
@@ -761,6 +777,10 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
 
        if ((session->ifname[0] &&
             nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
+           (session->offset &&
+            nla_put_u16(skb, L2TP_ATTR_OFFSET, session->offset)) ||
+           (session->peer_offset &&
+            nla_put_u16(skb, L2TP_ATTR_PEER_OFFSET, session->peer_offset)) ||
            (session->cookie_len &&
             nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
                     &session->cookie[0])) ||
@@ -901,6 +921,7 @@ static const struct nla_policy l2tp_nl_policy[L2TP_ATTR_MAX + 1] = {
        [L2TP_ATTR_PW_TYPE]             = { .type = NLA_U16, },
        [L2TP_ATTR_ENCAP_TYPE]          = { .type = NLA_U16, },
        [L2TP_ATTR_OFFSET]              = { .type = NLA_U16, },
+       [L2TP_ATTR_PEER_OFFSET]         = { .type = NLA_U16, },
        [L2TP_ATTR_DATA_SEQ]            = { .type = NLA_U8, },
        [L2TP_ATTR_L2SPEC_TYPE]         = { .type = NLA_U8, },
        [L2TP_ATTR_L2SPEC_LEN]          = { .type = NLA_U8, },
index 76d050aba7a481ff8e0229b810d563a0eeaa0ec3..56b8e7167790fe3f240cc5120889a67ba37c168d 100644 (file)
@@ -579,6 +579,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
                        return -EINVAL;
 
                skb_reset_network_header(skb);
+               key->eth.type = skb->protocol;
        } else {
                eth = eth_hdr(skb);
                ether_addr_copy(key->eth.src, eth->h_source);
@@ -592,15 +593,23 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
                if (unlikely(parse_vlan(skb, key)))
                        return -ENOMEM;
 
-               skb->protocol = parse_ethertype(skb);
-               if (unlikely(skb->protocol == htons(0)))
+               key->eth.type = parse_ethertype(skb);
+               if (unlikely(key->eth.type == htons(0)))
                        return -ENOMEM;
 
+               /* Multiple tagged packets need to retain TPID to satisfy
+                * skb_vlan_pop(), which will later shift the ethertype into
+                * skb->protocol.
+                */
+               if (key->eth.cvlan.tci & htons(VLAN_TAG_PRESENT))
+                       skb->protocol = key->eth.cvlan.tpid;
+               else
+                       skb->protocol = key->eth.type;
+
                skb_reset_network_header(skb);
                __skb_push(skb, skb->data - skb_mac_header(skb));
        }
        skb_reset_mac_len(skb);
-       key->eth.type = skb->protocol;
 
        /* Network layer. */
        if (key->eth.type == htons(ETH_P_IP)) {
index da215e5c139928132497c952aea91c61929e70b6..ee7aa0ba3a677b3eca4bc967ccd504089c991562 100644 (file)
@@ -247,12 +247,13 @@ static int packet_direct_xmit(struct sk_buff *skb)
        struct sk_buff *orig_skb = skb;
        struct netdev_queue *txq;
        int ret = NETDEV_TX_BUSY;
+       bool again = false;
 
        if (unlikely(!netif_running(dev) ||
                     !netif_carrier_ok(dev)))
                goto drop;
 
-       skb = validate_xmit_skb_list(skb, dev);
+       skb = validate_xmit_skb_list(skb, dev, &again);
        if (skb != orig_skb)
                goto drop;
 
index 75d43dc8e96b4655becce35dcaa5dd570abf4a41..5aa3a64aa4f0ef254bd31ecbfb11c81db0438185 100644 (file)
@@ -114,6 +114,7 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
                          rs, &addr, (int)ntohs(*port));
                        break;
                } else {
+                       rs->rs_bound_addr = 0;
                        rds_sock_put(rs);
                        ret = -ENOMEM;
                        break;
index 39f502d47969d9dcb30fc1f87640325a0f4c260d..2e554ef6d75fec7fdaab93a1347e2ecf1b39c545 100644 (file)
@@ -270,16 +270,33 @@ static int rds_tcp_laddr_check(struct net *net, __be32 addr)
        return -EADDRNOTAVAIL;
 }
 
+static void rds_tcp_conn_free(void *arg)
+{
+       struct rds_tcp_connection *tc = arg;
+       unsigned long flags;
+
+       rdsdebug("freeing tc %p\n", tc);
+
+       spin_lock_irqsave(&rds_tcp_conn_lock, flags);
+       if (!tc->t_tcp_node_detached)
+               list_del(&tc->t_tcp_node);
+       spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
+
+       kmem_cache_free(rds_tcp_conn_slab, tc);
+}
+
 static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 {
        struct rds_tcp_connection *tc;
-       int i;
+       int i, j;
+       int ret = 0;
 
        for (i = 0; i < RDS_MPATH_WORKERS; i++) {
                tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
-               if (!tc)
-                       return -ENOMEM;
-
+               if (!tc) {
+                       ret = -ENOMEM;
+                       break;
+               }
                mutex_init(&tc->t_conn_path_lock);
                tc->t_sock = NULL;
                tc->t_tinc = NULL;
@@ -290,27 +307,17 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
                tc->t_cpath = &conn->c_path[i];
 
                spin_lock_irq(&rds_tcp_conn_lock);
+               tc->t_tcp_node_detached = false;
                list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
                spin_unlock_irq(&rds_tcp_conn_lock);
                rdsdebug("rds_conn_path [%d] tc %p\n", i,
                         conn->c_path[i].cp_transport_data);
        }
-
-       return 0;
-}
-
-static void rds_tcp_conn_free(void *arg)
-{
-       struct rds_tcp_connection *tc = arg;
-       unsigned long flags;
-       rdsdebug("freeing tc %p\n", tc);
-
-       spin_lock_irqsave(&rds_tcp_conn_lock, flags);
-       if (!tc->t_tcp_node_detached)
-               list_del(&tc->t_tcp_node);
-       spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
-
-       kmem_cache_free(rds_tcp_conn_slab, tc);
+       if (ret) {
+               for (j = 0; j < i; j++)
+                       rds_tcp_conn_free(conn->c_path[j].cp_transport_data);
+       }
+       return ret;
 }
 
 static bool list_has_conn(struct list_head *list, struct rds_connection *conn)
index bf483db993a1e3a414c8ba54f1bf10f0501a8cd9..95d3c9097b2516507826702b471563b07722b9f6 100644 (file)
@@ -118,13 +118,13 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
        police = to_police(*a);
        if (parm->rate.rate) {
                err = -ENOMEM;
-               R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE]);
+               R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE], NULL);
                if (R_tab == NULL)
                        goto failure;
 
                if (parm->peakrate.rate) {
                        P_tab = qdisc_get_rtab(&parm->peakrate,
-                                              tb[TCA_POLICE_PEAKRATE]);
+                                              tb[TCA_POLICE_PEAKRATE], NULL);
                        if (P_tab == NULL)
                                goto failure;
                }
index 32b1ea7cf863444586bba3eafcafc9a06b5993f7..4591b87eaab589100492617563fe15b186f3e8ba 100644 (file)
@@ -281,20 +281,24 @@ static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
 }
 
 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
-                     struct tcf_block_ext_info *ei)
+                     struct tcf_block_ext_info *ei,
+                     struct netlink_ext_ack *extack)
 {
        struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
        struct tcf_chain *chain;
        int err;
 
-       if (!block)
+       if (!block) {
+               NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
                return -ENOMEM;
+       }
        INIT_LIST_HEAD(&block->chain_list);
        INIT_LIST_HEAD(&block->cb_list);
 
        /* Create chain 0 by default, it has to be always present. */
        chain = tcf_chain_create(block, 0);
        if (!chain) {
+               NL_SET_ERR_MSG(extack, "Failed to create new tcf chain");
                err = -ENOMEM;
                goto err_chain_create;
        }
@@ -321,7 +325,8 @@ static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
 }
 
 int tcf_block_get(struct tcf_block **p_block,
-                 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q)
+                 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+                 struct netlink_ext_ack *extack)
 {
        struct tcf_block_ext_info ei = {
                .chain_head_change = tcf_chain_head_change_dflt,
@@ -329,7 +334,7 @@ int tcf_block_get(struct tcf_block **p_block,
        };
 
        WARN_ON(!p_filter_chain);
-       return tcf_block_get_ext(p_block, q, &ei);
+       return tcf_block_get_ext(p_block, q, &ei, extack);
 }
 EXPORT_SYMBOL(tcf_block_get);
 
@@ -793,7 +798,7 @@ replay:
        }
 
        /* And the last stroke */
-       block = cops->tcf_block(q, cl);
+       block = cops->tcf_block(q, cl, extack);
        if (!block) {
                err = -EINVAL;
                goto errout;
@@ -1040,7 +1045,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
                if (cl == 0)
                        goto out;
        }
-       block = cops->tcf_block(q, cl);
+       block = cops->tcf_block(q, cl, NULL);
        if (!block)
                goto out;
 
index 6fe798c2df1a5303cd61cd3ad53cd2f9385d16de..8d78e7f4ecc33082517aaab5767a30c119f49dc0 100644 (file)
@@ -42,7 +42,6 @@ struct cls_bpf_prog {
        struct list_head link;
        struct tcf_result res;
        bool exts_integrated;
-       bool offloaded;
        u32 gen_flags;
        struct tcf_exts exts;
        u32 handle;
@@ -148,33 +147,37 @@ static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
 }
 
 static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
-                              enum tc_clsbpf_command cmd)
+                              struct cls_bpf_prog *oldprog)
 {
-       bool addorrep = cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE;
        struct tcf_block *block = tp->chain->block;
-       bool skip_sw = tc_skip_sw(prog->gen_flags);
        struct tc_cls_bpf_offload cls_bpf = {};
+       struct cls_bpf_prog *obj;
+       bool skip_sw;
        int err;
 
+       skip_sw = prog && tc_skip_sw(prog->gen_flags);
+       obj = prog ?: oldprog;
+
        tc_cls_common_offload_init(&cls_bpf.common, tp);
-       cls_bpf.command = cmd;
-       cls_bpf.exts = &prog->exts;
-       cls_bpf.prog = prog->filter;
-       cls_bpf.name = prog->bpf_name;
-       cls_bpf.exts_integrated = prog->exts_integrated;
-       cls_bpf.gen_flags = prog->gen_flags;
+       cls_bpf.command = TC_CLSBPF_OFFLOAD;
+       cls_bpf.exts = &obj->exts;
+       cls_bpf.prog = prog ? prog->filter : NULL;
+       cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
+       cls_bpf.name = obj->bpf_name;
+       cls_bpf.exts_integrated = obj->exts_integrated;
+       cls_bpf.gen_flags = obj->gen_flags;
 
        err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
-       if (addorrep) {
+       if (prog) {
                if (err < 0) {
-                       cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
+                       cls_bpf_offload_cmd(tp, oldprog, prog);
                        return err;
                } else if (err > 0) {
                        prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;
                }
        }
 
-       if (addorrep && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
+       if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
                return -EINVAL;
 
        return 0;
@@ -183,38 +186,17 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
 static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
                           struct cls_bpf_prog *oldprog)
 {
-       struct cls_bpf_prog *obj = prog;
-       enum tc_clsbpf_command cmd;
-       bool skip_sw;
-       int ret;
-
-       skip_sw = tc_skip_sw(prog->gen_flags) ||
-               (oldprog && tc_skip_sw(oldprog->gen_flags));
-
-       if (oldprog && oldprog->offloaded) {
-               if (!tc_skip_hw(prog->gen_flags)) {
-                       cmd = TC_CLSBPF_REPLACE;
-               } else if (!tc_skip_sw(prog->gen_flags)) {
-                       obj = oldprog;
-                       cmd = TC_CLSBPF_DESTROY;
-               } else {
-                       return -EINVAL;
-               }
-       } else {
-               if (tc_skip_hw(prog->gen_flags))
-                       return skip_sw ? -EINVAL : 0;
-               cmd = TC_CLSBPF_ADD;
-       }
-
-       ret = cls_bpf_offload_cmd(tp, obj, cmd);
-       if (ret)
-               return ret;
+       if (prog && oldprog && prog->gen_flags != oldprog->gen_flags)
+               return -EINVAL;
 
-       obj->offloaded = true;
-       if (oldprog)
-               oldprog->offloaded = false;
+       if (prog && tc_skip_hw(prog->gen_flags))
+               prog = NULL;
+       if (oldprog && tc_skip_hw(oldprog->gen_flags))
+               oldprog = NULL;
+       if (!prog && !oldprog)
+               return 0;
 
-       return 0;
+       return cls_bpf_offload_cmd(tp, prog, oldprog);
 }
 
 static void cls_bpf_stop_offload(struct tcf_proto *tp,
@@ -222,25 +204,26 @@ static void cls_bpf_stop_offload(struct tcf_proto *tp,
 {
        int err;
 
-       if (!prog->offloaded)
-               return;
-
-       err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
-       if (err) {
+       err = cls_bpf_offload_cmd(tp, NULL, prog);
+       if (err)
                pr_err("Stopping hardware offload failed: %d\n", err);
-               return;
-       }
-
-       prog->offloaded = false;
 }
 
 static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
                                         struct cls_bpf_prog *prog)
 {
-       if (!prog->offloaded)
-               return;
+       struct tcf_block *block = tp->chain->block;
+       struct tc_cls_bpf_offload cls_bpf = {};
+
+       tc_cls_common_offload_init(&cls_bpf.common, tp);
+       cls_bpf.command = TC_CLSBPF_STATS;
+       cls_bpf.exts = &prog->exts;
+       cls_bpf.prog = prog->filter;
+       cls_bpf.name = prog->bpf_name;
+       cls_bpf.exts_integrated = prog->exts_integrated;
+       cls_bpf.gen_flags = prog->gen_flags;
 
-       cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
+       tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, false);
 }
 
 static int cls_bpf_init(struct tcf_proto *tp)
index 74c22b4e365e4b6b267854632418ad98c266d4cb..3a3a1da6b07188a1a347714dc81a45680bd53719 100644 (file)
@@ -393,13 +393,16 @@ static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
 static struct qdisc_rate_table *qdisc_rtab_list;
 
 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
-                                       struct nlattr *tab)
+                                       struct nlattr *tab,
+                                       struct netlink_ext_ack *extack)
 {
        struct qdisc_rate_table *rtab;
 
        if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
-           nla_len(tab) != TC_RTAB_SIZE)
+           nla_len(tab) != TC_RTAB_SIZE) {
+               NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
                return NULL;
+       }
 
        for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
                if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
@@ -418,6 +421,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
                        r->linklayer = __detect_linklayer(r, rtab->data);
                rtab->next = qdisc_rtab_list;
                qdisc_rtab_list = rtab;
+       } else {
+               NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
        }
        return rtab;
 }
@@ -449,7 +454,8 @@ static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
        [TCA_STAB_DATA] = { .type = NLA_BINARY },
 };
 
-static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
+static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
+                                              struct netlink_ext_ack *extack)
 {
        struct nlattr *tb[TCA_STAB_MAX + 1];
        struct qdisc_size_table *stab;
@@ -458,23 +464,29 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
        u16 *tab = NULL;
        int err;
 
-       err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, NULL);
+       err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, extack);
        if (err < 0)
                return ERR_PTR(err);
-       if (!tb[TCA_STAB_BASE])
+       if (!tb[TCA_STAB_BASE]) {
+               NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
                return ERR_PTR(-EINVAL);
+       }
 
        s = nla_data(tb[TCA_STAB_BASE]);
 
        if (s->tsize > 0) {
-               if (!tb[TCA_STAB_DATA])
+               if (!tb[TCA_STAB_DATA]) {
+                       NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
                        return ERR_PTR(-EINVAL);
+               }
                tab = nla_data(tb[TCA_STAB_DATA]);
                tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
        }
 
-       if (tsize != s->tsize || (!tab && tsize > 0))
+       if (tsize != s->tsize || (!tab && tsize > 0)) {
+               NL_SET_ERR_MSG(extack, "Invalid size of size table");
                return ERR_PTR(-EINVAL);
+       }
 
        list_for_each_entry(stab, &qdisc_stab_list, list) {
                if (memcmp(&stab->szopts, s, sizeof(*s)))
@@ -669,7 +681,7 @@ int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
        unsigned int size = 4;
 
        clhash->hash = qdisc_class_hash_alloc(size);
-       if (clhash->hash == NULL)
+       if (!clhash->hash)
                return -ENOMEM;
        clhash->hashsize  = size;
        clhash->hashmask  = size - 1;
@@ -899,7 +911,8 @@ static void notify_and_destroy(struct net *net, struct sk_buff *skb,
 
 static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
                       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
-                      struct Qdisc *new, struct Qdisc *old)
+                      struct Qdisc *new, struct Qdisc *old,
+                      struct netlink_ext_ack *extack)
 {
        struct Qdisc *q = old;
        struct net *net = dev_net(dev);
@@ -914,8 +927,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
                    (new && new->flags & TCQ_F_INGRESS)) {
                        num_q = 1;
                        ingress = 1;
-                       if (!dev_ingress_queue(dev))
+                       if (!dev_ingress_queue(dev)) {
+                               NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
                                return -ENOENT;
+                       }
                }
 
                if (dev->flags & IFF_UP)
@@ -966,10 +981,13 @@ skip:
                if (cops && cops->graft) {
                        unsigned long cl = cops->find(parent, classid);
 
-                       if (cl)
-                               err = cops->graft(parent, cl, new, &old);
-                       else
+                       if (cl) {
+                               err = cops->graft(parent, cl, new, &old,
+                                                 extack);
+                       } else {
+                               NL_SET_ERR_MSG(extack, "Specified class not found");
                                err = -ENOENT;
+                       }
                }
                if (!err)
                        notify_and_destroy(net, skb, n, classid, old, new);
@@ -990,7 +1008,8 @@ static struct lock_class_key qdisc_rx_lock;
 static struct Qdisc *qdisc_create(struct net_device *dev,
                                  struct netdev_queue *dev_queue,
                                  struct Qdisc *p, u32 parent, u32 handle,
-                                 struct nlattr **tca, int *errp)
+                                 struct nlattr **tca, int *errp,
+                                 struct netlink_ext_ack *extack)
 {
        int err;
        struct nlattr *kind = tca[TCA_KIND];
@@ -1028,10 +1047,12 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
 #endif
 
        err = -ENOENT;
-       if (!ops)
+       if (!ops) {
+               NL_SET_ERR_MSG(extack, "Specified qdisc not found");
                goto err_out;
+       }
 
-       sch = qdisc_alloc(dev_queue, ops);
+       sch = qdisc_alloc(dev_queue, ops, extack);
        if (IS_ERR(sch)) {
                err = PTR_ERR(sch);
                goto err_out2;
@@ -1069,7 +1090,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
        }
 
        if (ops->init) {
-               err = ops->init(sch, tca[TCA_OPTIONS]);
+               err = ops->init(sch, tca[TCA_OPTIONS], extack);
                if (err != 0)
                        goto err_out5;
        }
@@ -1086,7 +1107,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
        }
 
        if (tca[TCA_STAB]) {
-               stab = qdisc_get_stab(tca[TCA_STAB]);
+               stab = qdisc_get_stab(tca[TCA_STAB], extack);
                if (IS_ERR(stab)) {
                        err = PTR_ERR(stab);
                        goto err_out4;
@@ -1097,8 +1118,10 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
                seqcount_t *running;
 
                err = -EOPNOTSUPP;
-               if (sch->flags & TCQ_F_MQROOT)
+               if (sch->flags & TCQ_F_MQROOT) {
+                       NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
                        goto err_out4;
+               }
 
                if (sch->parent != TC_H_ROOT &&
                    !(sch->flags & TCQ_F_INGRESS) &&
@@ -1113,8 +1136,10 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
                                        NULL,
                                        running,
                                        tca[TCA_RATE]);
-               if (err)
+               if (err) {
+                       NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
                        goto err_out4;
+               }
        }
 
        qdisc_hash_add(sch, false);
@@ -1147,21 +1172,24 @@ err_out4:
        goto err_out3;
 }
 
-static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
+static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
+                       struct netlink_ext_ack *extack)
 {
        struct qdisc_size_table *ostab, *stab = NULL;
        int err = 0;
 
        if (tca[TCA_OPTIONS]) {
-               if (!sch->ops->change)
+               if (!sch->ops->change) {
+                       NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
                        return -EINVAL;
-               err = sch->ops->change(sch, tca[TCA_OPTIONS]);
+               }
+               err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
                if (err)
                        return err;
        }
 
        if (tca[TCA_STAB]) {
-               stab = qdisc_get_stab(tca[TCA_STAB]);
+               stab = qdisc_get_stab(tca[TCA_STAB], extack);
                if (IS_ERR(stab))
                        return PTR_ERR(stab);
        }
@@ -1259,8 +1287,10 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
                if (clid != TC_H_ROOT) {
                        if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
                                p = qdisc_lookup(dev, TC_H_MAJ(clid));
-                               if (!p)
+                               if (!p) {
+                                       NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
                                        return -ENOENT;
+                               }
                                q = qdisc_leaf(p, clid);
                        } else if (dev_ingress_queue(dev)) {
                                q = dev_ingress_queue(dev)->qdisc_sleeping;
@@ -1268,26 +1298,38 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
                } else {
                        q = dev->qdisc;
                }
-               if (!q)
+               if (!q) {
+                       NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
                        return -ENOENT;
+               }
 
-               if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
+               if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
+                       NL_SET_ERR_MSG(extack, "Invalid handle");
                        return -EINVAL;
+               }
        } else {
                q = qdisc_lookup(dev, tcm->tcm_handle);
-               if (!q)
+               if (!q) {
+                       NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
                        return -ENOENT;
+               }
        }
 
-       if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
+       if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
+               NL_SET_ERR_MSG(extack, "Invalid qdisc name");
                return -EINVAL;
+       }
 
        if (n->nlmsg_type == RTM_DELQDISC) {
-               if (!clid)
+               if (!clid) {
+                       NL_SET_ERR_MSG(extack, "Classid cannot be zero");
                        return -EINVAL;
-               if (q->handle == 0)
+               }
+               if (q->handle == 0) {
+                       NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
                        return -ENOENT;
-               err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
+               }
+               err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
                if (err != 0)
                        return err;
        } else {
@@ -1333,8 +1375,10 @@ replay:
                if (clid != TC_H_ROOT) {
                        if (clid != TC_H_INGRESS) {
                                p = qdisc_lookup(dev, TC_H_MAJ(clid));
-                               if (!p)
+                               if (!p) {
+                                       NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
                                        return -ENOENT;
+                               }
                                q = qdisc_leaf(p, clid);
                        } else if (dev_ingress_queue_create(dev)) {
                                q = dev_ingress_queue(dev)->qdisc_sleeping;
@@ -1349,21 +1393,33 @@ replay:
 
                if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
                        if (tcm->tcm_handle) {
-                               if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
+                               if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
+                                       NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
                                        return -EEXIST;
-                               if (TC_H_MIN(tcm->tcm_handle))
+                               }
+                               if (TC_H_MIN(tcm->tcm_handle)) {
+                                       NL_SET_ERR_MSG(extack, "Invalid minor handle");
                                        return -EINVAL;
+                               }
                                q = qdisc_lookup(dev, tcm->tcm_handle);
-                               if (!q)
+                               if (!q) {
+                                       NL_SET_ERR_MSG(extack, "No qdisc found for specified handle");
                                        goto create_n_graft;
-                               if (n->nlmsg_flags & NLM_F_EXCL)
+                               }
+                               if (n->nlmsg_flags & NLM_F_EXCL) {
+                                       NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
                                        return -EEXIST;
+                               }
                                if (tca[TCA_KIND] &&
-                                   nla_strcmp(tca[TCA_KIND], q->ops->id))
+                                   nla_strcmp(tca[TCA_KIND], q->ops->id)) {
+                                       NL_SET_ERR_MSG(extack, "Invalid qdisc name");
                                        return -EINVAL;
+                               }
                                if (q == p ||
-                                   (p && check_loop(q, p, 0)))
+                                   (p && check_loop(q, p, 0))) {
+                                       NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
                                        return -ELOOP;
+                               }
                                qdisc_refcount_inc(q);
                                goto graft;
                        } else {
@@ -1398,33 +1454,45 @@ replay:
                        }
                }
        } else {
-               if (!tcm->tcm_handle)
+               if (!tcm->tcm_handle) {
+                       NL_SET_ERR_MSG(extack, "Handle cannot be zero");
                        return -EINVAL;
+               }
                q = qdisc_lookup(dev, tcm->tcm_handle);
        }
 
        /* Change qdisc parameters */
-       if (!q)
+       if (!q) {
+               NL_SET_ERR_MSG(extack, "Specified qdisc not found");
                return -ENOENT;
-       if (n->nlmsg_flags & NLM_F_EXCL)
+       }
+       if (n->nlmsg_flags & NLM_F_EXCL) {
+               NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
                return -EEXIST;
-       if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
+       }
+       if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
+               NL_SET_ERR_MSG(extack, "Invalid qdisc name");
                return -EINVAL;
-       err = qdisc_change(q, tca);
+       }
+       err = qdisc_change(q, tca, extack);
        if (err == 0)
                qdisc_notify(net, skb, n, clid, NULL, q);
        return err;
 
 create_n_graft:
-       if (!(n->nlmsg_flags & NLM_F_CREATE))
+       if (!(n->nlmsg_flags & NLM_F_CREATE)) {
+               NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
                return -ENOENT;
+       }
        if (clid == TC_H_INGRESS) {
-               if (dev_ingress_queue(dev))
+               if (dev_ingress_queue(dev)) {
                        q = qdisc_create(dev, dev_ingress_queue(dev), p,
                                         tcm->tcm_parent, tcm->tcm_parent,
-                                        tca, &err);
-               else
+                                        tca, &err, extack);
+               } else {
+                       NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
                        err = -ENOENT;
+               }
        } else {
                struct netdev_queue *dev_queue;
 
@@ -1437,7 +1505,7 @@ create_n_graft:
 
                q = qdisc_create(dev, dev_queue, p,
                                 tcm->tcm_parent, tcm->tcm_handle,
-                                tca, &err);
+                                tca, &err, extack);
        }
        if (q == NULL) {
                if (err == -EAGAIN)
@@ -1446,7 +1514,7 @@ create_n_graft:
        }
 
 graft:
-       err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
+       err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
        if (err) {
                if (q)
                        qdisc_destroy(q);
@@ -1698,7 +1766,7 @@ static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
        cl = cops->find(q, portid);
        if (!cl)
                return;
-       block = cops->tcf_block(q, cl);
+       block = cops->tcf_block(q, cl, NULL);
        if (!block)
                return;
        list_for_each_entry(chain, &block->chain_list, list) {
@@ -1845,7 +1913,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
        new_cl = cl;
        err = -EOPNOTSUPP;
        if (cops->change)
-               err = cops->change(q, clid, portid, tca, &new_cl);
+               err = cops->change(q, clid, portid, tca, &new_cl, extack);
        if (err == 0) {
                tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
                /* We just create a new class, need to do reverse binding. */
index 2dbd249c0b2fc86c88995f34889c2e242bc98970..cd49afca96177bed5c37caa19e4f19b0c452a0a1 100644 (file)
@@ -82,7 +82,8 @@ static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
 }
 
 static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
-                       struct Qdisc *new, struct Qdisc **old)
+                       struct Qdisc *new, struct Qdisc **old,
+                       struct netlink_ext_ack *extack)
 {
        struct atm_qdisc_data *p = qdisc_priv(sch);
        struct atm_flow_data *flow = (struct atm_flow_data *)arg;
@@ -191,7 +192,8 @@ static const struct nla_policy atm_policy[TCA_ATM_MAX + 1] = {
 };
 
 static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
-                        struct nlattr **tca, unsigned long *arg)
+                        struct nlattr **tca, unsigned long *arg,
+                        struct netlink_ext_ack *extack)
 {
        struct atm_qdisc_data *p = qdisc_priv(sch);
        struct atm_flow_data *flow = (struct atm_flow_data *)*arg;
@@ -281,13 +283,15 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
                goto err_out;
        }
 
-       error = tcf_block_get(&flow->block, &flow->filter_list, sch);
+       error = tcf_block_get(&flow->block, &flow->filter_list, sch,
+                             extack);
        if (error) {
                kfree(flow);
                goto err_out;
        }
 
-       flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
+       flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
+                                   extack);
        if (!flow->q)
                flow->q = &noop_qdisc;
        pr_debug("atm_tc_change: qdisc %p\n", flow->q);
@@ -356,7 +360,8 @@ static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
        }
 }
 
-static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                         struct netlink_ext_ack *extack)
 {
        struct atm_qdisc_data *p = qdisc_priv(sch);
        struct atm_flow_data *flow = (struct atm_flow_data *)cl;
@@ -531,7 +536,8 @@ static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
        return p->link.q->ops->peek(p->link.q);
 }
 
-static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
+static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
+                      struct netlink_ext_ack *extack)
 {
        struct atm_qdisc_data *p = qdisc_priv(sch);
        int err;
@@ -541,12 +547,13 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
        INIT_LIST_HEAD(&p->link.list);
        list_add(&p->link.list, &p->flows);
        p->link.q = qdisc_create_dflt(sch->dev_queue,
-                                     &pfifo_qdisc_ops, sch->handle);
+                                     &pfifo_qdisc_ops, sch->handle, extack);
        if (!p->link.q)
                p->link.q = &noop_qdisc;
        pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
 
-       err = tcf_block_get(&p->link.block, &p->link.filter_list, sch);
+       err = tcf_block_get(&p->link.block, &p->link.filter_list, sch,
+                           extack);
        if (err)
                return err;
 
index 525eb3a6d625164e9193da65dd67795d6cdd3cc2..f42025d53cfe1e4fcd91931bf14b7328896db1e2 100644 (file)
@@ -1132,7 +1132,8 @@ static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
        [TCA_CBQ_POLICE]        = { .len = sizeof(struct tc_cbq_police) },
 };
 
-static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
+static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
+                   struct netlink_ext_ack *extack)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_CBQ_MAX + 1];
@@ -1143,22 +1144,27 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
        hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
        q->delay_timer.function = cbq_undelay;
 
-       if (!opt)
+       if (!opt) {
+               NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
                return -EINVAL;
+       }
 
-       err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
+       err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
        if (err < 0)
                return err;
 
-       if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
+       if (!tb[TCA_CBQ_RTAB] || !tb[TCA_CBQ_RATE]) {
+               NL_SET_ERR_MSG(extack, "Rate specification missing or incomplete");
                return -EINVAL;
+       }
 
        r = nla_data(tb[TCA_CBQ_RATE]);
 
-       if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
+       q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB], extack);
+       if (!q->link.R_tab)
                return -EINVAL;
 
-       err = tcf_block_get(&q->link.block, &q->link.filter_list, sch);
+       err = tcf_block_get(&q->link.block, &q->link.filter_list, sch, extack);
        if (err)
                goto put_rtab;
 
@@ -1170,7 +1176,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
        q->link.common.classid = sch->handle;
        q->link.qdisc = sch;
        q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-                                     sch->handle);
+                                     sch->handle, NULL);
        if (!q->link.q)
                q->link.q = &noop_qdisc;
        else
@@ -1369,13 +1375,13 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 }
 
 static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-                    struct Qdisc **old)
+                    struct Qdisc **old, struct netlink_ext_ack *extack)
 {
        struct cbq_class *cl = (struct cbq_class *)arg;
 
        if (new == NULL) {
-               new = qdisc_create_dflt(sch->dev_queue,
-                                       &pfifo_qdisc_ops, cl->common.classid);
+               new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+                                       cl->common.classid, extack);
                if (new == NULL)
                        return -ENOBUFS;
        }
@@ -1450,7 +1456,7 @@ static void cbq_destroy(struct Qdisc *sch)
 
 static int
 cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
-                unsigned long *arg)
+                unsigned long *arg, struct netlink_ext_ack *extack)
 {
        int err;
        struct cbq_sched_data *q = qdisc_priv(sch);
@@ -1460,29 +1466,37 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
        struct cbq_class *parent;
        struct qdisc_rate_table *rtab = NULL;
 
-       if (opt == NULL)
+       if (!opt) {
+               NL_SET_ERR_MSG(extack, "Mandatory qdisc options missing");
                return -EINVAL;
+       }
 
-       err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
+       err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
        if (err < 0)
                return err;
 
-       if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE])
+       if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE]) {
+               NL_SET_ERR_MSG(extack, "Neither overlimit strategy nor policing attributes can be used for changing class params");
                return -EOPNOTSUPP;
+       }
 
        if (cl) {
                /* Check parent */
                if (parentid) {
                        if (cl->tparent &&
-                           cl->tparent->common.classid != parentid)
+                           cl->tparent->common.classid != parentid) {
+                               NL_SET_ERR_MSG(extack, "Invalid parent id");
                                return -EINVAL;
-                       if (!cl->tparent && parentid != TC_H_ROOT)
+                       }
+                       if (!cl->tparent && parentid != TC_H_ROOT) {
+                               NL_SET_ERR_MSG(extack, "Parent must be root");
                                return -EINVAL;
+                       }
                }
 
                if (tb[TCA_CBQ_RATE]) {
                        rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
-                                             tb[TCA_CBQ_RTAB]);
+                                             tb[TCA_CBQ_RTAB], extack);
                        if (rtab == NULL)
                                return -EINVAL;
                }
@@ -1494,6 +1508,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
                                                    qdisc_root_sleeping_running(sch),
                                                    tca[TCA_RATE]);
                        if (err) {
+                               NL_SET_ERR_MSG(extack, "Failed to replace specified rate estimator");
                                qdisc_put_rtab(rtab);
                                return err;
                        }
@@ -1532,19 +1547,23 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
        if (parentid == TC_H_ROOT)
                return -EINVAL;
 
-       if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
-           tb[TCA_CBQ_LSSOPT] == NULL)
+       if (!tb[TCA_CBQ_WRROPT] || !tb[TCA_CBQ_RATE] || !tb[TCA_CBQ_LSSOPT]) {
+               NL_SET_ERR_MSG(extack, "One of the following attributes MUST be specified: WRR, rate or link sharing");
                return -EINVAL;
+       }
 
-       rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
+       rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB],
+                             extack);
        if (rtab == NULL)
                return -EINVAL;
 
        if (classid) {
                err = -EINVAL;
                if (TC_H_MAJ(classid ^ sch->handle) ||
-                   cbq_class_lookup(q, classid))
+                   cbq_class_lookup(q, classid)) {
+                       NL_SET_ERR_MSG(extack, "Specified class not found");
                        goto failure;
+               }
        } else {
                int i;
                classid = TC_H_MAKE(sch->handle, 0x8000);
@@ -1556,8 +1575,10 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
                                break;
                }
                err = -ENOSR;
-               if (i >= 0x8000)
+               if (i >= 0x8000) {
+                       NL_SET_ERR_MSG(extack, "Unable to generate classid");
                        goto failure;
+               }
                classid = classid|q->hgenerator;
        }
 
@@ -1565,8 +1586,10 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
        if (parentid) {
                parent = cbq_class_lookup(q, parentid);
                err = -EINVAL;
-               if (parent == NULL)
+               if (!parent) {
+                       NL_SET_ERR_MSG(extack, "Failed to find parentid");
                        goto failure;
+               }
        }
 
        err = -ENOBUFS;
@@ -1574,7 +1597,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
        if (cl == NULL)
                goto failure;
 
-       err = tcf_block_get(&cl->block, &cl->filter_list, sch);
+       err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
        if (err) {
                kfree(cl);
                return err;
@@ -1586,6 +1609,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
                                        qdisc_root_sleeping_running(sch),
                                        tca[TCA_RATE]);
                if (err) {
+                       NL_SET_ERR_MSG(extack, "Couldn't create new estimator");
                        tcf_block_put(cl->block);
                        kfree(cl);
                        goto failure;
@@ -1594,7 +1618,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
        cl->R_tab = rtab;
        rtab = NULL;
-       cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
+       cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
+                                 NULL);
        if (!cl->q)
                cl->q = &noop_qdisc;
        else
@@ -1678,7 +1703,8 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
        return 0;
 }
 
-static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg)
+static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg,
+                                      struct netlink_ext_ack *extack)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = (struct cbq_class *)arg;
index 7a72980c15092ca10a781e5c4e4f9619d057fb63..cdd96b9a27bcf1ef510e282ff97a8710132b1ce6 100644 (file)
@@ -219,14 +219,17 @@ static void cbs_disable_offload(struct net_device *dev,
 }
 
 static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
-                             const struct tc_cbs_qopt *opt)
+                             const struct tc_cbs_qopt *opt,
+                             struct netlink_ext_ack *extack)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
        struct tc_cbs_qopt_offload cbs = { };
        int err;
 
-       if (!ops->ndo_setup_tc)
+       if (!ops->ndo_setup_tc) {
+               NL_SET_ERR_MSG(extack, "Specified device does not support cbs offload");
                return -EOPNOTSUPP;
+       }
 
        cbs.queue = q->queue;
 
@@ -237,8 +240,10 @@ static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
        cbs.sendslope = opt->sendslope;
 
        err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_CBS, &cbs);
-       if (err < 0)
+       if (err < 0) {
+               NL_SET_ERR_MSG(extack, "Specified device failed to setup cbs hardware offload");
                return err;
+       }
 
        q->enqueue = cbs_enqueue_offload;
        q->dequeue = cbs_dequeue_offload;
@@ -246,7 +251,8 @@ static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
        return 0;
 }
 
-static int cbs_change(struct Qdisc *sch, struct nlattr *opt)
+static int cbs_change(struct Qdisc *sch, struct nlattr *opt,
+                     struct netlink_ext_ack *extack)
 {
        struct cbs_sched_data *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
@@ -254,12 +260,14 @@ static int cbs_change(struct Qdisc *sch, struct nlattr *opt)
        struct tc_cbs_qopt *qopt;
        int err;
 
-       err = nla_parse_nested(tb, TCA_CBS_MAX, opt, cbs_policy, NULL);
+       err = nla_parse_nested(tb, TCA_CBS_MAX, opt, cbs_policy, extack);
        if (err < 0)
                return err;
 
-       if (!tb[TCA_CBS_PARMS])
+       if (!tb[TCA_CBS_PARMS]) {
+               NL_SET_ERR_MSG(extack, "Missing CBS parameter which are mandatory");
                return -EINVAL;
+       }
 
        qopt = nla_data(tb[TCA_CBS_PARMS]);
 
@@ -276,7 +284,7 @@ static int cbs_change(struct Qdisc *sch, struct nlattr *opt)
 
                cbs_disable_offload(dev, q);
        } else {
-               err = cbs_enable_offload(dev, q, qopt);
+               err = cbs_enable_offload(dev, q, qopt, extack);
                if (err < 0)
                        return err;
        }
@@ -291,13 +299,16 @@ static int cbs_change(struct Qdisc *sch, struct nlattr *opt)
        return 0;
 }
 
-static int cbs_init(struct Qdisc *sch, struct nlattr *opt)
+static int cbs_init(struct Qdisc *sch, struct nlattr *opt,
+                   struct netlink_ext_ack *extack)
 {
        struct cbs_sched_data *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
 
-       if (!opt)
+       if (!opt) {
+               NL_SET_ERR_MSG(extack, "Missing CBS qdisc options  which are mandatory");
                return -EINVAL;
+       }
 
        q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);
 
@@ -306,7 +317,7 @@ static int cbs_init(struct Qdisc *sch, struct nlattr *opt)
 
        qdisc_watchdog_init(&q->watchdog, sch);
 
-       return cbs_change(sch, opt);
+       return cbs_change(sch, opt, extack);
 }
 
 static void cbs_destroy(struct Qdisc *sch)
index 531250fceb9e5a75d6a8b843e5e5fd9d481fddf2..eafc0d17d1748cf01f33d206cb46dde6ed81116c 100644 (file)
@@ -344,7 +344,8 @@ static void choke_free(void *addr)
        kvfree(addr);
 }
 
-static int choke_change(struct Qdisc *sch, struct nlattr *opt)
+static int choke_change(struct Qdisc *sch, struct nlattr *opt,
+                       struct netlink_ext_ack *extack)
 {
        struct choke_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_CHOKE_MAX + 1];
@@ -431,9 +432,10 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
        return 0;
 }
 
-static int choke_init(struct Qdisc *sch, struct nlattr *opt)
+static int choke_init(struct Qdisc *sch, struct nlattr *opt,
+                     struct netlink_ext_ack *extack)
 {
-       return choke_change(sch, opt);
+       return choke_change(sch, opt, extack);
 }
 
 static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
index c518a1efcb9d3bd98d1de0d921bd250ed46bc69c..17cd81f84b5de9d9f53acf2c9078a7618927ddc5 100644 (file)
@@ -130,7 +130,8 @@ static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
        [TCA_CODEL_CE_THRESHOLD]= { .type = NLA_U32 },
 };
 
-static int codel_change(struct Qdisc *sch, struct nlattr *opt)
+static int codel_change(struct Qdisc *sch, struct nlattr *opt,
+                       struct netlink_ext_ack *extack)
 {
        struct codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_CODEL_MAX + 1];
@@ -184,7 +185,8 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
        return 0;
 }
 
-static int codel_init(struct Qdisc *sch, struct nlattr *opt)
+static int codel_init(struct Qdisc *sch, struct nlattr *opt,
+                     struct netlink_ext_ack *extack)
 {
        struct codel_sched_data *q = qdisc_priv(sch);
 
@@ -196,7 +198,7 @@ static int codel_init(struct Qdisc *sch, struct nlattr *opt)
        q->params.mtu = psched_mtu(qdisc_dev(sch));
 
        if (opt) {
-               int err = codel_change(sch, opt);
+               int err = codel_change(sch, opt, extack);
 
                if (err)
                        return err;
index 5bbcef3dcd8cab49881292aa39ca213e456e7df2..e0b0cf8a993933b06adeeec8798b44fbe9e9cb90 100644 (file)
@@ -64,7 +64,8 @@ static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
 };
 
 static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-                           struct nlattr **tca, unsigned long *arg)
+                           struct nlattr **tca, unsigned long *arg,
+                           struct netlink_ext_ack *extack)
 {
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl = (struct drr_class *)*arg;
@@ -73,17 +74,21 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        u32 quantum;
        int err;
 
-       if (!opt)
+       if (!opt) {
+               NL_SET_ERR_MSG(extack, "DRR options are required for this operation");
                return -EINVAL;
+       }
 
-       err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy, NULL);
+       err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy, extack);
        if (err < 0)
                return err;
 
        if (tb[TCA_DRR_QUANTUM]) {
                quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
-               if (quantum == 0)
+               if (quantum == 0) {
+                       NL_SET_ERR_MSG(extack, "Specified DRR quantum cannot be zero");
                        return -EINVAL;
+               }
        } else
                quantum = psched_mtu(qdisc_dev(sch));
 
@@ -94,8 +99,10 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                                                    NULL,
                                                    qdisc_root_sleeping_running(sch),
                                                    tca[TCA_RATE]);
-                       if (err)
+                       if (err) {
+                               NL_SET_ERR_MSG(extack, "Failed to replace estimator");
                                return err;
+                       }
                }
 
                sch_tree_lock(sch);
@@ -113,7 +120,8 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        cl->common.classid = classid;
        cl->quantum        = quantum;
        cl->qdisc          = qdisc_create_dflt(sch->dev_queue,
-                                              &pfifo_qdisc_ops, classid);
+                                              &pfifo_qdisc_ops, classid,
+                                              NULL);
        if (cl->qdisc == NULL)
                cl->qdisc = &noop_qdisc;
        else
@@ -125,6 +133,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                                            qdisc_root_sleeping_running(sch),
                                            tca[TCA_RATE]);
                if (err) {
+                       NL_SET_ERR_MSG(extack, "Failed to replace estimator");
                        qdisc_destroy(cl->qdisc);
                        kfree(cl);
                        return err;
@@ -172,12 +181,15 @@ static unsigned long drr_search_class(struct Qdisc *sch, u32 classid)
        return (unsigned long)drr_find_class(sch, classid);
 }
 
-static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                      struct netlink_ext_ack *extack)
 {
        struct drr_sched *q = qdisc_priv(sch);
 
-       if (cl)
+       if (cl) {
+               NL_SET_ERR_MSG(extack, "DRR classid must be zero");
                return NULL;
+       }
 
        return q->block;
 }
@@ -201,13 +213,14 @@ static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
 }
 
 static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
-                          struct Qdisc *new, struct Qdisc **old)
+                          struct Qdisc *new, struct Qdisc **old,
+                          struct netlink_ext_ack *extack)
 {
        struct drr_class *cl = (struct drr_class *)arg;
 
        if (new == NULL) {
-               new = qdisc_create_dflt(sch->dev_queue,
-                                       &pfifo_qdisc_ops, cl->common.classid);
+               new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+                                       cl->common.classid, NULL);
                if (new == NULL)
                        new = &noop_qdisc;
        }
@@ -408,12 +421,13 @@ out:
        return NULL;
 }
 
-static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
+static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
+                         struct netlink_ext_ack *extack)
 {
        struct drr_sched *q = qdisc_priv(sch);
        int err;
 
-       err = tcf_block_get(&q->block, &q->filter_list, sch);
+       err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                return err;
        err = qdisc_class_hash_init(&q->clhash);
index fb4fb71c68cfa6557d042b2bbc5ccebc5493e8a4..049714c57075c6acb212e4cfb47d962fda67b2bb 100644 (file)
@@ -61,7 +61,8 @@ static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
 /* ------------------------- Class/flow operations ------------------------- */
 
 static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
-                       struct Qdisc *new, struct Qdisc **old)
+                       struct Qdisc *new, struct Qdisc **old,
+                       struct netlink_ext_ack *extack)
 {
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
 
@@ -70,7 +71,7 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
 
        if (new == NULL) {
                new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-                                       sch->handle);
+                                       sch->handle, NULL);
                if (new == NULL)
                        new = &noop_qdisc;
        }
@@ -112,7 +113,8 @@ static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
 };
 
 static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
-                        struct nlattr **tca, unsigned long *arg)
+                        struct nlattr **tca, unsigned long *arg,
+                        struct netlink_ext_ack *extack)
 {
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        struct nlattr *opt = tca[TCA_OPTIONS];
@@ -184,7 +186,8 @@ ignore:
        }
 }
 
-static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                         struct netlink_ext_ack *extack)
 {
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
 
@@ -330,7 +333,8 @@ static struct sk_buff *dsmark_peek(struct Qdisc *sch)
        return p->q->ops->peek(p->q);
 }
 
-static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
+static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
+                      struct netlink_ext_ack *extack)
 {
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        struct nlattr *tb[TCA_DSMARK_MAX + 1];
@@ -344,7 +348,7 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
        if (!opt)
                goto errout;
 
-       err = tcf_block_get(&p->block, &p->filter_list, sch);
+       err = tcf_block_get(&p->block, &p->filter_list, sch, extack);
        if (err)
                return err;
 
@@ -377,7 +381,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
        p->default_index = default_index;
        p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);
 
-       p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle);
+       p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle,
+                                NULL);
        if (p->q == NULL)
                p->q = &noop_qdisc;
        else
index 1e37247656f80e5c3368538c2f73b5f86c77ad50..24893d3b5d229d27ec05760815ab222ae052271b 100644 (file)
@@ -55,7 +55,8 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        return NET_XMIT_CN;
 }
 
-static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
+static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
+                    struct netlink_ext_ack *extack)
 {
        bool bypass;
        bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
@@ -157,7 +158,7 @@ int fifo_set_limit(struct Qdisc *q, unsigned int limit)
                nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
                ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;
 
-               ret = q->ops->change(q, nla);
+               ret = q->ops->change(q, nla, NULL);
                kfree(nla);
        }
        return ret;
@@ -165,12 +166,14 @@ int fifo_set_limit(struct Qdisc *q, unsigned int limit)
 EXPORT_SYMBOL(fifo_set_limit);
 
 struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
-                              unsigned int limit)
+                              unsigned int limit,
+                              struct netlink_ext_ack *extack)
 {
        struct Qdisc *q;
        int err = -ENOMEM;
 
-       q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1));
+       q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1),
+                             extack);
        if (q) {
                err = fifo_set_limit(q, limit);
                if (err < 0) {
index 263d16e3219e6d747496619fe5fedd03c22ba44f..a366e4c9413ab4fe4dfb16f0255cb7632ade7f1c 100644 (file)
@@ -685,7 +685,8 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
        [TCA_FQ_LOW_RATE_THRESHOLD]     = { .type = NLA_U32 },
 };
 
-static int fq_change(struct Qdisc *sch, struct nlattr *opt)
+static int fq_change(struct Qdisc *sch, struct nlattr *opt,
+                    struct netlink_ext_ack *extack)
 {
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_MAX + 1];
@@ -788,7 +789,8 @@ static void fq_destroy(struct Qdisc *sch)
        qdisc_watchdog_cancel(&q->watchdog);
 }
 
-static int fq_init(struct Qdisc *sch, struct nlattr *opt)
+static int fq_init(struct Qdisc *sch, struct nlattr *opt,
+                  struct netlink_ext_ack *extack)
 {
        struct fq_sched_data *q = qdisc_priv(sch);
        int err;
@@ -811,7 +813,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
        qdisc_watchdog_init(&q->watchdog, sch);
 
        if (opt)
-               err = fq_change(sch, opt);
+               err = fq_change(sch, opt, extack);
        else
                err = fq_resize(sch, q->fq_trees_log);
 
index 0305d791ea943f5c7d108b2e5c877b2e16f0b6a5..22fa13cf5d8b89916cb004438ed5132adbad521a 100644 (file)
@@ -377,7 +377,8 @@ static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
        [TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
 };
 
-static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
+static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
+                          struct netlink_ext_ack *extack)
 {
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
@@ -458,7 +459,8 @@ static void fq_codel_destroy(struct Qdisc *sch)
        kvfree(q->flows);
 }
 
-static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
+static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
+                        struct netlink_ext_ack *extack)
 {
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        int i;
@@ -477,12 +479,12 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
        q->cparams.mtu = psched_mtu(qdisc_dev(sch));
 
        if (opt) {
-               int err = fq_codel_change(sch, opt);
+               int err = fq_codel_change(sch, opt, extack);
                if (err)
                        return err;
        }
 
-       err = tcf_block_get(&q->block, &q->filter_list, sch);
+       err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                return err;
 
@@ -595,7 +597,8 @@ static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
 {
 }
 
-static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                           struct netlink_ext_ack *extack)
 {
        struct fq_codel_sched_data *q = qdisc_priv(sch);
 
index 981c08fe810b4df91ddd0cd1cfaa0f7078feb5b5..28b2a7964133c5517661f329c7660b4388bf52d8 100644 (file)
@@ -32,6 +32,7 @@
 #include <net/pkt_sched.h>
 #include <net/dst.h>
 #include <trace/events/qdisc.h>
+#include <net/xfrm.h>
 
 /* Qdisc to use by default */
 const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
@@ -230,6 +231,8 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 
                /* skb in gso_skb were already validated */
                *validate = false;
+               if (xfrm_offload(skb))
+                       *validate = true;
                /* check the reason of requeuing without tx lock first */
                txq = skb_get_tx_queue(txq->dev, skb);
                if (!netif_xmit_frozen_or_stopped(txq)) {
@@ -285,6 +288,7 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                     spinlock_t *root_lock, bool validate)
 {
        int ret = NETDEV_TX_BUSY;
+       bool again = false;
 
        /* And release qdisc */
        if (root_lock)
@@ -292,7 +296,17 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 
        /* Note that we validate skb (GSO, checksum, ...) outside of locks */
        if (validate)
-               skb = validate_xmit_skb_list(skb, dev);
+               skb = validate_xmit_skb_list(skb, dev, &again);
+
+#ifdef CONFIG_XFRM_OFFLOAD
+       if (unlikely(again)) {
+               if (root_lock)
+                       spin_lock(root_lock);
+
+               dev_requeue_skb(skb, q);
+               return false;
+       }
+#endif
 
        if (likely(skb)) {
                HARD_TX_LOCK(dev, txq, smp_processor_id());
@@ -551,7 +565,8 @@ struct Qdisc noop_qdisc = {
 };
 EXPORT_SYMBOL(noop_qdisc);
 
-static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt)
+static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
+                       struct netlink_ext_ack *extack)
 {
        /* register_qdisc() assigns a default of noop_enqueue if unset,
         * but __dev_queue_xmit() treats noqueue only as such
@@ -659,6 +674,12 @@ static void pfifo_fast_reset(struct Qdisc *qdisc)
                struct skb_array *q = band2list(priv, band);
                struct sk_buff *skb;
 
+               /* NULL ring is possible if destroy path is due to a failed
+                * skb_array_init() in pfifo_fast_init() case.
+                */
+               if (!q->ring.queue)
+                       continue;
+
                while ((skb = skb_array_consume_bh(q)) != NULL)
                        kfree_skb(skb);
        }
@@ -684,7 +705,8 @@ nla_put_failure:
        return -1;
 }
 
-static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
+static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
+                          struct netlink_ext_ack *extack)
 {
        unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
@@ -719,7 +741,7 @@ static void pfifo_fast_destroy(struct Qdisc *sch)
                /* NULL ring is possible if destroy path is due to a failed
                 * skb_array_init() in pfifo_fast_init() case.
                 */
-               if (!&q->ring.queue)
+               if (!q->ring.queue)
                        continue;
                /* Destroy ring but no need to kfree_skb because a call to
                 * pfifo_fast_reset() has already done that work.
@@ -747,7 +769,8 @@ static struct lock_class_key qdisc_tx_busylock;
 static struct lock_class_key qdisc_running_key;
 
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
-                         const struct Qdisc_ops *ops)
+                         const struct Qdisc_ops *ops,
+                         struct netlink_ext_ack *extack)
 {
        void *p;
        struct Qdisc *sch;
@@ -756,6 +779,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
        struct net_device *dev;
 
        if (!dev_queue) {
+               NL_SET_ERR_MSG(extack, "No device queue given");
                err = -EINVAL;
                goto errout;
        }
@@ -820,21 +844,24 @@ errout:
 
 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                const struct Qdisc_ops *ops,
-                               unsigned int parentid)
+                               unsigned int parentid,
+                               struct netlink_ext_ack *extack)
 {
        struct Qdisc *sch;
 
-       if (!try_module_get(ops->owner))
+       if (!try_module_get(ops->owner)) {
+               NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
                return NULL;
+       }
 
-       sch = qdisc_alloc(dev_queue, ops);
+       sch = qdisc_alloc(dev_queue, ops, extack);
        if (IS_ERR(sch)) {
                module_put(ops->owner);
                return NULL;
        }
        sch->parent = parentid;
 
-       if (!ops->init || ops->init(sch, NULL) == 0)
+       if (!ops->init || ops->init(sch, NULL, extack) == 0)
                return sch;
 
        qdisc_destroy(sch);
@@ -946,7 +973,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
        if (dev->priv_flags & IFF_NO_QUEUE)
                ops = &noqueue_qdisc_ops;
 
-       qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT);
+       qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
        if (!qdisc) {
                netdev_info(dev, "activation failed\n");
                return;
@@ -969,7 +996,7 @@ static void attach_default_qdiscs(struct net_device *dev)
                dev->qdisc = txq->qdisc_sleeping;
                qdisc_refcount_inc(dev->qdisc);
        } else {
-               qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
+               qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
                if (qdisc) {
                        dev->qdisc = qdisc;
                        qdisc->ops->attach(qdisc);
index bc30f9186ac67cd7b1c21d4d2b0035d3a6b886af..cbe4831f46f4f3913a4e94a9444ad5aa6206c9df 100644 (file)
@@ -306,12 +306,13 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
        struct tc_gred_sopt *sopt;
        int i;
 
-       if (dps == NULL)
+       if (!dps)
                return -EINVAL;
 
        sopt = nla_data(dps);
 
-       if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
+       if (sopt->DPs > MAX_DPs || sopt->DPs == 0 ||
+           sopt->def_DP >= sopt->DPs)
                return -EINVAL;
 
        sch_tree_lock(sch);
@@ -391,7 +392,8 @@ static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
        [TCA_GRED_LIMIT]        = { .type = NLA_U32 },
 };
 
-static int gred_change(struct Qdisc *sch, struct nlattr *opt)
+static int gred_change(struct Qdisc *sch, struct nlattr *opt,
+                      struct netlink_ext_ack *extack)
 {
        struct gred_sched *table = qdisc_priv(sch);
        struct tc_gred_qopt *ctl;
@@ -465,12 +467,13 @@ errout:
        return err;
 }
 
-static int gred_init(struct Qdisc *sch, struct nlattr *opt)
+static int gred_init(struct Qdisc *sch, struct nlattr *opt,
+                    struct netlink_ext_ack *extack)
 {
        struct nlattr *tb[TCA_GRED_MAX + 1];
        int err;
 
-       if (opt == NULL)
+       if (!opt)
                return -EINVAL;
 
        err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, NULL);
index d04068a97d81fd06d138c3ec277ac9309852601e..3ae9877ea2057d0ba517c84d38f6ba6a79ff6ef8 100644 (file)
@@ -921,7 +921,8 @@ static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
 
 static int
 hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-                 struct nlattr **tca, unsigned long *arg)
+                 struct nlattr **tca, unsigned long *arg,
+                 struct netlink_ext_ack *extack)
 {
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl = (struct hfsc_class *)*arg;
@@ -1033,7 +1034,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        if (cl == NULL)
                return -ENOBUFS;
 
-       err = tcf_block_get(&cl->block, &cl->filter_list, sch);
+       err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
        if (err) {
                kfree(cl);
                return err;
@@ -1061,8 +1062,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        cl->cl_common.classid = classid;
        cl->sched     = q;
        cl->cl_parent = parent;
-       cl->qdisc = qdisc_create_dflt(sch->dev_queue,
-                                     &pfifo_qdisc_ops, classid);
+       cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+                                     classid, NULL);
        if (cl->qdisc == NULL)
                cl->qdisc = &noop_qdisc;
        else
@@ -1176,7 +1177,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 
 static int
 hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-                struct Qdisc **old)
+                struct Qdisc **old, struct netlink_ext_ack *extack)
 {
        struct hfsc_class *cl = (struct hfsc_class *)arg;
 
@@ -1184,7 +1185,7 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                return -EINVAL;
        if (new == NULL) {
                new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-                                       cl->cl_common.classid);
+                                       cl->cl_common.classid, NULL);
                if (new == NULL)
                        new = &noop_qdisc;
        }
@@ -1246,7 +1247,8 @@ hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
        cl->filter_cnt--;
 }
 
-static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg)
+static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg,
+                                       struct netlink_ext_ack *extack)
 {
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl = (struct hfsc_class *)arg;
@@ -1388,7 +1390,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
 }
 
 static int
-hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
+hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
+               struct netlink_ext_ack *extack)
 {
        struct hfsc_sched *q = qdisc_priv(sch);
        struct tc_hfsc_qopt *qopt;
@@ -1396,7 +1399,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 
        qdisc_watchdog_init(&q->watchdog, sch);
 
-       if (opt == NULL || nla_len(opt) < sizeof(*qopt))
+       if (!opt || nla_len(opt) < sizeof(*qopt))
                return -EINVAL;
        qopt = nla_data(opt);
 
@@ -1406,14 +1409,14 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
                return err;
        q->eligible = RB_ROOT;
 
-       err = tcf_block_get(&q->root.block, &q->root.filter_list, sch);
+       err = tcf_block_get(&q->root.block, &q->root.filter_list, sch, extack);
        if (err)
                return err;
 
        q->root.cl_common.classid = sch->handle;
        q->root.sched   = q;
        q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-                                         sch->handle);
+                                         sch->handle, NULL);
        if (q->root.qdisc == NULL)
                q->root.qdisc = &noop_qdisc;
        else
@@ -1429,7 +1432,8 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 }
 
 static int
-hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
+hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt,
+                 struct netlink_ext_ack *extack)
 {
        struct hfsc_sched *q = qdisc_priv(sch);
        struct tc_hfsc_qopt *qopt;
index 73a53c08091baafde3ef776ce9ea99cac9edfd9d..bce2632212d3e795b9ff5cac779f25a5c94bcac3 100644 (file)
@@ -504,7 +504,8 @@ static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = {
        [TCA_HHF_NON_HH_WEIGHT]  = { .type = NLA_U32 },
 };
 
-static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
+static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
+                     struct netlink_ext_ack *extack)
 {
        struct hhf_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_HHF_MAX + 1];
@@ -571,7 +572,8 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
        return 0;
 }
 
-static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
+static int hhf_init(struct Qdisc *sch, struct nlattr *opt,
+                   struct netlink_ext_ack *extack)
 {
        struct hhf_sched_data *q = qdisc_priv(sch);
        int i;
@@ -589,7 +591,7 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
        q->hhf_non_hh_weight = 2;
 
        if (opt) {
-               int err = hhf_change(sch, opt);
+               int err = hhf_change(sch, opt, extack);
 
                if (err)
                        return err;
index fa0380730ff0513d043e25011181efabd9be8fe5..1ea9846cc6ce91f4f0455d2c493e31750fd65478 100644 (file)
@@ -1017,7 +1017,8 @@ static void htb_work_func(struct work_struct *work)
        rcu_read_unlock();
 }
 
-static int htb_init(struct Qdisc *sch, struct nlattr *opt)
+static int htb_init(struct Qdisc *sch, struct nlattr *opt,
+                   struct netlink_ext_ack *extack)
 {
        struct htb_sched *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_HTB_MAX + 1];
@@ -1031,7 +1032,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
        if (!opt)
                return -EINVAL;
 
-       err = tcf_block_get(&q->block, &q->filter_list, sch);
+       err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                return err;
 
@@ -1171,7 +1172,7 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 }
 
 static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-                    struct Qdisc **old)
+                    struct Qdisc **old, struct netlink_ext_ack *extack)
 {
        struct htb_class *cl = (struct htb_class *)arg;
 
@@ -1179,7 +1180,7 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                return -EINVAL;
        if (new == NULL &&
            (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-                                    cl->common.classid)) == NULL)
+                                    cl->common.classid, extack)) == NULL)
                return -ENOBUFS;
 
        *old = qdisc_replace(sch, new, &cl->un.leaf.q);
@@ -1289,7 +1290,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 
        if (!cl->level && htb_parent_last_child(cl)) {
                new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-                                         cl->parent->common.classid);
+                                         cl->parent->common.classid,
+                                         NULL);
                last_child = 1;
        }
 
@@ -1326,7 +1328,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 
 static int htb_change_class(struct Qdisc *sch, u32 classid,
                            u32 parentid, struct nlattr **tca,
-                           unsigned long *arg)
+                           unsigned long *arg, struct netlink_ext_ack *extack)
 {
        int err = -EINVAL;
        struct htb_sched *q = qdisc_priv(sch);
@@ -1356,10 +1358,12 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 
        /* Keeping backward compatible with rate_table based iproute2 tc */
        if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
-               qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]));
+               qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
+                                             NULL));
 
        if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
-               qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]));
+               qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
+                                             NULL));
 
        if (!cl) {              /* new class */
                struct Qdisc *new_q;
@@ -1394,7 +1398,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                if (!cl)
                        goto failure;
 
-               err = tcf_block_get(&cl->block, &cl->filter_list, sch);
+               err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
                if (err) {
                        kfree(cl);
                        goto failure;
@@ -1423,8 +1427,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                 * so that can't be used inside of sch_tree_lock
                 * -- thanks to Karlis Peisenieks
                 */
-               new_q = qdisc_create_dflt(sch->dev_queue,
-                                         &pfifo_qdisc_ops, classid);
+               new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+                                         classid, NULL);
                sch_tree_lock(sch);
                if (parent && !parent->level) {
                        unsigned int qlen = parent->un.leaf.q->q.qlen;
@@ -1524,7 +1528,8 @@ failure:
        return err;
 }
 
-static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg)
+static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
+                                      struct netlink_ext_ack *extack)
 {
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl = (struct htb_class *)arg;
index fc1286f499c1462ab29c5054f734237788974e0e..7ca2be20dd6fbce91121e1e6f955b3615ebb2d3c 100644 (file)
@@ -48,7 +48,8 @@ static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
 {
 }
 
-static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                          struct netlink_ext_ack *extack)
 {
        struct ingress_sched_data *q = qdisc_priv(sch);
 
@@ -62,7 +63,8 @@ static void clsact_chain_head_change(struct tcf_proto *tp_head, void *priv)
        mini_qdisc_pair_swap(miniqp, tp_head);
 }
 
-static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
+static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
+                       struct netlink_ext_ack *extack)
 {
        struct ingress_sched_data *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
@@ -76,7 +78,7 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
        q->block_info.chain_head_change = clsact_chain_head_change;
        q->block_info.chain_head_change_priv = &q->miniqp;
 
-       err = tcf_block_get_ext(&q->block, sch, &q->block_info);
+       err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
        if (err)
                return err;
 
@@ -153,7 +155,8 @@ static unsigned long clsact_bind_filter(struct Qdisc *sch,
        return clsact_find(sch, classid);
 }
 
-static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                         struct netlink_ext_ack *extack)
 {
        struct clsact_sched_data *q = qdisc_priv(sch);
 
@@ -167,7 +170,8 @@ static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl)
        }
 }
 
-static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
+static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
+                      struct netlink_ext_ack *extack)
 {
        struct clsact_sched_data *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
@@ -182,7 +186,8 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
        q->ingress_block_info.chain_head_change = clsact_chain_head_change;
        q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress;
 
-       err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info);
+       err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info,
+                               extack);
        if (err)
                return err;
 
@@ -192,7 +197,8 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
        q->egress_block_info.chain_head_change = clsact_chain_head_change;
        q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;
 
-       err = tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info);
+       err = tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info,
+                               extack);
        if (err)
                return err;
 
index 8cbb5c829d59fcec6b125d87d2e0b2edeb7d7007..f062a18e9162ad215343023b69c3e87a81cf6651 100644 (file)
@@ -36,7 +36,8 @@ static void mq_destroy(struct Qdisc *sch)
        kfree(priv->qdiscs);
 }
 
-static int mq_init(struct Qdisc *sch, struct nlattr *opt)
+static int mq_init(struct Qdisc *sch, struct nlattr *opt,
+                  struct netlink_ext_ack *extack)
 {
        struct net_device *dev = qdisc_dev(sch);
        struct mq_sched *priv = qdisc_priv(sch);
@@ -60,7 +61,8 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
                dev_queue = netdev_get_tx_queue(dev, ntx);
                qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
                                          TC_H_MAKE(TC_H_MAJ(sch->handle),
-                                                   TC_H_MIN(ntx + 1)));
+                                                   TC_H_MIN(ntx + 1)),
+                                         extack);
                if (!qdisc)
                        return -ENOMEM;
                priv->qdiscs[ntx] = qdisc;
@@ -154,7 +156,7 @@ static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
 }
 
 static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
-                   struct Qdisc **old)
+                   struct Qdisc **old, struct netlink_ext_ack *extack)
 {
        struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
        struct net_device *dev = qdisc_dev(sch);
index 8622745f3cd91f1e4ec2a971fcc812bdcff9d148..0e9d761cdd80a18bdfc824503dcd8e5a8ee5c596 100644 (file)
@@ -132,7 +132,8 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
        return 0;
 }
 
-static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
+static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
+                      struct netlink_ext_ack *extack)
 {
        struct net_device *dev = qdisc_dev(sch);
        struct mqprio_sched *priv = qdisc_priv(sch);
@@ -229,7 +230,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
                qdisc = qdisc_create_dflt(dev_queue,
                                          get_default_qdisc_ops(dev, i),
                                          TC_H_MAKE(TC_H_MAJ(sch->handle),
-                                                   TC_H_MIN(i + 1)));
+                                                   TC_H_MIN(i + 1)), extack);
                if (!qdisc)
                        return -ENOMEM;
 
@@ -319,7 +320,7 @@ static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
 }
 
 static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
-                   struct Qdisc **old)
+                       struct Qdisc **old, struct netlink_ext_ack *extack)
 {
        struct net_device *dev = qdisc_dev(sch);
        struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
index 012216386c0b74875e39a45449401897e6fdbb54..1da7ea8de0ad37a3ee1615b0f5aae68391b08842 100644 (file)
@@ -180,7 +180,8 @@ multiq_destroy(struct Qdisc *sch)
        kfree(q->queues);
 }
 
-static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
+static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
+                      struct netlink_ext_ack *extack)
 {
        struct multiq_sched_data *q = qdisc_priv(sch);
        struct tc_multiq_qopt *qopt;
@@ -215,7 +216,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
                        child = qdisc_create_dflt(sch->dev_queue,
                                                  &pfifo_qdisc_ops,
                                                  TC_H_MAKE(sch->handle,
-                                                           i + 1));
+                                                           i + 1), extack);
                        if (child) {
                                sch_tree_lock(sch);
                                old = q->queues[i];
@@ -236,17 +237,18 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
        return 0;
 }
 
-static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
+static int multiq_init(struct Qdisc *sch, struct nlattr *opt,
+                      struct netlink_ext_ack *extack)
 {
        struct multiq_sched_data *q = qdisc_priv(sch);
        int i, err;
 
        q->queues = NULL;
 
-       if (opt == NULL)
+       if (!opt)
                return -EINVAL;
 
-       err = tcf_block_get(&q->block, &q->filter_list, sch);
+       err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                return err;
 
@@ -258,7 +260,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
        for (i = 0; i < q->max_bands; i++)
                q->queues[i] = &noop_qdisc;
 
-       return multiq_tune(sch, opt);
+       return multiq_tune(sch, opt, extack);
 }
 
 static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -281,7 +283,7 @@ nla_put_failure:
 }
 
 static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-                     struct Qdisc **old)
+                       struct Qdisc **old, struct netlink_ext_ack *extack)
 {
        struct multiq_sched_data *q = qdisc_priv(sch);
        unsigned long band = arg - 1;
@@ -369,7 +371,8 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
        }
 }
 
-static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                         struct netlink_ext_ack *extack)
 {
        struct multiq_sched_data *q = qdisc_priv(sch);
 
index dd70924cbcdfe4d078f05d346cacf83abb1a9f2e..7bbc13b8ca47f46ea1ae05e1568a72792a9cea2a 100644 (file)
@@ -893,7 +893,8 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
 }
 
 /* Parse netlink message to set options */
-static int netem_change(struct Qdisc *sch, struct nlattr *opt)
+static int netem_change(struct Qdisc *sch, struct nlattr *opt,
+                       struct netlink_ext_ack *extack)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_NETEM_MAX + 1];
@@ -984,7 +985,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
        return ret;
 }
 
-static int netem_init(struct Qdisc *sch, struct nlattr *opt)
+static int netem_init(struct Qdisc *sch, struct nlattr *opt,
+                     struct netlink_ext_ack *extack)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
        int ret;
@@ -995,7 +997,7 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt)
                return -EINVAL;
 
        q->loss_model = CLG_RANDOM;
-       ret = netem_change(sch, opt);
+       ret = netem_change(sch, opt, extack);
        if (ret)
                pr_info("netem: change failed\n");
        return ret;
@@ -1157,7 +1159,7 @@ static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
 }
 
 static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-                    struct Qdisc **old)
+                    struct Qdisc **old, struct netlink_ext_ack *extack)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
 
index 776c694c77c71f14795cc0e0033bdedb7c461e7f..18d30bb86881aba8a5c5521181cba11038945672 100644 (file)
@@ -181,7 +181,8 @@ static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
        [TCA_PIE_BYTEMODE] = {.type = NLA_U32},
 };
 
-static int pie_change(struct Qdisc *sch, struct nlattr *opt)
+static int pie_change(struct Qdisc *sch, struct nlattr *opt,
+                     struct netlink_ext_ack *extack)
 {
        struct pie_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_PIE_MAX + 1];
@@ -439,7 +440,8 @@ static void pie_timer(struct timer_list *t)
 
 }
 
-static int pie_init(struct Qdisc *sch, struct nlattr *opt)
+static int pie_init(struct Qdisc *sch, struct nlattr *opt,
+                   struct netlink_ext_ack *extack)
 {
        struct pie_sched_data *q = qdisc_priv(sch);
 
@@ -451,7 +453,7 @@ static int pie_init(struct Qdisc *sch, struct nlattr *opt)
        timer_setup(&q->adapt_timer, pie_timer, 0);
 
        if (opt) {
-               int err = pie_change(sch, opt);
+               int err = pie_change(sch, opt, extack);
 
                if (err)
                        return err;
index 1c6cbab3e7b993fba3c836cb24ab664afeb6db66..5619d2eb17b690d5e73907ff958bb76f608130fa 100644 (file)
@@ -123,7 +123,8 @@ static struct sk_buff *plug_dequeue(struct Qdisc *sch)
        return qdisc_dequeue_head(sch);
 }
 
-static int plug_init(struct Qdisc *sch, struct nlattr *opt)
+static int plug_init(struct Qdisc *sch, struct nlattr *opt,
+                    struct netlink_ext_ack *extack)
 {
        struct plug_sched_data *q = qdisc_priv(sch);
 
@@ -158,7 +159,8 @@ static int plug_init(struct Qdisc *sch, struct nlattr *opt)
  *   command is received (just act as a pass-thru queue).
  * TCQ_PLUG_LIMIT: Increase/decrease queue size
  */
-static int plug_change(struct Qdisc *sch, struct nlattr *opt)
+static int plug_change(struct Qdisc *sch, struct nlattr *opt,
+                      struct netlink_ext_ack *extack)
 {
        struct plug_sched_data *q = qdisc_priv(sch);
        struct tc_plug_qopt *msg;
index 2c79559a0d310eb1d7020270a65139a7c79504d2..fe1510eb111ff506c047063cbe516e5742d7343f 100644 (file)
@@ -153,7 +153,8 @@ prio_destroy(struct Qdisc *sch)
                qdisc_destroy(q->queues[prio]);
 }
 
-static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
+static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
+                    struct netlink_ext_ack *extack)
 {
        struct prio_sched_data *q = qdisc_priv(sch);
        struct Qdisc *queues[TCQ_PRIO_BANDS];
@@ -175,7 +176,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
        /* Before commit, make sure we can allocate all new qdiscs */
        for (i = oldbands; i < qopt->bands; i++) {
                queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-                                             TC_H_MAKE(sch->handle, i + 1));
+                                             TC_H_MAKE(sch->handle, i + 1),
+                                             extack);
                if (!queues[i]) {
                        while (i > oldbands)
                                qdisc_destroy(queues[--i]);
@@ -205,7 +207,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
        return 0;
 }
 
-static int prio_init(struct Qdisc *sch, struct nlattr *opt)
+static int prio_init(struct Qdisc *sch, struct nlattr *opt,
+                    struct netlink_ext_ack *extack)
 {
        struct prio_sched_data *q = qdisc_priv(sch);
        int err;
@@ -213,11 +216,11 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt)
        if (!opt)
                return -EINVAL;
 
-       err = tcf_block_get(&q->block, &q->filter_list, sch);
+       err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                return err;
 
-       return prio_tune(sch, opt);
+       return prio_tune(sch, opt, extack);
 }
 
 static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -240,7 +243,7 @@ nla_put_failure:
 }
 
 static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-                     struct Qdisc **old)
+                     struct Qdisc **old, struct netlink_ext_ack *extack)
 {
        struct prio_sched_data *q = qdisc_priv(sch);
        unsigned long band = arg - 1;
@@ -327,7 +330,8 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
        }
 }
 
-static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                       struct netlink_ext_ack *extack)
 {
        struct prio_sched_data *q = qdisc_priv(sch);
 
index 6962b37a3ad31d9cfbc4fdf34d63c42871a00506..bb1a9c11fc549a170172e3d7deaf9693b18e7332 100644 (file)
@@ -402,7 +402,8 @@ static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
 }
 
 static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-                           struct nlattr **tca, unsigned long *arg)
+                           struct nlattr **tca, unsigned long *arg,
+                           struct netlink_ext_ack *extack)
 {
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_class *cl = (struct qfq_class *)*arg;
@@ -479,8 +480,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        cl->common.classid = classid;
        cl->deficit = lmax;
 
-       cl->qdisc = qdisc_create_dflt(sch->dev_queue,
-                                     &pfifo_qdisc_ops, classid);
+       cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+                                     classid, NULL);
        if (cl->qdisc == NULL)
                cl->qdisc = &noop_qdisc;
 
@@ -564,7 +565,8 @@ static unsigned long qfq_search_class(struct Qdisc *sch, u32 classid)
        return (unsigned long)qfq_find_class(sch, classid);
 }
 
-static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                      struct netlink_ext_ack *extack)
 {
        struct qfq_sched *q = qdisc_priv(sch);
 
@@ -593,13 +595,14 @@ static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
 }
 
 static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
-                          struct Qdisc *new, struct Qdisc **old)
+                          struct Qdisc *new, struct Qdisc **old,
+                          struct netlink_ext_ack *extack)
 {
        struct qfq_class *cl = (struct qfq_class *)arg;
 
        if (new == NULL) {
-               new = qdisc_create_dflt(sch->dev_queue,
-                                       &pfifo_qdisc_ops, cl->common.classid);
+               new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+                                       cl->common.classid, NULL);
                if (new == NULL)
                        new = &noop_qdisc;
        }
@@ -1413,14 +1416,15 @@ static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
        qfq_deactivate_class(q, cl);
 }
 
-static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
+static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
+                         struct netlink_ext_ack *extack)
 {
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_group *grp;
        int i, j, err;
        u32 max_cl_shift, maxbudg_shift, max_classes;
 
-       err = tcf_block_get(&q->block, &q->filter_list, sch);
+       err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                return err;
 
index f0747eb87dc4784e67e0b5872dcf37effaaa4060..ec0bd36e09a90d736932c04e2c922b18b47b8ca5 100644 (file)
@@ -197,7 +197,8 @@ static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
        [TCA_RED_MAX_P] = { .type = NLA_U32 },
 };
 
-static int red_change(struct Qdisc *sch, struct nlattr *opt)
+static int red_change(struct Qdisc *sch, struct nlattr *opt,
+                     struct netlink_ext_ack *extack)
 {
        struct red_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_RED_MAX + 1];
@@ -224,7 +225,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
                return -EINVAL;
 
        if (ctl->limit > 0) {
-               child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
+               child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
+                                        extack);
                if (IS_ERR(child))
                        return PTR_ERR(child);
        }
@@ -272,14 +274,15 @@ static inline void red_adaptative_timer(struct timer_list *t)
        spin_unlock(root_lock);
 }
 
-static int red_init(struct Qdisc *sch, struct nlattr *opt)
+static int red_init(struct Qdisc *sch, struct nlattr *opt,
+                   struct netlink_ext_ack *extack)
 {
        struct red_sched_data *q = qdisc_priv(sch);
 
        q->qdisc = &noop_qdisc;
        q->sch = sch;
        timer_setup(&q->adapt_timer, red_adaptative_timer, 0);
-       return red_change(sch, opt);
+       return red_change(sch, opt, extack);
 }
 
 static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt)
@@ -380,7 +383,7 @@ static int red_dump_class(struct Qdisc *sch, unsigned long cl,
 }
 
 static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-                    struct Qdisc **old)
+                    struct Qdisc **old, struct netlink_ext_ack *extack)
 {
        struct red_sched_data *q = qdisc_priv(sch);
 
index 0678debdd856f0306448192c63e6d6c5518dc886..7cbdad8419b7f4d9c6441fce0b822db1d058b176 100644 (file)
@@ -488,7 +488,8 @@ static const struct tc_sfb_qopt sfb_default_ops = {
        .penalty_burst = 20,
 };
 
-static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
+static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
+                     struct netlink_ext_ack *extack)
 {
        struct sfb_sched_data *q = qdisc_priv(sch);
        struct Qdisc *child;
@@ -512,7 +513,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
        if (limit == 0)
                limit = qdisc_dev(sch)->tx_queue_len;
 
-       child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
+       child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit, extack);
        if (IS_ERR(child))
                return PTR_ERR(child);
 
@@ -549,17 +550,18 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
        return 0;
 }
 
-static int sfb_init(struct Qdisc *sch, struct nlattr *opt)
+static int sfb_init(struct Qdisc *sch, struct nlattr *opt,
+                   struct netlink_ext_ack *extack)
 {
        struct sfb_sched_data *q = qdisc_priv(sch);
        int err;
 
-       err = tcf_block_get(&q->block, &q->filter_list, sch);
+       err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                return err;
 
        q->qdisc = &noop_qdisc;
-       return sfb_change(sch, opt);
+       return sfb_change(sch, opt, extack);
 }
 
 static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -615,7 +617,7 @@ static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
 }
 
 static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-                    struct Qdisc **old)
+                    struct Qdisc **old, struct netlink_ext_ack *extack)
 {
        struct sfb_sched_data *q = qdisc_priv(sch);
 
@@ -643,7 +645,8 @@ static void sfb_unbind(struct Qdisc *sch, unsigned long arg)
 }
 
 static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-                           struct nlattr **tca, unsigned long *arg)
+                           struct nlattr **tca, unsigned long *arg,
+                           struct netlink_ext_ack *extack)
 {
        return -ENOSYS;
 }
@@ -665,7 +668,8 @@ static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
        }
 }
 
-static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                      struct netlink_ext_ack *extack)
 {
        struct sfb_sched_data *q = qdisc_priv(sch);
 
index 930e5bd26d3d7650a41b9472463c3fc39732495b..2f2678197760ab476c6f817d0429d4b7da1a56d8 100644 (file)
@@ -721,7 +721,8 @@ static void sfq_destroy(struct Qdisc *sch)
        kfree(q->red_parms);
 }
 
-static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
+static int sfq_init(struct Qdisc *sch, struct nlattr *opt,
+                   struct netlink_ext_ack *extack)
 {
        struct sfq_sched_data *q = qdisc_priv(sch);
        int i;
@@ -730,7 +731,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
        q->sch = sch;
        timer_setup(&q->perturb_timer, sfq_perturbation, TIMER_DEFERRABLE);
 
-       err = tcf_block_get(&q->block, &q->filter_list, sch);
+       err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                return err;
 
@@ -836,7 +837,8 @@ static void sfq_unbind(struct Qdisc *q, unsigned long cl)
 {
 }
 
-static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                      struct netlink_ext_ack *extack)
 {
        struct sfq_sched_data *q = qdisc_priv(sch);
 
index 120f4f36596786746b89a2832c125e1814d6fd9b..83e76d046993ec568f3425d25ff23fe5b42965ef 100644 (file)
@@ -302,7 +302,8 @@ static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
        [TCA_TBF_PBURST] = { .type = NLA_U32 },
 };
 
-static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
+static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
+                     struct netlink_ext_ack *extack)
 {
        int err;
        struct tbf_sched_data *q = qdisc_priv(sch);
@@ -326,11 +327,13 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
        qopt = nla_data(tb[TCA_TBF_PARMS]);
        if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
                qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
-                                             tb[TCA_TBF_RTAB]));
+                                             tb[TCA_TBF_RTAB],
+                                             NULL));
 
        if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
                        qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
-                                                     tb[TCA_TBF_PTAB]));
+                                                     tb[TCA_TBF_PTAB],
+                                                     NULL));
 
        buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
        mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);
@@ -383,7 +386,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
                if (err)
                        goto done;
        } else if (qopt->limit > 0) {
-               child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
+               child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit,
+                                        extack);
                if (IS_ERR(child)) {
                        err = PTR_ERR(child);
                        goto done;
@@ -421,19 +425,20 @@ done:
        return err;
 }
 
-static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
+static int tbf_init(struct Qdisc *sch, struct nlattr *opt,
+                   struct netlink_ext_ack *extack)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
 
        qdisc_watchdog_init(&q->watchdog, sch);
        q->qdisc = &noop_qdisc;
 
-       if (opt == NULL)
+       if (!opt)
                return -EINVAL;
 
        q->t_c = ktime_get_ns();
 
-       return tbf_change(sch, opt);
+       return tbf_change(sch, opt, extack);
 }
 
 static void tbf_destroy(struct Qdisc *sch)
@@ -494,7 +499,7 @@ static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
 }
 
 static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-                    struct Qdisc **old)
+                    struct Qdisc **old, struct netlink_ext_ack *extack)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
 
index 9fe6b427afed01dc383e5fce1908c1dce62a7ca5..93f04cf5cac12f4f76f7d5e9fb0b286d0632948e 100644 (file)
@@ -167,7 +167,8 @@ teql_destroy(struct Qdisc *sch)
        }
 }
 
-static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
+static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
+                          struct netlink_ext_ack *extack)
 {
        struct net_device *dev = qdisc_dev(sch);
        struct teql_master *m = (struct teql_master *)sch->ops;
index 3f619fdcbf0a0b4a6f35ece8021c011f874a2d79..291c97b07058218635fcfcd06214aa79d74ec80d 100644 (file)
@@ -78,6 +78,9 @@ const char *sctp_cname(const union sctp_subtype cid)
        case SCTP_CID_AUTH:
                return "AUTH";
 
+       case SCTP_CID_RECONF:
+               return "RECONF";
+
        default:
                break;
        }
index ee1e601a0b116b4dd705bd5e7735102092f280b0..8b31468165197428239fe7e6ddece6f60878c39f 100644 (file)
@@ -232,7 +232,7 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
 {
        ep->base.dead = true;
 
-       ep->base.sk->sk_state = SCTP_SS_CLOSED;
+       inet_sk_set_state(ep->base.sk, SCTP_SS_CLOSED);
 
        /* Unlink this endpoint, so we can't find it again! */
        sctp_unhash_endpoint(ep);
index 16ddf2ca14386d9c3d893d7d81677fb51fa4ffbb..b71e7fb0a20af5f6bb6810e03c44154cd2c2b9f6 100644 (file)
@@ -878,12 +878,12 @@ static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
                 * successfully completed a connect() call.
                 */
                if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
-                       sk->sk_state = SCTP_SS_ESTABLISHED;
+                       inet_sk_set_state(sk, SCTP_SS_ESTABLISHED);
 
                /* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
                if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
                    sctp_sstate(sk, ESTABLISHED)) {
-                       sk->sk_state = SCTP_SS_CLOSING;
+                       inet_sk_set_state(sk, SCTP_SS_CLOSING);
                        sk->sk_shutdown |= RCV_SHUTDOWN;
                }
        }
index 5e4100df7baeaaf6b721679ecb934b581124eda8..aadcd4244d9be65a813d69adc005fd7ed74ae6f8 100644 (file)
@@ -1544,7 +1544,7 @@ static void sctp_close(struct sock *sk, long timeout)
 
        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
        sk->sk_shutdown = SHUTDOWN_MASK;
-       sk->sk_state = SCTP_SS_CLOSING;
+       inet_sk_set_state(sk, SCTP_SS_CLOSING);
 
        ep = sctp_sk(sk)->ep;
 
@@ -4657,7 +4657,7 @@ static void sctp_shutdown(struct sock *sk, int how)
        if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) {
                struct sctp_association *asoc;
 
-               sk->sk_state = SCTP_SS_CLOSING;
+               inet_sk_set_state(sk, SCTP_SS_CLOSING);
                asoc = list_entry(ep->asocs.next,
                                  struct sctp_association, asocs);
                sctp_primitive_SHUTDOWN(net, asoc, NULL);
@@ -7513,13 +7513,13 @@ static int sctp_listen_start(struct sock *sk, int backlog)
         * sockets.
         *
         */
-       sk->sk_state = SCTP_SS_LISTENING;
+       inet_sk_set_state(sk, SCTP_SS_LISTENING);
        if (!ep->base.bind_addr.port) {
                if (sctp_autobind(sk))
                        return -EAGAIN;
        } else {
                if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
-                       sk->sk_state = SCTP_SS_CLOSED;
+                       inet_sk_set_state(sk, SCTP_SS_CLOSED);
                        return -EADDRINUSE;
                }
        }
@@ -8542,10 +8542,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
         * is called, set RCV_SHUTDOWN flag.
         */
        if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) {
-               newsk->sk_state = SCTP_SS_CLOSED;
+               inet_sk_set_state(newsk, SCTP_SS_CLOSED);
                newsk->sk_shutdown |= RCV_SHUTDOWN;
        } else {
-               newsk->sk_state = SCTP_SS_ESTABLISHED;
+               inet_sk_set_state(newsk, SCTP_SS_ESTABLISHED);
        }
 
        release_sock(newsk);
index 97fae53310e0ea01662a282a45d09519b4c0bd9e..0b427100b0d4a29001b8124ae7c36505d8f01ff7 100644 (file)
@@ -1093,29 +1093,21 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                      gfp_t gfp)
 {
-       struct sctp_association *asoc;
-       __u16 needed, freed;
-
-       asoc = ulpq->asoc;
+       struct sctp_association *asoc = ulpq->asoc;
+       __u32 freed = 0;
+       __u16 needed;
 
-       if (chunk) {
-               needed = ntohs(chunk->chunk_hdr->length);
-               needed -= sizeof(struct sctp_data_chunk);
-       } else
-               needed = SCTP_DEFAULT_MAXWINDOW;
-
-       freed = 0;
+       needed = ntohs(chunk->chunk_hdr->length) -
+                sizeof(struct sctp_data_chunk);
 
        if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
                freed = sctp_ulpq_renege_order(ulpq, needed);
-               if (freed < needed) {
+               if (freed < needed)
                        freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
-               }
        }
        /* If able to free enough room, accept this chunk. */
-       if (chunk && (freed >= needed)) {
-               int retval;
-               retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+       if (freed >= needed) {
+               int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
                /*
                 * Enter partial delivery if chunk has not been
                 * delivered; otherwise, drain the reassembly queue.
index 05f361faec451cdd69168dd5e2cd7c2ca7c8f7fb..bbd2e9ceb692a3a866bd834841053b64ac60bb43 100644 (file)
@@ -162,12 +162,6 @@ static const struct file_operations socket_file_ops = {
 static DEFINE_SPINLOCK(net_family_lock);
 static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
 
-/*
- *     Statistics counters of the socket lists
- */
-
-static DEFINE_PER_CPU(int, sockets_in_use);
-
 /*
  * Support routines.
  * Move socket addresses back and forth across the kernel/user
@@ -578,7 +572,6 @@ struct socket *sock_alloc(void)
        inode->i_gid = current_fsgid();
        inode->i_op = &sockfs_inode_ops;
 
-       this_cpu_add(sockets_in_use, 1);
        return sock;
 }
 EXPORT_SYMBOL(sock_alloc);
@@ -605,7 +598,6 @@ void sock_release(struct socket *sock)
        if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
                pr_err("%s: fasync list not empty!\n", __func__);
 
-       this_cpu_sub(sockets_in_use, 1);
        if (!sock->file) {
                iput(SOCK_INODE(sock));
                return;
@@ -2622,17 +2614,8 @@ core_initcall(sock_init);        /* early initcall */
 #ifdef CONFIG_PROC_FS
 void socket_seq_show(struct seq_file *seq)
 {
-       int cpu;
-       int counter = 0;
-
-       for_each_possible_cpu(cpu)
-           counter += per_cpu(sockets_in_use, cpu);
-
-       /* It can be negative, by the way. 8) */
-       if (counter < 0)
-               counter = 0;
-
-       seq_printf(seq, "sockets: used %d\n", counter);
+       seq_printf(seq, "sockets: used %d\n",
+                  sock_inuse_get(seq->private));
 }
 #endif                         /* CONFIG_PROC_FS */
 
index 95fec2c057d6ebdb223e19ef83bf9c383cb2156e..7ebbdeb2a90e1d8429113b81370ad67f05ee633e 100644 (file)
@@ -351,8 +351,7 @@ void tipc_group_update_member(struct tipc_member *m, int len)
        if (m->window >= ADV_IDLE)
                return;
 
-       if (!list_empty(&m->congested))
-               return;
+       list_del_init(&m->congested);
 
        /* Sort member into congested members' list */
        list_for_each_entry_safe(_m, tmp, &grp->congested, congested) {
@@ -648,6 +647,7 @@ static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
        } else if (mtyp == GRP_REMIT_MSG) {
                msg_set_grp_remitted(hdr, m->window);
        }
+       msg_set_dest_droppable(hdr, true);
        __skb_queue_tail(xmitq, skb);
 }
 
@@ -689,15 +689,16 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
                        msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
                        __skb_queue_tail(inputq, m->event_msg);
                }
-               if (m->window < ADV_IDLE)
-                       tipc_group_update_member(m, 0);
-               else
-                       list_del_init(&m->congested);
+               list_del_init(&m->congested);
+               tipc_group_update_member(m, 0);
                return;
        case GRP_LEAVE_MSG:
                if (!m)
                        return;
                m->bc_syncpt = msg_grp_bc_syncpt(hdr);
+               list_del_init(&m->list);
+               list_del_init(&m->congested);
+               *usr_wakeup = true;
 
                /* Wait until WITHDRAW event is received */
                if (m->state != MBR_LEAVING) {
@@ -709,8 +710,6 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
                ehdr = buf_msg(m->event_msg);
                msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
                __skb_queue_tail(inputq, m->event_msg);
-               *usr_wakeup = true;
-               list_del_init(&m->congested);
                return;
        case GRP_ADV_MSG:
                if (!m)
@@ -862,6 +861,7 @@ void tipc_group_member_evt(struct tipc_group *grp,
                                msg_set_grp_bc_seqno(hdr, m->bc_rcv_nxt);
                        __skb_queue_tail(inputq, skb);
                }
+               list_del_init(&m->list);
                list_del_init(&m->congested);
        }
        *sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
index d7d6cb00c47bbab3c963f4b83839aadcc94606a1..1d84f91bbfb0c8c9087e309821eb687325733358 100644 (file)
@@ -23,27 +23,14 @@ ifneq ($(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR),)
 cfg80211-y += extra-certs.o
 endif
 
-$(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.x509)
+$(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.hex)
        @$(kecho) "  GEN     $@"
-       @(set -e; \
-         allf=""; \
-         for f in $^ ; do \
-             # similar to hexdump -v -e '1/1 "0x%.2x," "\n"' \
-             thisf=$$(od -An -v -tx1 < $$f | \
-                          sed -e 's/ /\n/g' | \
-                          sed -e 's/^[0-9a-f]\+$$/\0/;t;d' | \
-                          sed -e 's/^/0x/;s/$$/,/'); \
-             # file should not be empty - maybe command substitution failed? \
-             test ! -z "$$thisf";\
-             allf=$$allf$$thisf;\
-         done; \
-         ( \
-             echo '#include "reg.h"'; \
-             echo 'const u8 shipped_regdb_certs[] = {'; \
-             echo "$$allf"; \
-             echo '};'; \
-             echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
-         ) >> $@)
+       @(echo '#include "reg.h"'; \
+         echo 'const u8 shipped_regdb_certs[] = {'; \
+         cat $^ ; \
+         echo '};'; \
+         echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
+        ) > $@
 
 $(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%) \
                      $(wildcard $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%)/*.x509)
@@ -66,4 +53,6 @@ $(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%) \
              echo "$$allf"; \
              echo '};'; \
              echo 'unsigned int extra_regdb_certs_len = sizeof(extra_regdb_certs);'; \
-         ) >> $@)
+         ) > $@)
+
+clean-files += shipped-certs.c extra-certs.c
diff --git a/net/wireless/certs/sforshee.hex b/net/wireless/certs/sforshee.hex
new file mode 100644 (file)
index 0000000..14ea666
--- /dev/null
@@ -0,0 +1,86 @@
+/* Seth Forshee's regdb certificate */
+0x30, 0x82, 0x02, 0xa4, 0x30, 0x82, 0x01, 0x8c,
+0x02, 0x09, 0x00, 0xb2, 0x8d, 0xdf, 0x47, 0xae,
+0xf9, 0xce, 0xa7, 0x30, 0x0d, 0x06, 0x09, 0x2a,
+0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b,
+0x05, 0x00, 0x30, 0x13, 0x31, 0x11, 0x30, 0x0f,
+0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, 0x73,
+0x66, 0x6f, 0x72, 0x73, 0x68, 0x65, 0x65, 0x30,
+0x20, 0x17, 0x0d, 0x31, 0x37, 0x31, 0x30, 0x30,
+0x36, 0x31, 0x39, 0x34, 0x30, 0x33, 0x35, 0x5a,
+0x18, 0x0f, 0x32, 0x31, 0x31, 0x37, 0x30, 0x39,
+0x31, 0x32, 0x31, 0x39, 0x34, 0x30, 0x33, 0x35,
+0x5a, 0x30, 0x13, 0x31, 0x11, 0x30, 0x0f, 0x06,
+0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, 0x73, 0x66,
+0x6f, 0x72, 0x73, 0x68, 0x65, 0x65, 0x30, 0x82,
+0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86,
+0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05,
+0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82,
+0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xb5,
+0x40, 0xe3, 0x9c, 0x28, 0x84, 0x39, 0x03, 0xf2,
+0x39, 0xd7, 0x66, 0x2c, 0x41, 0x38, 0x15, 0xac,
+0x7e, 0xa5, 0x83, 0x71, 0x25, 0x7e, 0x90, 0x7c,
+0x68, 0xdd, 0x6f, 0x3f, 0xd9, 0xd7, 0x59, 0x38,
+0x9f, 0x7c, 0x6a, 0x52, 0xc2, 0x03, 0x2a, 0x2d,
+0x7e, 0x66, 0xf4, 0x1e, 0xb3, 0x12, 0x70, 0x20,
+0x5b, 0xd4, 0x97, 0x32, 0x3d, 0x71, 0x8b, 0x3b,
+0x1b, 0x08, 0x17, 0x14, 0x6b, 0x61, 0xc4, 0x57,
+0x8b, 0x96, 0x16, 0x1c, 0xfd, 0x24, 0xd5, 0x0b,
+0x09, 0xf9, 0x68, 0x11, 0x84, 0xfb, 0xca, 0x51,
+0x0c, 0xd1, 0x45, 0x19, 0xda, 0x10, 0x44, 0x8a,
+0xd9, 0xfe, 0x76, 0xa9, 0xfd, 0x60, 0x2d, 0x18,
+0x0b, 0x28, 0x95, 0xb2, 0x2d, 0xea, 0x88, 0x98,
+0xb8, 0xd1, 0x56, 0x21, 0xf0, 0x53, 0x1f, 0xf1,
+0x02, 0x6f, 0xe9, 0x46, 0x9b, 0x93, 0x5f, 0x28,
+0x90, 0x0f, 0xac, 0x36, 0xfa, 0x68, 0x23, 0x71,
+0x57, 0x56, 0xf6, 0xcc, 0xd3, 0xdf, 0x7d, 0x2a,
+0xd9, 0x1b, 0x73, 0x45, 0xeb, 0xba, 0x27, 0x85,
+0xef, 0x7a, 0x7f, 0xa5, 0xcb, 0x80, 0xc7, 0x30,
+0x36, 0xd2, 0x53, 0xee, 0xec, 0xac, 0x1e, 0xe7,
+0x31, 0xf1, 0x36, 0xa2, 0x9c, 0x63, 0xc6, 0x65,
+0x5b, 0x7f, 0x25, 0x75, 0x68, 0xa1, 0xea, 0xd3,
+0x7e, 0x00, 0x5c, 0x9a, 0x5e, 0xd8, 0x20, 0x18,
+0x32, 0x77, 0x07, 0x29, 0x12, 0x66, 0x1e, 0x36,
+0x73, 0xe7, 0x97, 0x04, 0x41, 0x37, 0xb1, 0xb1,
+0x72, 0x2b, 0xf4, 0xa1, 0x29, 0x20, 0x7c, 0x96,
+0x79, 0x0b, 0x2b, 0xd0, 0xd8, 0xde, 0xc8, 0x6c,
+0x3f, 0x93, 0xfb, 0xc5, 0xee, 0x78, 0x52, 0x11,
+0x15, 0x1b, 0x7a, 0xf6, 0xe2, 0x68, 0x99, 0xe7,
+0xfb, 0x46, 0x16, 0x84, 0xe3, 0xc7, 0xa1, 0xe6,
+0xe0, 0xd2, 0x46, 0xd5, 0xe1, 0xc4, 0x5f, 0xa0,
+0x66, 0xf4, 0xda, 0xc4, 0xff, 0x95, 0x1d, 0x02,
+0x03, 0x01, 0x00, 0x01, 0x30, 0x0d, 0x06, 0x09,
+0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01,
+0x0b, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00,
+0x87, 0x03, 0xda, 0xf2, 0x82, 0xc2, 0xdd, 0xaf,
+0x7c, 0x44, 0x2f, 0x86, 0xd3, 0x5f, 0x4c, 0x93,
+0x48, 0xb9, 0xfe, 0x07, 0x17, 0xbb, 0x21, 0xf7,
+0x25, 0x23, 0x4e, 0xaa, 0x22, 0x0c, 0x16, 0xb9,
+0x73, 0xae, 0x9d, 0x46, 0x7c, 0x75, 0xd9, 0xc3,
+0x49, 0x57, 0x47, 0xbf, 0x33, 0xb7, 0x97, 0xec,
+0xf5, 0x40, 0x75, 0xc0, 0x46, 0x22, 0xf0, 0xa0,
+0x5d, 0x9c, 0x79, 0x13, 0xa1, 0xff, 0xb8, 0xa3,
+0x2f, 0x7b, 0x8e, 0x06, 0x3f, 0xc8, 0xb6, 0xe4,
+0x6a, 0x28, 0xf2, 0x34, 0x5c, 0x23, 0x3f, 0x32,
+0xc0, 0xe6, 0xad, 0x0f, 0xac, 0xcf, 0x55, 0x74,
+0x47, 0x73, 0xd3, 0x01, 0x85, 0xb7, 0x0b, 0x22,
+0x56, 0x24, 0x7d, 0x9f, 0x09, 0xa9, 0x0e, 0x86,
+0x9e, 0x37, 0x5b, 0x9c, 0x6d, 0x02, 0xd9, 0x8c,
+0xc8, 0x50, 0x6a, 0xe2, 0x59, 0xf3, 0x16, 0x06,
+0xea, 0xb2, 0x42, 0xb5, 0x58, 0xfe, 0xba, 0xd1,
+0x81, 0x57, 0x1a, 0xef, 0xb2, 0x38, 0x88, 0x58,
+0xf6, 0xaa, 0xc4, 0x2e, 0x8b, 0x5a, 0x27, 0xe4,
+0xa5, 0xe8, 0xa4, 0xca, 0x67, 0x5c, 0xac, 0x72,
+0x67, 0xc3, 0x6f, 0x13, 0xc3, 0x2d, 0x35, 0x79,
+0xd7, 0x8a, 0xe7, 0xf5, 0xd4, 0x21, 0x30, 0x4a,
+0xd5, 0xf6, 0xa3, 0xd9, 0x79, 0x56, 0xf2, 0x0f,
+0x10, 0xf7, 0x7d, 0xd0, 0x51, 0x93, 0x2f, 0x47,
+0xf8, 0x7d, 0x4b, 0x0a, 0x84, 0x55, 0x12, 0x0a,
+0x7d, 0x4e, 0x3b, 0x1f, 0x2b, 0x2f, 0xfc, 0x28,
+0xb3, 0x69, 0x34, 0xe1, 0x80, 0x80, 0xbb, 0xe2,
+0xaf, 0xb9, 0xd6, 0x30, 0xf1, 0x1d, 0x54, 0x87,
+0x23, 0x99, 0x9f, 0x51, 0x03, 0x4c, 0x45, 0x7d,
+0x02, 0x65, 0x73, 0xab, 0xfd, 0xcf, 0x94, 0xcc,
+0x0d, 0x3a, 0x60, 0xfd, 0x3c, 0x14, 0x2f, 0x16,
+0x33, 0xa9, 0x21, 0x1f, 0xcb, 0x50, 0xb1, 0x8f,
+0x03, 0xee, 0xa0, 0x66, 0xa9, 0x16, 0x79, 0x14,
diff --git a/net/wireless/certs/sforshee.x509 b/net/wireless/certs/sforshee.x509
deleted file mode 100644 (file)
index c6f8f9d..0000000
Binary files a/net/wireless/certs/sforshee.x509 and /dev/null differ
index b1ac23ca20c86be0af71e9a1ba92cc99d8d5a967..213d0c498c97d78b17c81d1fd8b850c8768f7057 100644 (file)
@@ -2610,7 +2610,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
        case NL80211_IFTYPE_AP:
                if (wdev->ssid_len &&
                    nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid))
-                       goto nla_put_failure;
+                       goto nla_put_failure_locked;
                break;
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_P2P_CLIENT:
@@ -2623,7 +2623,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
                if (!ssid_ie)
                        break;
                if (nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2))
-                       goto nla_put_failure;
+                       goto nla_put_failure_locked;
                break;
                }
        default:
@@ -2635,6 +2635,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
        genlmsg_end(msg, hdr);
        return 0;
 
+ nla_put_failure_locked:
+       wdev_unlock(wdev);
  nla_put_failure:
        genlmsg_cancel(msg, hdr);
        return -EMSGSIZE;
index 00641b611aeda4985b2479b9d2f303451762f8d2..75982506617b35208f1ce6534eb2ccff6eeef9c2 100644 (file)
 #include <linux/notifier.h>
 
 #ifdef CONFIG_XFRM_OFFLOAD
-int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
 {
        int err;
+       unsigned long flags;
        struct xfrm_state *x;
+       struct sk_buff *skb2;
+       struct softnet_data *sd;
+       netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
 
-       if (skb_is_gso(skb))
-               return 0;
+       if (!xo)
+               return skb;
 
-       if (xo) {
-               x = skb->sp->xvec[skb->sp->len - 1];
-               if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
-                       return 0;
+       if (!(features & NETIF_F_HW_ESP))
+               esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+
+       x = skb->sp->xvec[skb->sp->len - 1];
+       if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
+               return skb;
+
+       local_irq_save(flags);
+       sd = this_cpu_ptr(&softnet_data);
+       err = !skb_queue_empty(&sd->xfrm_backlog);
+       local_irq_restore(flags);
 
+       if (err) {
+               *again = true;
+               return skb;
+       }
+
+       if (skb_is_gso(skb)) {
+               struct net_device *dev = skb->dev;
+
+               if (unlikely(!x->xso.offload_handle || (x->xso.dev != dev))) {
+                       struct sk_buff *segs;
+
+                       /* Packet got rerouted, fixup features and segment it. */
+                       esp_features = esp_features & ~(NETIF_F_HW_ESP
+                                                       | NETIF_F_GSO_ESP);
+
+                       segs = skb_gso_segment(skb, esp_features);
+                       if (IS_ERR(segs)) {
+                               kfree_skb(skb);
+                               atomic_long_inc(&dev->tx_dropped);
+                               return NULL;
+                       } else {
+                               consume_skb(skb);
+                               skb = segs;
+                       }
+               }
+       }
+
+       if (!skb->next) {
                x->outer_mode->xmit(x, skb);
 
-               err = x->type_offload->xmit(x, skb, features);
+               xo->flags |= XFRM_DEV_RESUME;
+
+               err = x->type_offload->xmit(x, skb, esp_features);
                if (err) {
+                       if (err == -EINPROGRESS)
+                               return NULL;
+
                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
-                       return err;
+                       kfree_skb(skb);
+                       return NULL;
                }
 
                skb_push(skb, skb->data - skb_mac_header(skb));
+
+               return skb;
        }
 
-       return 0;
+       skb2 = skb;
+
+       do {
+               struct sk_buff *nskb = skb2->next;
+               skb2->next = NULL;
+
+               xo = xfrm_offload(skb2);
+               xo->flags |= XFRM_DEV_RESUME;
+
+               x->outer_mode->xmit(x, skb2);
+
+               err = x->type_offload->xmit(x, skb2, esp_features);
+               if (!err) {
+                       skb2->next = nskb;
+               } else if (err != -EINPROGRESS) {
+                       XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+                       skb2->next = nskb;
+                       kfree_skb_list(skb2);
+                       return NULL;
+               } else {
+                       if (skb == skb2)
+                               skb = nskb;
+
+                       if (!skb)
+                               return NULL;
+
+                       goto skip_push;
+               }
+
+               skb_push(skb2, skb2->data - skb_mac_header(skb2));
+
+skip_push:
+               skb2 = nskb;
+       } while (skb2);
+
+       return skb;
 }
 EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
 
@@ -120,8 +202,8 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
        if (!x->type_offload || x->encap)
                return false;
 
-       if ((x->xso.offload_handle && (dev == xfrm_dst_path(dst)->dev)) &&
-            !xdst->child->xfrm && x->type->get_mtu) {
+       if ((!dev || (x->xso.offload_handle && (dev == xfrm_dst_path(dst)->dev))) &&
+            (!xdst->child->xfrm && x->type->get_mtu)) {
                mtu = x->type->get_mtu(x, xdst->child_mtu_cached);
 
                if (skb->len <= mtu)
@@ -140,19 +222,82 @@ ok:
        return true;
 }
 EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
+
+void xfrm_dev_resume(struct sk_buff *skb)
+{
+       struct net_device *dev = skb->dev;
+       int ret = NETDEV_TX_BUSY;
+       struct netdev_queue *txq;
+       struct softnet_data *sd;
+       unsigned long flags;
+
+       rcu_read_lock();
+       txq = netdev_pick_tx(dev, skb, NULL);
+
+       HARD_TX_LOCK(dev, txq, smp_processor_id());
+       if (!netif_xmit_frozen_or_stopped(txq))
+               skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+       HARD_TX_UNLOCK(dev, txq);
+
+       if (!dev_xmit_complete(ret)) {
+               local_irq_save(flags);
+               sd = this_cpu_ptr(&softnet_data);
+               skb_queue_tail(&sd->xfrm_backlog, skb);
+               raise_softirq_irqoff(NET_TX_SOFTIRQ);
+               local_irq_restore(flags);
+       }
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(xfrm_dev_resume);
+
+void xfrm_dev_backlog(struct softnet_data *sd)
+{
+       struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
+       struct sk_buff_head list;
+       struct sk_buff *skb;
+
+       if (skb_queue_empty(xfrm_backlog))
+               return;
+
+       __skb_queue_head_init(&list);
+
+       spin_lock(&xfrm_backlog->lock);
+       skb_queue_splice_init(xfrm_backlog, &list);
+       spin_unlock(&xfrm_backlog->lock);
+
+       while (!skb_queue_empty(&list)) {
+               skb = __skb_dequeue(&list);
+               xfrm_dev_resume(skb);
+       }
+
+}
 #endif
 
-static int xfrm_dev_register(struct net_device *dev)
+static int xfrm_api_check(struct net_device *dev)
 {
-       if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
-               return NOTIFY_BAD;
+#ifdef CONFIG_XFRM_OFFLOAD
        if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
            !(dev->features & NETIF_F_HW_ESP))
                return NOTIFY_BAD;
 
+       if ((dev->features & NETIF_F_HW_ESP) &&
+           (!(dev->xfrmdev_ops &&
+              dev->xfrmdev_ops->xdo_dev_state_add &&
+              dev->xfrmdev_ops->xdo_dev_state_delete)))
+               return NOTIFY_BAD;
+#else
+       if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
+               return NOTIFY_BAD;
+#endif
+
        return NOTIFY_DONE;
 }
 
+static int xfrm_dev_register(struct net_device *dev)
+{
+       return xfrm_api_check(dev);
+}
+
 static int xfrm_dev_unregister(struct net_device *dev)
 {
        xfrm_policy_cache_flush();
@@ -161,16 +306,7 @@ static int xfrm_dev_unregister(struct net_device *dev)
 
 static int xfrm_dev_feat_change(struct net_device *dev)
 {
-       if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
-               return NOTIFY_BAD;
-       else if (!(dev->features & NETIF_F_HW_ESP))
-               dev->xfrmdev_ops = NULL;
-
-       if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
-           !(dev->features & NETIF_F_HW_ESP))
-               return NOTIFY_BAD;
-
-       return NOTIFY_DONE;
+       return xfrm_api_check(dev);
 }
 
 static int xfrm_dev_down(struct net_device *dev)
index b3b353d7252724e10f23a9288cd24aab3ef34007..f055ca10bbc1d33c9c1cee1fd913b7c930984ac1 100644 (file)
@@ -579,15 +579,14 @@ static int snd_rawmidi_info_user(struct snd_rawmidi_substream *substream,
        return 0;
 }
 
-int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info)
+static int __snd_rawmidi_info_select(struct snd_card *card,
+                                    struct snd_rawmidi_info *info)
 {
        struct snd_rawmidi *rmidi;
        struct snd_rawmidi_str *pstr;
        struct snd_rawmidi_substream *substream;
 
-       mutex_lock(&register_mutex);
        rmidi = snd_rawmidi_search(card, info->device);
-       mutex_unlock(&register_mutex);
        if (!rmidi)
                return -ENXIO;
        if (info->stream < 0 || info->stream > 1)
@@ -603,6 +602,16 @@ int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info
        }
        return -ENXIO;
 }
+
+int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info)
+{
+       int ret;
+
+       mutex_lock(&register_mutex);
+       ret = __snd_rawmidi_info_select(card, info);
+       mutex_unlock(&register_mutex);
+       return ret;
+}
 EXPORT_SYMBOL(snd_rawmidi_info_select);
 
 static int snd_rawmidi_info_select_user(struct snd_card *card,
index c19c81d230bd7423b4153d2266a45e09333f8714..b4f1b6e88305496f91d028ceb82fe9b8a6a60ccb 100644 (file)
@@ -55,10 +55,11 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
 #define is_kabylake(codec) ((codec)->core.vendor_id == 0x8086280b)
 #define is_geminilake(codec) (((codec)->core.vendor_id == 0x8086280d) || \
                                ((codec)->core.vendor_id == 0x80862800))
+#define is_cannonlake(codec) ((codec)->core.vendor_id == 0x8086280c)
 #define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
                                || is_skylake(codec) || is_broxton(codec) \
-                               || is_kabylake(codec)) || is_geminilake(codec)
-
+                               || is_kabylake(codec)) || is_geminilake(codec) \
+                               || is_cannonlake(codec)
 #define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882)
 #define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883)
 #define is_valleyview_plus(codec) (is_valleyview(codec) || is_cherryview(codec))
@@ -3841,6 +3842,7 @@ HDA_CODEC_ENTRY(0x80862808, "Broadwell HDMI",     patch_i915_hsw_hdmi),
 HDA_CODEC_ENTRY(0x80862809, "Skylake HDMI",    patch_i915_hsw_hdmi),
 HDA_CODEC_ENTRY(0x8086280a, "Broxton HDMI",    patch_i915_hsw_hdmi),
 HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI",   patch_i915_hsw_hdmi),
+HDA_CODEC_ENTRY(0x8086280c, "Cannonlake HDMI", patch_i915_glk_hdmi),
 HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi),
 HDA_CODEC_ENTRY(0x80862800, "Geminilake HDMI", patch_i915_glk_hdmi),
 HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
index 4b21f71d685c78fd00345b2e229541b493c614f0..6a4db00511ab14593e8a0d33500c547a2e9656ae 100644 (file)
@@ -5185,6 +5185,22 @@ static void alc233_alc662_fixup_lenovo_dual_codecs(struct hda_codec *codec,
        }
 }
 
+/* Forcibly assign NID 0x03 to HP/LO while NID 0x02 to SPK for EQ */
+static void alc274_fixup_bind_dacs(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action)
+{
+       struct alc_spec *spec = codec->spec;
+       static hda_nid_t preferred_pairs[] = {
+               0x21, 0x03, 0x1b, 0x03, 0x16, 0x02,
+               0
+       };
+
+       if (action != HDA_FIXUP_ACT_PRE_PROBE)
+               return;
+
+       spec->gen.preferred_dacs = preferred_pairs;
+}
+
 /* for hda_fixup_thinkpad_acpi() */
 #include "thinkpad_helper.c"
 
@@ -5302,6 +5318,8 @@ enum {
        ALC233_FIXUP_LENOVO_MULTI_CODECS,
        ALC294_FIXUP_LENOVO_MIC_LOCATION,
        ALC700_FIXUP_INTEL_REFERENCE,
+       ALC274_FIXUP_DELL_BIND_DACS,
+       ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6112,6 +6130,21 @@ static const struct hda_fixup alc269_fixups[] = {
                        {}
                }
        },
+       [ALC274_FIXUP_DELL_BIND_DACS] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc274_fixup_bind_dacs,
+               .chained = true,
+               .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
+       },
+       [ALC274_FIXUP_DELL_AIO_LINEOUT_VERB] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1b, 0x0401102f },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC274_FIXUP_DELL_BIND_DACS
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6578,7 +6611,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x14, 0x90170110},
                {0x1b, 0x90a70130},
                {0x21, 0x03211020}),
-       SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+       SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
                {0x12, 0xb7a60130},
                {0x13, 0xb8a61140},
                {0x16, 0x90170110},
index 7c9e361b2200be081aca8f5a99d1b71a5846d30b..2b4ceda36291c01c6cca69d3a1cacd6c23014f40 100644 (file)
@@ -2173,20 +2173,25 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
        kctl->private_value = (unsigned long)namelist;
        kctl->private_free = usb_mixer_selector_elem_free;
 
-       nameid = uac_selector_unit_iSelector(desc);
+       /* check the static mapping table at first */
        len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
-       if (len)
-               ;
-       else if (nameid)
-               len = snd_usb_copy_string_desc(state, nameid, kctl->id.name,
-                                        sizeof(kctl->id.name));
-       else
-               len = get_term_name(state, &state->oterm,
-                                   kctl->id.name, sizeof(kctl->id.name), 0);
-
        if (!len) {
-               strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
+               /* no mapping ? */
+               /* if iSelector is given, use it */
+               nameid = uac_selector_unit_iSelector(desc);
+               if (nameid)
+                       len = snd_usb_copy_string_desc(state, nameid,
+                                                      kctl->id.name,
+                                                      sizeof(kctl->id.name));
+               /* ... or pick up the terminal name at next */
+               if (!len)
+                       len = get_term_name(state, &state->oterm,
+                                   kctl->id.name, sizeof(kctl->id.name), 0);
+               /* ... or use the fixed string "USB" as the last resort */
+               if (!len)
+                       strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
 
+               /* and add the proper suffix */
                if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR)
                        append_ctl_name(kctl, " Clock Source");
                else if ((state->oterm.type & 0xff00) == 0x0100)
index 77eecaa4db1f32c9b7af87273c599181bf307443..a66ef5777887a78d7416e64c049c73b26477c7f7 100644 (file)
@@ -1166,10 +1166,11 @@ static bool is_marantz_denon_dac(unsigned int id)
 /* TEAC UD-501/UD-503/NT-503 USB DACs need a vendor cmd to switch
  * between PCM/DOP and native DSD mode
  */
-static bool is_teac_50X_dac(unsigned int id)
+static bool is_teac_dsd_dac(unsigned int id)
 {
        switch (id) {
        case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */
+       case USB_ID(0x0644, 0x8044): /* Esoteric D-05X */
                return true;
        }
        return false;
@@ -1202,7 +1203,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
                        break;
                }
                mdelay(20);
-       } else if (is_teac_50X_dac(subs->stream->chip->usb_id)) {
+       } else if (is_teac_dsd_dac(subs->stream->chip->usb_id)) {
                /* Vendor mode switch cmd is required. */
                switch (fmt->altsetting) {
                case 3: /* DSD mode (DSD_U32) requested */
@@ -1392,7 +1393,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
        }
 
        /* TEAC devices with USB DAC functionality */
-       if (is_teac_50X_dac(chip->usb_id)) {
+       if (is_teac_dsd_dac(chip->usb_id)) {
                if (fp->altsetting == 3)
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
        }
index cefe7c7cd4f6f29fabd90e33495d7a6d8ac6dbdd..0a8e37a519f258e72317f39a87ac9cc60ad47f6f 100644 (file)
@@ -2,7 +2,7 @@
 #ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
 #define _UAPI__ASM_BPF_PERF_EVENT_H__
 
-#include <asm/ptrace.h>
+#include "ptrace.h"
 
 typedef user_pt_regs bpf_user_pt_regs_t;
 
index 217cf6f95c366037ccd2ff3cedb1d61de22c616a..a5684d0968b4fd087905e659c0ce80bd170434c2 100755 (executable)
@@ -478,7 +478,7 @@ class Provider(object):
     @staticmethod
     def is_field_wanted(fields_filter, field):
         """Indicate whether field is valid according to fields_filter."""
-        if not fields_filter or fields_filter == "help":
+        if not fields_filter:
             return True
         return re.match(fields_filter, field) is not None
 
@@ -549,8 +549,8 @@ class TracepointProvider(Provider):
 
     def update_fields(self, fields_filter):
         """Refresh fields, applying fields_filter"""
-        self._fields = [field for field in self.get_available_fields()
-                        if self.is_field_wanted(fields_filter, field)]
+        self.fields = [field for field in self.get_available_fields()
+                       if self.is_field_wanted(fields_filter, field)]
 
     @staticmethod
     def get_online_cpus():
@@ -950,7 +950,8 @@ class Tui(object):
             curses.nocbreak()
             curses.endwin()
 
-    def get_all_gnames(self):
+    @staticmethod
+    def get_all_gnames():
         """Returns a list of (pid, gname) tuples of all running guests"""
         res = []
         try:
@@ -963,7 +964,7 @@ class Tui(object):
             # perform a sanity check before calling the more expensive
             # function to possibly extract the guest name
             if ' -name ' in line[1]:
-                res.append((line[0], self.get_gname_from_pid(line[0])))
+                res.append((line[0], Tui.get_gname_from_pid(line[0])))
         child.stdout.close()
 
         return res
@@ -984,7 +985,8 @@ class Tui(object):
         except Exception:
             self.screen.addstr(row + 1, 2, 'Not available')
 
-    def get_pid_from_gname(self, gname):
+    @staticmethod
+    def get_pid_from_gname(gname):
         """Fuzzy function to convert guest name to QEMU process pid.
 
         Returns a list of potential pids, can be empty if no match found.
@@ -992,7 +994,7 @@ class Tui(object):
 
         """
         pids = []
-        for line in self.get_all_gnames():
+        for line in Tui.get_all_gnames():
             if gname == line[1]:
                 pids.append(int(line[0]))
 
@@ -1090,15 +1092,16 @@ class Tui(object):
             # sort by totals
             return (0, -stats[x][0])
         total = 0.
-        for val in stats.values():
-            total += val[0]
+        for key in stats.keys():
+            if key.find('(') is -1:
+                total += stats[key][0]
         if self._sorting == SORT_DEFAULT:
             sortkey = sortCurAvg
         else:
             sortkey = sortTotal
+        tavg = 0
         for key in sorted(stats.keys(), key=sortkey):
-
-            if row >= self.screen.getmaxyx()[0]:
+            if row >= self.screen.getmaxyx()[0] - 1:
                 break
             values = stats[key]
             if not values[0] and not values[1]:
@@ -1110,9 +1113,15 @@ class Tui(object):
                 self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' %
                                    (key, values[0], values[0] * 100 / total,
                                     cur))
+                if cur is not '' and key.find('(') is -1:
+                    tavg += cur
             row += 1
         if row == 3:
             self.screen.addstr(4, 1, 'No matching events reported yet')
+        else:
+            self.screen.addstr(row, 1, '%-40s %10d        %8s' %
+                               ('Total', total, tavg if tavg else ''),
+                               curses.A_BOLD)
         self.screen.refresh()
 
     def show_msg(self, text):
@@ -1358,7 +1367,7 @@ class Tui(object):
                 if char == 'x':
                     self.update_drilldown()
                     # prevents display of current values on next refresh
-                    self.stats.get()
+                    self.stats.get(self._display_guests)
             except KeyboardInterrupt:
                 break
             except curses.error:
@@ -1451,16 +1460,13 @@ Press any other key to refresh statistics immediately.
         try:
             pids = Tui.get_pid_from_gname(val)
         except:
-            raise optparse.OptionValueError('Error while searching for guest '
-                                            '"{}", use "-p" to specify a pid '
-                                            'instead'.format(val))
+            sys.exit('Error while searching for guest "{}". Use "-p" to '
+                     'specify a pid instead?'.format(val))
         if len(pids) == 0:
-            raise optparse.OptionValueError('No guest by the name "{}" '
-                                            'found'.format(val))
+            sys.exit('Error: No guest by the name "{}" found'.format(val))
         if len(pids) > 1:
-            raise optparse.OptionValueError('Multiple processes found (pids: '
-                                            '{}) - use "-p" to specify a pid '
-                                            'instead'.format(" ".join(pids)))
+            sys.exit('Error: Multiple processes found (pids: {}). Use "-p" '
+                     'to specify the desired pid'.format(" ".join(pids)))
         parser.values.pid = pids[0]
 
     optparser = optparse.OptionParser(description=description_text,
@@ -1518,7 +1524,16 @@ Press any other key to refresh statistics immediately.
                          help='restrict statistics to guest by name',
                          callback=cb_guest_to_pid,
                          )
-    (options, _) = optparser.parse_args(sys.argv)
+    options, unkn = optparser.parse_args(sys.argv)
+    if len(unkn) != 1:
+        sys.exit('Error: Extra argument(s): ' + ' '.join(unkn[1:]))
+    try:
+        # verify that we were passed a valid regex up front
+        re.compile(options.fields)
+    except re.error:
+        sys.exit('Error: "' + options.fields + '" is not a valid regular '
+                 'expression')
+
     return options
 
 
@@ -1564,16 +1579,13 @@ def main():
 
     stats = Stats(options)
 
-    if options.fields == "help":
-        event_list = "\n"
-        s = stats.get()
-        for key in s.keys():
-            if key.find('(') != -1:
-                key = key[0:key.find('(')]
-            if event_list.find('\n' + key + '\n') == -1:
-                event_list += key + '\n'
-        sys.stdout.write(event_list)
-        return ""
+    if options.fields == 'help':
+        stats.fields_filter = None
+        event_list = []
+        for key in stats.get().keys():
+            event_list.append(key.split('(', 1)[0])
+        sys.stdout.write('  ' + '\n  '.join(sorted(set(event_list))) + '\n')
+        sys.exit(0)
 
     if options.log:
         log(stats)
index e5cf836be8a1848bb82f39cfa3c7c75dcc67b4fa..b5b3810c9e945d7f3a39568840fbc5b73f84983b 100644 (file)
@@ -50,6 +50,8 @@ INTERACTIVE COMMANDS
 *s*::   set update interval
 
 *x*::  toggle reporting of stats for child trace events
+ ::     *Note*: The stats for the parents summarize the respective child trace
+                events
 
 Press any other key to refresh statistics immediately.
 
@@ -86,7 +88,7 @@ OPTIONS
 
 -f<fields>::
 --fields=<fields>::
-       fields to display (regex)
+       fields to display (regex), "-f help" for a list of available events
 
 -h::
 --help::
index a1fcb0c31d0209e17d0e53ddaad979ec77a7e56f..f1fdb36269f22ba1ff0b369597e2ec3e12401b59 100644 (file)
@@ -11,7 +11,7 @@ ifneq ($(wildcard $(GENHDR)),)
 endif
 
 CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
-LDLIBS += -lcap -lelf
+LDLIBS += -lcap -lelf -lrt
 
 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
        test_align test_verifier_log test_dev_cgroup
index 3914f7a4585a86fe2d28058a14f9ba14a9fd50f0..c940505c2978d71e175a3170298366b5a6a6cd5f 100755 (executable)
@@ -647,8 +647,8 @@ try:
 
     start_test("Test asking for TC offload of two filters...")
     sim.cls_bpf_add_filter(obj, da=True, skip_sw=True)
-    sim.cls_bpf_add_filter(obj, da=True, skip_sw=True)
-    # The above will trigger a splat until TC cls_bpf drivers are fixed
+    ret, _ = sim.cls_bpf_add_filter(obj, da=True, skip_sw=True, fail=False)
+    fail(ret == 0, "Managed to offload two TC filters at the same time")
 
     sim.tc_flush_filters(bound=2, total=2)
 
index 6472ca98690eb6006af9c39787ab54a6303a0140..09087ab122936271b1f1ab7c2bdd58ef0efe041a 100644 (file)
@@ -441,7 +441,7 @@ static void test_bpf_obj_id(void)
                          info_len != sizeof(struct bpf_map_info) ||
                          strcmp((char *)map_infos[i].name, expected_map_name),
                          "get-map-info(fd)",
-                         "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
+                         "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
                          err, errno,
                          map_infos[i].type, BPF_MAP_TYPE_ARRAY,
                          info_len, sizeof(struct bpf_map_info),
@@ -485,7 +485,7 @@ static void test_bpf_obj_id(void)
                          *(int *)prog_infos[i].map_ids != map_infos[i].id ||
                          strcmp((char *)prog_infos[i].name, expected_prog_name),
                          "get-prog-info(fd)",
-                         "err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
+                         "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
                          err, errno, i,
                          prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
                          info_len, sizeof(struct bpf_prog_info),
@@ -553,7 +553,7 @@ static void test_bpf_obj_id(void)
                      memcmp(&prog_info, &prog_infos[i], info_len) ||
                      *(int *)prog_info.map_ids != saved_map_id,
                      "get-prog-info(next_id->fd)",
-                     "err %d errno %d info_len %u(%lu) memcmp %d map_id %u(%u)\n",
+                     "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
                      err, errno, info_len, sizeof(struct bpf_prog_info),
                      memcmp(&prog_info, &prog_infos[i], info_len),
                      *(int *)prog_info.map_ids, saved_map_id);
@@ -599,7 +599,7 @@ static void test_bpf_obj_id(void)
                      memcmp(&map_info, &map_infos[i], info_len) ||
                      array_value != array_magic_value,
                      "check get-map-info(next_id->fd)",
-                     "err %d errno %d info_len %u(%lu) memcmp %d array_value %llu(%llu)\n",
+                     "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
                      err, errno, info_len, sizeof(struct bpf_map_info),
                      memcmp(&map_info, &map_infos[i], info_len),
                      array_value, array_magic_value);
index 5d0a574ce2700fc2a99919e5db497787701de1d6..543847957fdd046e0f04bc1423189ada9e3f3df3 100644 (file)
@@ -423,9 +423,7 @@ static struct bpf_test tests[] = {
                        BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr_unpriv = "R1 subtraction from stack pointer",
-               .result_unpriv = REJECT,
-               .errstr = "R1 invalid mem access",
+               .errstr = "R1 subtraction from stack pointer",
                .result = REJECT,
        },
        {
@@ -607,7 +605,6 @@ static struct bpf_test tests[] = {
                },
                .errstr = "misaligned stack access",
                .result = REJECT,
-               .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
        },
        {
                "invalid map_fd for function call",
@@ -1798,7 +1795,6 @@ static struct bpf_test tests[] = {
                },
                .result = REJECT,
                .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
-               .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
        },
        {
                "PTR_TO_STACK store/load - bad alignment on reg",
@@ -1811,7 +1807,6 @@ static struct bpf_test tests[] = {
                },
                .result = REJECT,
                .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
-               .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
        },
        {
                "PTR_TO_STACK store/load - out of bounds low",
@@ -1863,9 +1858,8 @@ static struct bpf_test tests[] = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .result = ACCEPT,
-               .result_unpriv = REJECT,
-               .errstr_unpriv = "R1 pointer += pointer",
+               .result = REJECT,
+               .errstr = "R1 pointer += pointer",
        },
        {
                "unpriv: neg pointer",
@@ -2593,7 +2587,8 @@ static struct bpf_test tests[] = {
                        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
                                    offsetof(struct __sk_buff, data)),
                        BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, len)),
                        BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
                        BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
                        BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
@@ -2900,7 +2895,7 @@ static struct bpf_test tests[] = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr = "invalid access to packet",
+               .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
@@ -3886,9 +3881,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3, 11 },
-               .errstr_unpriv = "R0 pointer += pointer",
-               .errstr = "R0 invalid mem access 'inv'",
-               .result_unpriv = REJECT,
+               .errstr = "R0 pointer += pointer",
                .result = REJECT,
                .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
@@ -3929,7 +3922,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 4 },
-               .errstr = "R4 invalid mem access",
+               .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS
        },
@@ -3950,7 +3943,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 4 },
-               .errstr = "R4 invalid mem access",
+               .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS
        },
@@ -3971,7 +3964,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 4 },
-               .errstr = "R4 invalid mem access",
+               .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS
        },
@@ -5196,10 +5189,8 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3 },
-               .errstr_unpriv = "R0 bitwise operator &= on pointer",
-               .errstr = "invalid mem access 'inv'",
+               .errstr = "R0 bitwise operator &= on pointer",
                .result = REJECT,
-               .result_unpriv = REJECT,
        },
        {
                "map element value illegal alu op, 2",
@@ -5215,10 +5206,8 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3 },
-               .errstr_unpriv = "R0 32-bit pointer arithmetic prohibited",
-               .errstr = "invalid mem access 'inv'",
+               .errstr = "R0 32-bit pointer arithmetic prohibited",
                .result = REJECT,
-               .result_unpriv = REJECT,
        },
        {
                "map element value illegal alu op, 3",
@@ -5234,10 +5223,8 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3 },
-               .errstr_unpriv = "R0 pointer arithmetic with /= operator",
-               .errstr = "invalid mem access 'inv'",
+               .errstr = "R0 pointer arithmetic with /= operator",
                .result = REJECT,
-               .result_unpriv = REJECT,
        },
        {
                "map element value illegal alu op, 4",
@@ -6020,8 +6007,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map_in_map = { 3 },
-               .errstr = "R1 type=inv expected=map_ptr",
-               .errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
+               .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
                .result = REJECT,
        },
        {
@@ -6117,6 +6103,30 @@ static struct bpf_test tests[] = {
                },
                .result = ACCEPT,
        },
+       {
+               "ld_abs: tests on r6 and skb data reload helper",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_LD_ABS(BPF_B, 0),
+                       BPF_LD_ABS(BPF_H, 0),
+                       BPF_LD_ABS(BPF_W, 0),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_6, 0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+                       BPF_MOV64_IMM(BPF_REG_2, 1),
+                       BPF_MOV64_IMM(BPF_REG_3, 2),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_skb_vlan_push),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+                       BPF_LD_ABS(BPF_B, 0),
+                       BPF_LD_ABS(BPF_H, 0),
+                       BPF_LD_ABS(BPF_W, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 42),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+       },
        {
                "ld_ind: check calling conv, r1",
                .insns = {
@@ -6301,7 +6311,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6325,7 +6335,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6351,7 +6361,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R8 invalid mem access 'inv'",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6376,7 +6386,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R8 invalid mem access 'inv'",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6424,7 +6434,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6495,7 +6505,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6546,7 +6556,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6573,7 +6583,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6599,7 +6609,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6628,7 +6638,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6658,7 +6668,7 @@ static struct bpf_test tests[] = {
                        BPF_JMP_IMM(BPF_JA, 0, 0, -7),
                },
                .fixup_map1 = { 4 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6686,8 +6696,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr_unpriv = "R0 pointer comparison prohibited",
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
                .result_unpriv = REJECT,
        },
@@ -6742,6 +6751,462 @@ static struct bpf_test tests[] = {
                .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
                .result = REJECT,
        },
+       {
+               "bounds check based on zero-extended MOV",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       /* r2 = 0x0000'0000'ffff'ffff */
+                       BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
+                       /* r2 = 0 */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+                       /* no-op */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+                       /* access at offset 0 */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT
+       },
+       {
+               "bounds check based on sign-extended MOV. test1",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       /* r2 = 0xffff'ffff'ffff'ffff */
+                       BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+                       /* r2 = 0xffff'ffff */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+                       /* r0 = <oob pointer> */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+                       /* access to OOB pointer */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "map_value pointer and 4294967295",
+               .result = REJECT
+       },
+       {
+               "bounds check based on sign-extended MOV. test2",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       /* r2 = 0xffff'ffff'ffff'ffff */
+                       BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+                       /* r2 = 0xfff'ffff */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
+                       /* r0 = <oob pointer> */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+                       /* access to OOB pointer */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "R0 min value is outside of the array range",
+               .result = REJECT
+       },
+       {
+               "bounds check based on reg_off + var_off + insn_off. test1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 4 },
+               .errstr = "value_size=8 off=1073741825",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "bounds check based on reg_off + var_off + insn_off. test2",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 4 },
+               .errstr = "value 1073741823",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "bounds check after truncation of non-boundary-crossing range",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+                       /* r1 = [0x00, 0xff] */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_2, 1),
+                       /* r2 = 0x10'0000'0000 */
+                       BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
+                       /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+                       /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+                       /* r1 = [0x00, 0xff] */
+                       BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
+                       /* r1 = 0 */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+                       /* no-op */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* access at offset 0 */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT
+       },
+       {
+               "bounds check after truncation of boundary-crossing range (1)",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+                       /* r1 = [0x00, 0xff] */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0xffff'ff80, 0x1'0000'007f] */
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0xffff'ff80, 0xffff'ffff] or
+                        *      [0x0000'0000, 0x0000'007f]
+                        */
+                       BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0x00, 0xff] or
+                        *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+                        */
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = 0 or
+                        *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+                        */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+                       /* no-op or OOB pointer computation */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* potentially OOB access */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               /* not actually fully unbounded, but the bound is very high */
+               .errstr = "R0 unbounded memory access",
+               .result = REJECT
+       },
+       {
+               "bounds check after truncation of boundary-crossing range (2)",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+                       /* r1 = [0x00, 0xff] */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0xffff'ff80, 0x1'0000'007f] */
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0xffff'ff80, 0xffff'ffff] or
+                        *      [0x0000'0000, 0x0000'007f]
+                        * difference to previous test: truncation via MOV32
+                        * instead of ALU32.
+                        */
+                       BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0x00, 0xff] or
+                        *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+                        */
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = 0 or
+                        *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+                        */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+                       /* no-op or OOB pointer computation */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* potentially OOB access */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               /* not actually fully unbounded, but the bound is very high */
+               .errstr = "R0 unbounded memory access",
+               .result = REJECT
+       },
+       {
+               "bounds check after wrapping 32-bit addition",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+                       /* r1 = 0x7fff'ffff */
+                       BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
+                       /* r1 = 0xffff'fffe */
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+                       /* r1 = 0 */
+                       BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
+                       /* no-op */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* access at offset 0 */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT
+       },
+       {
+               "bounds check after shift with oversized count operand",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_MOV64_IMM(BPF_REG_2, 32),
+                       BPF_MOV64_IMM(BPF_REG_1, 1),
+                       /* r1 = (u32)1 << (u32)32 = ? */
+                       BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
+                       /* r1 = [0x0000, 0xffff] */
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
+                       /* computes unknown pointer, potentially OOB */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* potentially OOB access */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "R0 max value is outside of the array range",
+               .result = REJECT
+       },
+       {
+               "bounds check after right shift of maybe-negative number",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       /* r1 = [0x00, 0xff] */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       /* r1 = [-0x01, 0xfe] */
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
+                       /* r1 = 0 or 0xff'ffff'ffff'ffff */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+                       /* r1 = 0 or 0xffff'ffff'ffff */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+                       /* computes unknown pointer, potentially OOB */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* potentially OOB access */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "R0 unbounded memory access",
+               .result = REJECT
+       },
+       {
+               "bounds check map access with off+size signed 32bit overflow. test1",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_JMP_A(0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "map_value pointer and 2147483646",
+               .result = REJECT
+       },
+       {
+               "bounds check map access with off+size signed 32bit overflow. test2",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_JMP_A(0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "pointer offset 1073741822",
+               .result = REJECT
+       },
+       {
+               "bounds check map access with off+size signed 32bit overflow. test3",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+                       BPF_JMP_A(0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "pointer offset -1073741822",
+               .result = REJECT
+       },
+       {
+               "bounds check map access with off+size signed 32bit overflow. test4",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_1, 1000000),
+                       BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+                       BPF_JMP_A(0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "map_value pointer and 1000000000000",
+               .result = REJECT
+       },
+       {
+               "pointer/scalar confusion in state equality check (way 1)",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_JMP_A(1),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+                       BPF_JMP_A(0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R0 leaks addr as return value"
+       },
+       {
+               "pointer/scalar confusion in state equality check (way 2)",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+                       BPF_JMP_A(1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R0 leaks addr as return value"
+       },
        {
                "variable-offset ctx access",
                .insns = {
@@ -6783,6 +7248,71 @@ static struct bpf_test tests[] = {
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_LWT_IN,
        },
+       {
+               "indirect variable-offset stack access",
+               .insns = {
+                       /* Fill the top 8 bytes of the stack */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       /* Get an unknown value */
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+                       /* Make it small and 4-byte aligned */
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
+                       /* add it to fp.  We now have either fp-4 or fp-8, but
+                        * we don't know which
+                        */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+                       /* dereference it indirectly */
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 5 },
+               .errstr = "variable stack read R2",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_LWT_IN,
+       },
+       {
+               "direct stack access with 32-bit wraparound. test1",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+                       BPF_MOV32_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_EXIT_INSN()
+               },
+               .errstr = "fp pointer and 2147483647",
+               .result = REJECT
+       },
+       {
+               "direct stack access with 32-bit wraparound. test2",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+                       BPF_MOV32_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_EXIT_INSN()
+               },
+               .errstr = "fp pointer and 1073741823",
+               .result = REJECT
+       },
+       {
+               "direct stack access with 32-bit wraparound. test3",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+                       BPF_MOV32_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_EXIT_INSN()
+               },
+               .errstr = "fp pointer offset 1073741822",
+               .result = REJECT
+       },
        {
                "liveness pruning and write screening",
                .insns = {
@@ -7104,6 +7634,19 @@ static struct bpf_test tests[] = {
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
+       {
+               "pkt_end - pkt_start is allowed",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
        {
                "XDP pkt read, pkt_end mangling, bad access 1",
                .insns = {
@@ -7119,7 +7662,7 @@ static struct bpf_test tests[] = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr = "R1 offset is outside of the packet",
+               .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
        },
@@ -7138,7 +7681,7 @@ static struct bpf_test tests[] = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr = "R1 offset is outside of the packet",
+               .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
        },
index e57b4ac40e72e0502dff75ea1d80c543280428eb..7177bea1fdfa62a1aa4e424d4dab665d8a9b7aaf 100644 (file)
@@ -1,3 +1,4 @@
 CONFIG_USER_NS=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_TEST_BPF=m
+CONFIG_NUMA=y
index 5215493166c986211fd574944c880917e4ac94aa..dada4ab6914221da2465874bd3b5530f7a65db97 100755 (executable)
@@ -502,6 +502,102 @@ kci_test_macsec()
        echo "PASS: macsec"
 }
 
+kci_test_gretap()
+{
+       testns="testns"
+       DEV_NS=gretap00
+       ret=0
+
+       ip netns add "$testns"
+       if [ $? -ne 0 ]; then
+               echo "SKIP gretap tests: cannot add net namespace $testns"
+               return 1
+       fi
+
+       ip link help gretap 2>&1 | grep -q "^Usage:"
+       if [ $? -ne 0 ];then
+               echo "SKIP: gretap: iproute2 too old"
+               return 1
+       fi
+
+       # test native tunnel
+       ip netns exec "$testns" ip link add dev "$DEV_NS" type gretap seq \
+               key 102 local 172.16.1.100 remote 172.16.1.200
+       check_err $?
+
+       ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+       check_err $?
+
+       ip netns exec "$testns" ip link set dev $DEV_NS up
+       check_err $?
+
+       ip netns exec "$testns" ip link del "$DEV_NS"
+       check_err $?
+
+       # test external mode
+       ip netns exec "$testns" ip link add dev "$DEV_NS" type gretap external
+       check_err $?
+
+       ip netns exec "$testns" ip link del "$DEV_NS"
+       check_err $?
+
+       if [ $ret -ne 0 ]; then
+               echo "FAIL: gretap"
+               return 1
+       fi
+       echo "PASS: gretap"
+
+       ip netns del "$testns"
+}
+
+kci_test_ip6gretap()
+{
+       testns="testns"
+       DEV_NS=ip6gretap00
+       ret=0
+
+       ip netns add "$testns"
+       if [ $? -ne 0 ]; then
+               echo "SKIP ip6gretap tests: cannot add net namespace $testns"
+               return 1
+       fi
+
+       ip link help ip6gretap 2>&1 | grep -q "^Usage:"
+       if [ $? -ne 0 ];then
+               echo "SKIP: ip6gretap: iproute2 too old"
+               return 1
+       fi
+
+       # test native tunnel
+       ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6gretap seq \
+               key 102 local fc00:100::1 remote fc00:100::2
+       check_err $?
+
+       ip netns exec "$testns" ip addr add dev "$DEV_NS" fc00:200::1/96
+       check_err $?
+
+       ip netns exec "$testns" ip link set dev $DEV_NS up
+       check_err $?
+
+       ip netns exec "$testns" ip link del "$DEV_NS"
+       check_err $?
+
+       # test external mode
+       ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6gretap external
+       check_err $?
+
+       ip netns exec "$testns" ip link del "$DEV_NS"
+       check_err $?
+
+       if [ $ret -ne 0 ]; then
+               echo "FAIL: ip6gretap"
+               return 1
+       fi
+       echo "PASS: ip6gretap"
+
+       ip netns del "$testns"
+}
+
 kci_test_rtnl()
 {
        kci_add_dummy
@@ -514,6 +610,8 @@ kci_test_rtnl()
        kci_test_route_get
        kci_test_tc
        kci_test_gre
+       kci_test_gretap
+       kci_test_ip6gretap
        kci_test_bridge
        kci_test_addrlabel
        kci_test_ifalias
index f9555b1e7f158f5203c1aaba47002424d3279203..cc29a814832837f5fb237dfdf74845a284e04367 100644 (file)
@@ -92,16 +92,23 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 {
        struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
        struct arch_timer_context *vtimer;
+       u32 cnt_ctl;
 
-       if (!vcpu) {
-               pr_warn_once("Spurious arch timer IRQ on non-VCPU thread\n");
-               return IRQ_NONE;
-       }
-       vtimer = vcpu_vtimer(vcpu);
+       /*
+        * We may see a timer interrupt after vcpu_put() has been called which
+        * sets the CPU's vcpu pointer to NULL, because even though the timer
+        * has been disabled in vtimer_save_state(), the hardware interrupt
+        * signal may not have been retired from the interrupt controller yet.
+        */
+       if (!vcpu)
+               return IRQ_HANDLED;
 
+       vtimer = vcpu_vtimer(vcpu);
        if (!vtimer->irq.level) {
-               vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
-               if (kvm_timer_irq_can_fire(vtimer))
+               cnt_ctl = read_sysreg_el0(cntv_ctl);
+               cnt_ctl &= ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT |
+                          ARCH_TIMER_CTRL_IT_MASK;
+               if (cnt_ctl == (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))
                        kvm_timer_update_irq(vcpu, true, vtimer);
        }
 
@@ -355,6 +362,7 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
 
        /* Disable the virtual timer */
        write_sysreg_el0(0, cntv_ctl);
+       isb();
 
        vtimer->loaded = false;
 out:
@@ -720,7 +728,7 @@ static int kvm_timer_dying_cpu(unsigned int cpu)
        return 0;
 }
 
-int kvm_timer_hyp_init(void)
+int kvm_timer_hyp_init(bool has_gic)
 {
        struct arch_timer_kvm_info *info;
        int err;
@@ -756,10 +764,13 @@ int kvm_timer_hyp_init(void)
                return err;
        }
 
-       err = irq_set_vcpu_affinity(host_vtimer_irq, kvm_get_running_vcpus());
-       if (err) {
-               kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
-               goto out_free_irq;
+       if (has_gic) {
+               err = irq_set_vcpu_affinity(host_vtimer_irq,
+                                           kvm_get_running_vcpus());
+               if (err) {
+                       kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
+                       goto out_free_irq;
+               }
        }
 
        kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
@@ -835,10 +846,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 no_vgic:
        preempt_disable();
        timer->enabled = 1;
-       if (!irqchip_in_kernel(vcpu->kvm))
-               kvm_timer_vcpu_load_user(vcpu);
-       else
-               kvm_timer_vcpu_load_vgic(vcpu);
+       kvm_timer_vcpu_load(vcpu);
        preempt_enable();
 
        return 0;
index 6b60c98a6e2294c773eb20ea4794445a667415ea..2e43f9d42bd5db2a07438bb98f5e029c6246adb4 100644 (file)
@@ -1326,7 +1326,7 @@ static int init_subsystems(void)
        /*
         * Init HYP architected timer support
         */
-       err = kvm_timer_hyp_init();
+       err = kvm_timer_hyp_init(vgic_present);
        if (err)
                goto out;
 
index b6e715fd3c90af8c74408b72652f9974a3fb894d..dac7ceb1a677746cadb086a2cf8a07d8e560373c 100644 (file)
@@ -112,7 +112,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
                }
 
                trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
-                              data);
+                              &data);
                data = vcpu_data_host_to_guest(vcpu, data, len);
                vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
        }
@@ -182,14 +182,14 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
                data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
                                               len);
 
-               trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
+               trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
                kvm_mmio_write_buf(data_buf, len, data);
 
                ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
                                       data_buf);
        } else {
                trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
-                              fault_ipa, 0);
+                              fault_ipa, NULL);
 
                ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
                                      data_buf);
index b36945d49986dd5c0f097f16837d72d81f655308..b4b69c2d10120237e12bc6524243071bf645f1d9 100644 (file)
@@ -509,8 +509,6 @@ static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
  */
 void free_hyp_pgds(void)
 {
-       unsigned long addr;
-
        mutex_lock(&kvm_hyp_pgd_mutex);
 
        if (boot_hyp_pgd) {
@@ -521,10 +519,10 @@ void free_hyp_pgds(void)
 
        if (hyp_pgd) {
                unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
-               for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
-                       unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
-               for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
-                       unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
+               unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
+                               (uintptr_t)high_memory - PAGE_OFFSET);
+               unmap_hyp_range(hyp_pgd, kern_hyp_va(VMALLOC_START),
+                               VMALLOC_END - VMALLOC_START);
 
                free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
                hyp_pgd = NULL;